Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h19
-rw-r--r--include/acpi/acexcep.h15
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi_bus.h9
-rw-r--r--include/acpi/acpi_drivers.h7
-rw-r--r--include/acpi/acpixf.h9
-rw-r--r--include/acpi/actbl.h1
-rw-r--r--include/acpi/actbl3.h23
-rw-r--r--include/acpi/actypes.h6
-rw-r--r--include/acpi/cppc_acpi.h4
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/asm-generic/4level-fixup.h2
-rw-r--r--include/asm-generic/5level-fixup.h3
-rw-r--r--include/asm-generic/bitops/builtin-fls.h2
-rw-r--r--include/asm-generic/bitops/fls.h2
-rw-r--r--include/asm-generic/bug.h16
-rw-r--r--include/asm-generic/compat.h24
-rw-r--r--include/asm-generic/dma-mapping.h11
-rw-r--r--include/asm-generic/error-injection.h1
-rw-r--r--include/asm-generic/export.h25
-rw-r--r--include/asm-generic/fixmap.h1
-rw-r--r--include/asm-generic/hugetlb.h88
-rw-r--r--include/asm-generic/io.h3
-rw-r--r--include/asm-generic/percpu.h4
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h3
-rw-r--r--include/asm-generic/pgtable-nop4d.h3
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h3
-rw-r--r--include/asm-generic/pgtable.h81
-rw-r--r--include/asm-generic/qrwlock.h7
-rw-r--r--include/asm-generic/qspinlock.h16
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/asm-generic/tlb.h113
-rw-r--r--include/asm-generic/uaccess.h12
-rw-r--r--include/asm-generic/unistd.h13
-rw-r--r--include/asm-generic/vmlinux.lds.h44
-rw-r--r--include/crypto/acompress.h16
-rw-r--r--include/crypto/aead.h32
-rw-r--r--include/crypto/akcipher.h34
-rw-r--r--include/crypto/algapi.h14
-rw-r--r--include/crypto/asym_tpm_subtype.h19
-rw-r--r--include/crypto/cbc.h2
-rw-r--r--include/crypto/chacha.h54
-rw-r--r--include/crypto/chacha20.h28
-rw-r--r--include/crypto/hash.h16
-rw-r--r--include/crypto/hash_info.h1
-rw-r--r--include/crypto/internal/cryptouser.h13
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/kpp.h21
-rw-r--r--include/crypto/mcryptd.h114
-rw-r--r--include/crypto/morus1280_glue.h2
-rw-r--r--include/crypto/morus640_glue.h2
-rw-r--r--include/crypto/nhpoly1305.h74
-rw-r--r--include/crypto/null.h2
-rw-r--r--include/crypto/poly1305.h28
-rw-r--r--include/crypto/public_key.h14
-rw-r--r--include/crypto/rng.h8
-rw-r--r--include/crypto/skcipher.h111
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/crypto/streebog.h34
-rw-r--r--include/drm/bridge/dw_hdmi.h1
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h14
-rw-r--r--include/drm/drmP.h8
-rw-r--r--include/drm/drm_atomic.h44
-rw-r--r--include/drm/drm_atomic_helper.h44
-rw-r--r--include/drm/drm_atomic_state_helper.h73
-rw-r--r--include/drm/drm_atomic_uapi.h58
-rw-r--r--include/drm/drm_blend.h6
-rw-r--r--include/drm/drm_client.h5
-rw-r--r--include/drm/drm_color_mgmt.h1
-rw-r--r--include/drm/drm_connector.h116
-rw-r--r--include/drm/drm_crtc.h50
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_damage_helper.h99
-rw-r--r--include/drm/drm_device.h10
-rw-r--r--include/drm/drm_dp_helper.h109
-rw-r--r--include/drm/drm_dp_mst_helper.h6
-rw-r--r--include/drm/drm_drv.h25
-rw-r--r--include/drm/drm_dsc.h485
-rw-r--r--include/drm/drm_edid.h6
-rw-r--r--include/drm/drm_encoder.h1
-rw-r--r--include/drm/drm_fb_cma_helper.h3
-rw-r--r--include/drm/drm_fb_helper.h34
-rw-r--r--include/drm/drm_file.h14
-rw-r--r--include/drm/drm_fourcc.h113
-rw-r--r--include/drm/drm_framebuffer.h24
-rw-r--r--include/drm/drm_gem.h181
-rw-r--r--include/drm/drm_gem_cma_helper.h24
-rw-r--r--include/drm/drm_global.h53
-rw-r--r--include/drm/drm_hdcp.h212
-rw-r--r--include/drm/drm_mipi_dsi.h8
-rw-r--r--include/drm/drm_mode_config.h42
-rw-r--r--include/drm/drm_modeset_lock.h59
-rw-r--r--include/drm/drm_panel.h2
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/drm/drm_plane.h63
-rw-r--r--include/drm/drm_plane_helper.h35
-rw-r--r--include/drm/drm_prime.h4
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_property.h5
-rw-r--r--include/drm/drm_syncobj.h7
-rw-r--r--include/drm/drm_util.h32
-rw-r--r--include/drm/drm_vblank.h8
-rw-r--r--include/drm/gpu_scheduler.h57
-rw-r--r--include/drm/i915_pciids.h22
-rw-r--r--include/drm/tinydrm/tinydrm.h35
-rw-r--r--include/drm/ttm/ttm_bo_api.h34
-rw-r--r--include/drm/ttm/ttm_bo_driver.h51
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/drm/ttm/ttm_lock.h248
-rw-r--r--include/drm/ttm/ttm_memory.h4
-rw-r--r--include/drm/ttm/ttm_object.h354
-rw-r--r--include/dt-bindings/bus/ti-sysc.h2
-rw-r--r--include/dt-bindings/clock/am3.h119
-rw-r--r--include/dt-bindings/clock/am4.h132
-rw-r--r--include/dt-bindings/clock/at91.h15
-rw-r--r--include/dt-bindings/clock/bcm2835-aux.h10
-rw-r--r--include/dt-bindings/clock/bcm2835.h10
-rw-r--r--include/dt-bindings/clock/dra7.h327
-rw-r--r--include/dt-bindings/clock/exynos3250.h5
-rw-r--r--include/dt-bindings/clock/exynos4.h37
-rw-r--r--include/dt-bindings/clock/exynos5250.h7
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h7
-rw-r--r--include/dt-bindings/clock/exynos5410.h7
-rw-r--r--include/dt-bindings/clock/exynos5420.h7
-rw-r--r--include/dt-bindings/clock/exynos5433.h5
-rw-r--r--include/dt-bindings/clock/exynos5440.h44
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h7
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h18
-rw-r--r--include/dt-bindings/clock/hi3670-clock.h348
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h5
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6sx-clock.h3
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h3
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h116
-rw-r--r--include/dt-bindings/clock/imx8-clock.h289
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h395
-rw-r--r--include/dt-bindings/clock/jz4725b-cgu.h35
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/maxim,max77686.h5
-rw-r--r--include/dt-bindings/clock/maxim,max77802.h5
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h4
-rw-r--r--include/dt-bindings/clock/mt7629-clk.h203
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sdm845.h116
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8960.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8996.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h94
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h165
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h156
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h5
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm845.h24
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sdm845.h15
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h4
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h7
-rw-r--r--include/dt-bindings/clock/r7s9210-cpg-mssr.h20
-rw-r--r--include/dt-bindings/clock/r8a7743-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7744-cpg-mssr.h39
-rw-r--r--include/dt-bindings/clock/r8a7745-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a774a1-cpg-mssr.h58
-rw-r--r--include/dt-bindings/clock/r8a774c0-cpg-mssr.h60
-rw-r--r--include/dt-bindings/clock/r8a7790-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7791-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7792-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h12
-rw-r--r--include/dt-bindings/clock/r8a7793-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h8
-rw-r--r--include/dt-bindings/clock/r8a7794-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h10
-rw-r--r--include/dt-bindings/clock/r8a7796-cpg-mssr.h10
-rw-r--r--include/dt-bindings/clock/r8a77970-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/r8a77995-cpg-mssr.h13
-rw-r--r--include/dt-bindings/clock/renesas-cpg-mssr.h8
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h3
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h3
-rw-r--r--include/dt-bindings/clock/s3c2410.h5
-rw-r--r--include/dt-bindings/clock/s3c2412.h5
-rw-r--r--include/dt-bindings/clock/s3c2443.h5
-rw-r--r--include/dt-bindings/clock/samsung,s2mps11.h5
-rw-r--r--include/dt-bindings/clock/samsung,s3c64xx-clock.h7
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h1
-rw-r--r--include/dt-bindings/clock/sun8i-de2.h3
-rw-r--r--include/dt-bindings/clock/suniv-ccu-f1c100s.h70
-rw-r--r--include/dt-bindings/clock/xlnx,zynqmp-clk.h116
-rw-r--r--include/dt-bindings/dma/dw-dmac.h14
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h559
-rw-r--r--include/dt-bindings/gpio/meson-g12a-gpio.h114
-rw-r--r--include/dt-bindings/gpio/tegra186-gpio.h41
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h125
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h2
-rw-r--r--include/dt-bindings/interrupt-controller/irq.h2
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h11
-rw-r--r--include/dt-bindings/media/xilinx-vip.h5
-rw-r--r--include/dt-bindings/memory/mt2712-larb-port.h95
-rw-r--r--include/dt-bindings/mfd/at91-usart.h17
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h2
-rw-r--r--include/dt-bindings/phy/phy-ocelot-serdes.h12
-rw-r--r--include/dt-bindings/pinctrl/bcm2835.h8
-rw-r--r--include/dt-bindings/pinctrl/k3.h35
-rw-r--r--include/dt-bindings/pinctrl/mt6797-pinfunc.h1368
-rw-r--r--include/dt-bindings/pinctrl/pads-imx8qm.h960
-rw-r--r--include/dt-bindings/pinctrl/pads-imx8qxp.h751
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h18
-rw-r--r--include/dt-bindings/pinctrl/r7s9210-pinctrl.h47
-rw-r--r--include/dt-bindings/pinctrl/rzn1-pinctrl.h141
-rw-r--r--include/dt-bindings/power/imx8mq-power.h21
-rw-r--r--include/dt-bindings/power/owl-s900-powergate.h23
-rw-r--r--include/dt-bindings/power/r8a7744-sysc.h24
-rw-r--r--include/dt-bindings/power/r8a774a1-sysc.h31
-rw-r--r--include/dt-bindings/power/r8a774c0-sysc.h25
-rw-r--r--include/dt-bindings/power/r8a77970-sysc.h7
-rw-r--r--include/dt-bindings/power/r8a77980-sysc.h6
-rw-r--r--include/dt-bindings/power/raspberrypi-power.h5
-rw-r--r--include/dt-bindings/power/rk3066-power.h22
-rw-r--r--include/dt-bindings/power/rk3188-power.h24
-rw-r--r--include/dt-bindings/regulator/active-semi,8945a-regulator.h30
-rw-r--r--include/dt-bindings/reset/actions,s700-reset.h34
-rw-r--r--include/dt-bindings/reset/actions,s900-reset.h65
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h17
-rw-r--r--include/dt-bindings/reset/imx7-reset.h4
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-aoss.h17
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-pdc.h20
-rw-r--r--include/dt-bindings/reset/sun8i-de2.h1
-rw-r--r--include/dt-bindings/reset/suniv-ccu-f1c100s.h38
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h1
-rw-r--r--include/dt-bindings/thermal/tegra194-bpmp-thermal.h15
-rw-r--r--include/dt-bindings/thermal/thermal_exynos.h12
-rw-r--r--include/dt-bindings/usb/pd.h26
-rw-r--r--include/keys/asymmetric-subtype.h9
-rw-r--r--include/keys/trusted.h136
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/kvm/arm_vgic.h9
-rw-r--r--include/linux/acpi.h41
-rw-r--r--include/linux/adxl.h13
-rw-r--r--include/linux/ahci_platform.h4
-rw-r--r--include/linux/alcor_pci.h286
-rw-r--r--include/linux/amba/mmci.h11
-rw-r--r--include/linux/amifd.h63
-rw-r--r--include/linux/amifdreg.h82
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/arm-smccc.h38
-rw-r--r--include/linux/audit.h8
-rw-r--r--include/linux/avf/virtchnl.h39
-rw-r--r--include/linux/backing-dev-defs.h11
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h81
-rw-r--r--include/linux/bitmap.h37
-rw-r--r--include/linux/bitops.h33
-rw-r--r--include/linux/blk-cgroup.h283
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h2
-rw-r--r--include/linux/blk-mq-virtio.h4
-rw-r--r--include/linux/blk-mq.h87
-rw-r--r--include/linux/blk-pm.h24
-rw-r--r--include/linux/blk_types.h26
-rw-r--r--include/linux/blkdev.h455
-rw-r--r--include/linux/bootmem.h404
-rw-r--r--include/linux/bpf-cgroup.h55
-rw-r--r--include/linux/bpf.h130
-rw-r--r--include/linux/bpf_types.h8
-rw-r--r--include/linux/bpf_verifier.h66
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/bsg-lib.h6
-rw-r--r--include/linux/btf.h20
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/build_bug.h36
-rw-r--r--include/linux/bvec.h3
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/ceph/auth.h8
-rw-r--r--include/linux/ceph/ceph_features.h17
-rw-r--r--include/linux/ceph/decode.h18
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/ceph/messenger.h32
-rw-r--r--include/linux/ceph/msgpool.h11
-rw-r--r--include/linux/ceph/msgr.h2
-rw-r--r--include/linux/ceph/osd_client.h32
-rw-r--r--include/linux/ceph/pagelist.h13
-rw-r--r--include/linux/ceph/rados.h28
-rw-r--r--include/linux/cgroup-defs.h6
-rw-r--r--include/linux/cgroup.h32
-rw-r--r--include/linux/clk-provider.h24
-rw-r--r--include/linux/clk.h90
-rw-r--r--include/linux/clk/at91_pmc.h15
-rw-r--r--include/linux/clk/clk-conf.h5
-rw-r--r--include/linux/clk/renesas.h8
-rw-r--r--include/linux/clk/ti.h7
-rw-r--r--include/linux/clocksource.h8
-rw-r--r--include/linux/compat.h141
-rw-r--r--include/linux/compat_time.h32
-rw-r--r--include/linux/compiler-clang.h25
-rw-r--r--include/linux/compiler-gcc.h176
-rw-r--r--include/linux/compiler-intel.h14
-rw-r--r--include/linux/compiler.h60
-rw-r--r--include/linux/compiler_attributes.h242
-rw-r--r--include/linux/compiler_types.h193
-rw-r--r--include/linux/console.h14
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/cordic.h9
-rw-r--r--include/linux/coredump.h4
-rw-r--r--include/linux/coresight.h41
-rw-r--r--include/linux/cpufeature.h2
-rw-r--r--include/linux/cpufreq.h8
-rw-r--r--include/linux/cpuhotplug.h4
-rw-r--r--include/linux/cpuidle.h13
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/crash_dump.h4
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/crc64.h11
-rw-r--r--include/linux/cred.h26
-rw-r--r--include/linux/crypto.h249
-rw-r--r--include/linux/cuda.h4
-rw-r--r--include/linux/dax.h15
-rw-r--r--include/linux/dcache.h15
-rw-r--r--include/linux/debug_locks.h4
-rw-r--r--include/linux/delayacct.h23
-rw-r--r--include/linux/dell-led.h7
-rw-r--r--include/linux/devfreq.h21
-rw-r--r--include/linux/device-mapper.h18
-rw-r--r--include/linux/device.h47
-rw-r--r--include/linux/dma-debug.h34
-rw-r--r--include/linux/dma-direct.h25
-rw-r--r--include/linux/dma-fence.h1
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/dma-mapping.h386
-rw-r--r--include/linux/dma-noncoherent.h34
-rw-r--r--include/linux/dma/pxa-dma.h11
-rw-r--r--include/linux/dma/sprd-dma.h131
-rw-r--r--include/linux/dma_remapping.h58
-rw-r--r--include/linux/dmar.h13
-rw-r--r--include/linux/dns_resolver.h4
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/edac.h9
-rw-r--r--include/linux/efi.h105
-rw-r--r--include/linux/elevator.h96
-rw-r--r--include/linux/elfcore-compat.h8
-rw-r--r--include/linux/energy_model.h187
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/ethtool.h33
-rw-r--r--include/linux/export.h76
-rw-r--r--include/linux/f2fs_fs.h15
-rw-r--r--include/linux/fanotify.h60
-rw-r--r--include/linux/fb.h21
-rw-r--r--include/linux/fdtable.h1
-rw-r--r--include/linux/filter.h87
-rw-r--r--include/linux/firmware/imx/ipc.h59
-rw-r--r--include/linux/firmware/imx/sci.h18
-rw-r--r--include/linux/firmware/imx/svc/misc.h55
-rw-r--r--include/linux/firmware/imx/svc/pm.h85
-rw-r--r--include/linux/firmware/imx/types.h65
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h312
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h217
-rw-r--r--include/linux/firmware/meson/meson_sm.h1
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h116
-rw-r--r--include/linux/font.h4
-rw-r--r--include/linux/fpga/fpga-bridge.h4
-rw-r--r--include/linux/fpga/fpga-mgr.h24
-rw-r--r--include/linux/fpga/fpga-region.h4
-rw-r--r--include/linux/fs.h198
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/fsi-occ.h25
-rw-r--r--include/linux/fsl/mc.h26
-rw-r--r--include/linux/fsl_ifc.h2
-rw-r--r--include/linux/fsnotify.h75
-rw-r--r--include/linux/fsnotify_backend.h39
-rw-r--r--include/linux/ftrace.h60
-rw-r--r--include/linux/futex.h8
-rw-r--r--include/linux/genalloc.h13
-rw-r--r--include/linux/genhd.h67
-rw-r--r--include/linux/genl_magic_struct.h5
-rw-r--r--include/linux/gfp.h295
-rw-r--r--include/linux/gpio/consumer.h111
-rw-r--r--include/linux/gpio/driver.h56
-rw-r--r--include/linux/hdmi.h28
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/highmem.h28
-rw-r--r--include/linux/hmm.h63
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/huge_mm.h37
-rw-r--r--include/linux/hugetlb.h16
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/hwmon-sysfs.h39
-rw-r--r--include/linux/hwmon.h2
-rw-r--r--include/linux/hyperv.h38
-rw-r--r--include/linux/i2c.h41
-rw-r--r--include/linux/i3c/ccc.h385
-rw-r--r--include/linux/i3c/device.h331
-rw-r--r--include/linux/i3c/master.h648
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/ide.h14
-rw-r--r--include/linux/idr.h78
-rw-r--r--include/linux/ieee80211.h140
-rw-r--r--include/linux/if_bridge.h12
-rw-r--r--include/linux/if_tun.h14
-rw-r--r--include/linux/if_vlan.h53
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h3
-rw-r--r--include/linux/iio/common/st_sensors.h2
-rw-r--r--include/linux/ima.h15
-rw-r--r--include/linux/indirect_call_wrapper.h51
-rw-r--r--include/linux/inetdevice.h4
-rw-r--r--include/linux/init.h43
-rw-r--r--include/linux/init_task.h9
-rw-r--r--include/linux/initrd.h3
-rw-r--r--include/linux/intel-iommu.h258
-rw-r--r--include/linux/interrupt.h21
-rw-r--r--include/linux/iomap.h4
-rw-r--r--include/linux/iommu.h51
-rw-r--r--include/linux/ioprio.h13
-rw-r--r--include/linux/iova.h1
-rw-r--r--include/linux/ipc_namespace.h3
-rw-r--r--include/linux/ipmi.h2
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h9
-rw-r--r--include/linux/irq_sim.h2
-rw-r--r--include/linux/irqchip.h4
-rw-r--r--include/linux/irqchip/arm-gic-common.h6
-rw-r--r--include/linux/irqchip/arm-gic-v3.h19
-rw-r--r--include/linux/irqchip/arm-gic.h16
-rw-r--r--include/linux/irqchip/irq-madera.h132
-rw-r--r--include/linux/irqdomain.h7
-rw-r--r--include/linux/irqflags.h15
-rw-r--r--include/linux/jbd2.h7
-rw-r--r--include/linux/jump_label.h65
-rw-r--r--include/linux/kasan.h101
-rw-r--r--include/linux/kcore.h15
-rw-r--r--include/linux/kernel.h36
-rw-r--r--include/linux/kernfs.h9
-rw-r--r--include/linux/kexec.h12
-rw-r--r--include/linux/key-type.h11
-rw-r--r--include/linux/key.h3
-rw-r--r--include/linux/keyctl.h46
-rw-r--r--include/linux/kgdb.h24
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/kref.h5
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/lantiq.h23
-rw-r--r--include/linux/leds.h36
-rw-r--r--include/linux/libata.h7
-rw-r--r--include/linux/libfdt_env.h1
-rw-r--r--include/linux/libnvdimm.h76
-rw-r--r--include/linux/lightnvm.h169
-rw-r--r--include/linux/linkage.h7
-rw-r--r--include/linux/linkmode.h85
-rw-r--r--include/linux/list.h23
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h15
-rw-r--r--include/linux/lsm_hooks.h33
-rw-r--r--include/linux/mailbox_client.h1
-rw-r--r--include/linux/mailbox_controller.h9
-rw-r--r--include/linux/math64.h3
-rw-r--r--include/linux/memblock.h182
-rw-r--r--include/linux/memcontrol.h44
-rw-r--r--include/linux/memory_hotplug.h16
-rw-r--r--include/linux/memremap.h40
-rw-r--r--include/linux/mfd/axp20x.h5
-rw-r--r--include/linux/mfd/cros_ec.h214
-rw-r--r--include/linux/mfd/cros_ec_commands.h306
-rw-r--r--include/linux/mfd/cros_ec_lpc_mec.h90
-rw-r--r--include/linux/mfd/cros_ec_lpc_reg.h61
-rw-r--r--include/linux/mfd/da9063/pdata.h16
-rw-r--r--include/linux/mfd/ingenic-tcu.h56
-rw-r--r--include/linux/mfd/intel_msic.h7
-rw-r--r--include/linux/mfd/intel_soc_pmic.h13
-rw-r--r--include/linux/mfd/intel_soc_pmic_bxtwc.h10
-rw-r--r--include/linux/mfd/madera/core.h2
-rw-r--r--include/linux/mfd/madera/pdata.h1
-rw-r--r--include/linux/mfd/max14577-private.h11
-rw-r--r--include/linux/mfd/max14577.h11
-rw-r--r--include/linux/mfd/max77686-private.h15
-rw-r--r--include/linux/mfd/max77686.h15
-rw-r--r--include/linux/mfd/max77693-common.h6
-rw-r--r--include/linux/mfd/max77693-private.h15
-rw-r--r--include/linux/mfd/max77693.h15
-rw-r--r--include/linux/mfd/max77843-private.h6
-rw-r--r--include/linux/mfd/max8997-private.h15
-rw-r--r--include/linux/mfd/max8997.h16
-rw-r--r--include/linux/mfd/max8998-private.h15
-rw-r--r--include/linux/mfd/max8998.h15
-rw-r--r--include/linux/mfd/mc13xxx.h1
-rw-r--r--include/linux/mfd/rohm-bd718x7.h372
-rw-r--r--include/linux/mfd/samsung/core.h11
-rw-r--r--include/linux/mfd/samsung/irq.h10
-rw-r--r--include/linux/mfd/samsung/rtc.h15
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h7
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps13.h14
-rw-r--r--include/linux/mfd/samsung/s2mps14.h14
-rw-r--r--include/linux/mfd/samsung/s2mps15.h11
-rw-r--r--include/linux/mfd/samsung/s2mpu02.h14
-rw-r--r--include/linux/mfd/samsung/s5m8763.h10
-rw-r--r--include/linux/mfd/samsung/s5m8767.h10
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h3
-rw-r--r--include/linux/mfd/ti-lmu.h3
-rw-r--r--include/linux/mfd/tmio.h14
-rw-r--r--include/linux/mfd/wm8994/pdata.h3
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/mii.h174
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/cq.h13
-rw-r--r--include/linux/mlx5/device.h34
-rw-r--r--include/linux/mlx5/driver.h348
-rw-r--r--include/linux/mlx5/eq.h72
-rw-r--r--include/linux/mlx5/fs.h48
-rw-r--r--include/linux/mlx5/mlx5_ifc.h503
-rw-r--r--include/linux/mlx5/port.h3
-rw-r--r--include/linux/mlx5/qp.h6
-rw-r--r--include/linux/mlx5/srq.h71
-rw-r--r--include/linux/mlx5/transobj.h13
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h200
-rw-r--r--include/linux/mm_inline.h3
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mm_types_task.h2
-rw-r--r--include/linux/mmc/host.h10
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmu_notifier.h134
-rw-r--r--include/linux/mmzone.h120
-rw-r--r--include/linux/mod_devicetable.h20
-rw-r--r--include/linux/module.h15
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mroute_base.h11
-rw-r--r--include/linux/msi.h23
-rw-r--r--include/linux/mtd/blktrans.h5
-rw-r--r--include/linux/mtd/cfi.h1
-rw-r--r--include/linux/mtd/jedec.h91
-rw-r--r--include/linux/mtd/mtd.h5
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/nand_bch.h11
-rw-r--r--include/linux/mtd/nand_ecc.h12
-rw-r--r--include/linux/mtd/onfi.h178
-rw-r--r--include/linux/mtd/platnand.h74
-rw-r--r--include/linux/mtd/rawnand.h785
-rw-r--r--include/linux/mtd/sh_flctl.h16
-rw-r--r--include/linux/mtd/spi-nor.h130
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/ndctl.h22
-rw-r--r--include/linux/net_dim.h3
-rw-r--r--include/linux/netdevice.h145
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h15
-rw-r--r--include/linux/netfilter/nfnetlink.h12
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h3
-rw-r--r--include/linux/netfilter_bridge.h33
-rw-r--r--include/linux/netlink.h60
-rw-r--r--include/linux/netpoll.h9
-rw-r--r--include/linux/nfs4.h9
-rw-r--r--include/linux/nfs_fs.h26
-rw-r--r--include/linux/nfs_fs_sb.h9
-rw-r--r--include/linux/nfs_xdr.h59
-rw-r--r--include/linux/nmi.h2
-rw-r--r--include/linux/nodemask.h2
-rw-r--r--include/linux/notifier.h3
-rw-r--r--include/linux/nvme-fc-driver.h17
-rw-r--r--include/linux/nvme-tcp.h189
-rw-r--r--include/linux/nvme.h74
-rw-r--r--include/linux/nvmem-consumer.h100
-rw-r--r--include/linux/nvmem-provider.h61
-rw-r--r--include/linux/objagg.h46
-rw-r--r--include/linux/of.h93
-rw-r--r--include/linux/of_device.h3
-rw-r--r--include/linux/of_fdt.h1
-rw-r--r--include/linux/of_net.h6
-rw-r--r--include/linux/of_pci.h10
-rw-r--r--include/linux/of_pdt.h2
-rw-r--r--include/linux/oom.h14
-rw-r--r--include/linux/page-flags-layout.h10
-rw-r--r--include/linux/page-flags.h25
-rw-r--r--include/linux/page-isolation.h11
-rw-r--r--include/linux/pageblock-flags.h3
-rw-r--r--include/linux/pagemap.h12
-rw-r--r--include/linux/pagevec.h8
-rw-r--r--include/linux/pci-dma-compat.h18
-rw-r--r--include/linux/pci-dma.h12
-rw-r--r--include/linux/pci-p2pdma.h114
-rw-r--r--include/linux/pci.h46
-rw-r--r--include/linux/pci_hotplug.h43
-rw-r--r--include/linux/pci_ids.h15
-rw-r--r--include/linux/pe.h2
-rw-r--r--include/linux/percpu-defs.h6
-rw-r--r--include/linux/percpu-refcount.h1
-rw-r--r--include/linux/percpu-rwsem.h2
-rw-r--r--include/linux/percpu.h2
-rw-r--r--include/linux/perf/arm_pmu.h5
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/pfn_t.h4
-rw-r--r--include/linux/phy.h166
-rw-r--r--include/linux/phy/phy-mipi-dphy.h285
-rw-r--r--include/linux/phy/phy-qcom-ufs.h38
-rw-r--r--include/linux/phy/phy.h85
-rw-r--r--include/linux/phy_fixed.h5
-rw-r--r--include/linux/phy_led_triggers.h2
-rw-r--r--include/linux/pid.h11
-rw-r--r--include/linux/pl353-smc.h30
-rw-r--r--include/linux/platform_data/ad7879.h42
-rw-r--r--include/linux/platform_data/ams-delta-fiq.h58
-rw-r--r--include/linux/platform_data/davinci_asp.h1
-rw-r--r--include/linux/platform_data/dma-dw.h6
-rw-r--r--include/linux/platform_data/dma-ep93xx.h2
-rw-r--r--include/linux/platform_data/dma-mcf-edma.h38
-rw-r--r--include/linux/platform_data/ehci-sh.h16
-rw-r--r--include/linux/platform_data/gpio-davinci.h36
-rw-r--r--include/linux/platform_data/gpio-omap.h18
-rw-r--r--include/linux/platform_data/gpio-ts5500.h27
-rw-r--r--include/linux/platform_data/hsmmc-omap.h3
-rw-r--r--include/linux/platform_data/i2c-ocores.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/platform_data/mdio-gpio.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h4
-rw-r--r--include/linux/platform_data/mmc-pxamci.h4
-rw-r--r--include/linux/platform_data/mmc-s3cmci.h4
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h1
-rw-r--r--include/linux/platform_data/mv_usb.h1
-rw-r--r--include/linux/platform_data/ntc_thermistor.h5
-rw-r--r--include/linux/platform_data/pm33xx.h29
-rw-r--r--include/linux/platform_data/pxa_sdhci.h4
-rw-r--r--include/linux/platform_data/sh_ipmmu.h18
-rw-r--r--include/linux/platform_data/shmob_drm.h6
-rw-r--r--include/linux/platform_data/spi-davinci.h4
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_data/ti-sysc.h2
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h101
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/pm_domain.h49
-rw-r--r--include/linux/pm_opp.h29
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/pmu.h4
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power/charger-manager.h3
-rw-r--r--include/linux/power/smartreflex.h10
-rw-r--r--include/linux/power_supply.h24
-rw-r--r--include/linux/preempt.h5
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/property.h12
-rw-r--r--include/linux/psi.h54
-rw-r--r--include/linux/psi_types.h92
-rw-r--r--include/linux/pstore.h39
-rw-r--r--include/linux/pstore_ram.h53
-rw-r--r--include/linux/ptp_clock_kernel.h33
-rw-r--r--include/linux/ptr_ring.h2
-rw-r--r--include/linux/ptrace.h24
-rw-r--r--include/linux/pwm.h42
-rw-r--r--include/linux/pxa2xx_ssp.h3
-rw-r--r--include/linux/qcom-geni-se.h13
-rw-r--r--include/linux/qcom_scm.h7
-rw-r--r--include/linux/qed/common_hsi.h10
-rw-r--r--include/linux/qed/iscsi_common.h2
-rw-r--r--include/linux/qed/qed_if.h76
-rw-r--r--include/linux/qed/qed_rdma_if.h11
-rw-r--r--include/linux/quota.h8
-rw-r--r--include/linux/radix-tree.h178
-rw-r--r--include/linux/rbtree_augmented.h4
-rw-r--r--include/linux/rculist.h32
-rw-r--r--include/linux/rcupdate.h154
-rw-r--r--include/linux/rcupdate_wait.h17
-rw-r--r--include/linux/rcutiny.h53
-rw-r--r--include/linux/rcutree.h31
-rw-r--r--include/linux/regmap.h72
-rw-r--r--include/linux/regset.h4
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/regulator/driver.h29
-rw-r--r--include/linux/regulator/fixed.h3
-rw-r--r--include/linux/regulator/machine.h9
-rw-r--r--include/linux/regulator/pfuze100.h3
-rw-r--r--include/linux/remoteproc.h47
-rw-r--r--include/linux/reservation.h12
-rw-r--r--include/linux/reset.h2
-rw-r--r--include/linux/restart_block.h4
-rw-r--r--include/linux/rhashtable.h34
-rw-r--r--include/linux/ring_buffer.h8
-rw-r--r--include/linux/rtc.h53
-rw-r--r--include/linux/rtnetlink.h7
-rw-r--r--include/linux/rwsem.h4
-rw-r--r--include/linux/sa11x0-dma.h24
-rw-r--r--include/linux/sbitmap.h89
-rw-r--r--include/linux/scatterlist.h6
-rw-r--r--include/linux/sched.h80
-rw-r--r--include/linux/sched/cpufreq.h6
-rw-r--r--include/linux/sched/isolation.h4
-rw-r--r--include/linux/sched/loadavg.h24
-rw-r--r--include/linux/sched/mm.h2
-rw-r--r--include/linux/sched/signal.h74
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/sched/stat.h3
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/sched/topology.h23
-rw-r--r--include/linux/sched/user.h5
-rw-r--r--include/linux/scmi_protocol.h4
-rw-r--r--include/linux/seccomp.h9
-rw-r--r--include/linux/security.h88
-rw-r--r--include/linux/serdev.h2
-rw-r--r--include/linux/serial_8250.h4
-rw-r--r--include/linux/serial_core.h41
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/set_memory.h14
-rw-r--r--include/linux/sfp.h2
-rw-r--r--include/linux/shdma-base.h7
-rw-r--r--include/linux/shrinker.h10
-rw-r--r--include/linux/signal.h36
-rw-r--r--include/linux/signal_types.h8
-rw-r--r--include/linux/skbuff.h251
-rw-r--r--include/linux/skmsg.h443
-rw-r--r--include/linux/slab.h115
-rw-r--r--include/linux/slab_def.h13
-rw-r--r--include/linux/smp.h4
-rw-r--r--include/linux/soc/amlogic/meson-canvas.h65
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h133
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h30
-rw-r--r--include/linux/soc/qcom/qmi.h2
-rw-r--r--include/linux/soc/renesas/rcar-sysc.h13
-rw-r--r--include/linux/socket.h10
-rw-r--r--include/linux/soundwire/sdw.h12
-rw-r--r--include/linux/spi/mmc_spi.h15
-rw-r--r--include/linux/spi/pxa2xx_spi.h1
-rw-r--r--include/linux/spi/spi-mem.h91
-rw-r--r--include/linux/spi/spi.h39
-rw-r--r--include/linux/srcu.h82
-rw-r--r--include/linux/srcutiny.h24
-rw-r--r--include/linux/srcutree.h21
-rw-r--r--include/linux/stackleak.h35
-rw-r--r--include/linux/start_kernel.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/string.h27
-rw-r--r--include/linux/sunrpc/auth.h68
-rw-r--r--include/linux/sunrpc/auth_gss.h1
-rw-r--r--include/linux/sunrpc/bc_xprt.h20
-rw-r--r--include/linux/sunrpc/cache.h18
-rw-r--r--include/linux/sunrpc/clnt.h6
-rw-r--r--include/linux/sunrpc/gss_krb5.h36
-rw-r--r--include/linux/sunrpc/metrics.h4
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h11
-rw-r--r--include/linux/sunrpc/svc_rdma.h15
-rw-r--r--include/linux/sunrpc/svc_xprt.h2
-rw-r--r--include/linux/sunrpc/svcauth.h4
-rw-r--r--include/linux/sunrpc/xdr.h10
-rw-r--r--include/linux/sunrpc/xprt.h36
-rw-r--r--include/linux/sunrpc/xprtsock.h36
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swap.h61
-rw-r--r--include/linux/swapops.h34
-rw-r--r--include/linux/swiotlb.h86
-rw-r--r--include/linux/syscalls.h52
-rw-r--r--include/linux/sysfs.h8
-rw-r--r--include/linux/t10-pi.h9
-rw-r--r--include/linux/tc.h1
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/tee_drv.h73
-rw-r--r--include/linux/thinkpad_acpi.h16
-rw-r--r--include/linux/thunderbolt.h5
-rw-r--r--include/linux/time32.h94
-rw-r--r--include/linux/timekeeping.h30
-rw-r--r--include/linux/timekeeping32.h58
-rw-r--r--include/linux/torture.h2
-rw-r--r--include/linux/tpm.h11
-rw-r--r--include/linux/trace_events.h11
-rw-r--r--include/linux/tracehook.h17
-rw-r--r--include/linux/tracepoint-defs.h6
-rw-r--r--include/linux/tracepoint.h71
-rw-r--r--include/linux/tty.h10
-rw-r--r--include/linux/tty_driver.h3
-rw-r--r--include/linux/tty_ldisc.h10
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/uaccess.h11
-rw-r--r--include/linux/udp.h26
-rw-r--r--include/linux/uio.h72
-rw-r--r--include/linux/uio_driver.h1
-rw-r--r--include/linux/umh.h1
-rw-r--r--include/linux/uprobes.h7
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb/ccid.h51
-rw-r--r--include/linux/usb/chipidea.h9
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h5
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/tcpm.h1
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/linux/userfaultfd_k.h5
-rw-r--r--include/linux/vga_switcheroo.h3
-rw-r--r--include/linux/virtio_net.h18
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmacache.h5
-rw-r--r--include/linux/vmstat.h5
-rw-r--r--include/linux/vt_kern.h7
-rw-r--r--include/linux/w1.h2
-rw-r--r--include/linux/wait.h20
-rw-r--r--include/linux/watchdog.h3
-rw-r--r--include/linux/wkup_m3_ipc.h9
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xarray.h1486
-rw-r--r--include/linux/xxhash.h23
-rw-r--r--include/math-emu/op-2.h99
-rw-r--r--include/math-emu/soft-fp.h2
-rw-r--r--include/media/cec.h155
-rw-r--r--include/media/davinci/vpbe.h4
-rw-r--r--include/media/media-device.h29
-rw-r--r--include/media/media-entity.h48
-rw-r--r--include/media/media-request.h442
-rw-r--r--include/media/mpeg2-ctrls.h86
-rw-r--r--include/media/rc-core.h11
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/rcar-fcp.h6
-rw-r--r--include/media/v4l2-async.h111
-rw-r--r--include/media/v4l2-common.h19
-rw-r--r--include/media/v4l2-ctrls.h173
-rw-r--r--include/media/v4l2-dev.h13
-rw-r--r--include/media/v4l2-device.h11
-rw-r--r--include/media/v4l2-dv-timings.h17
-rw-r--r--include/media/v4l2-fh.h4
-rw-r--r--include/media/v4l2-fwnode.h141
-rw-r--r--include/media/v4l2-ioctl.h33
-rw-r--r--include/media/v4l2-mc.h78
-rw-r--r--include/media/v4l2-mediabus.h40
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/v4l2-rect.h26
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h66
-rw-r--r--include/media/videobuf2-v4l2.h20
-rw-r--r--include/media/vsp1.h8
-rw-r--r--include/net/9p/9p.h12
-rw-r--r--include/net/9p/client.h71
-rw-r--r--include/net/act_api.h52
-rw-r--r--include/net/addrconf.h7
-rw-r--r--include/net/af_rxrpc.h7
-rw-r--r--include/net/af_unix.h8
-rw-r--r--include/net/bluetooth/hci.h14
-rw-r--r--include/net/bluetooth/hci_core.h17
-rw-r--r--include/net/bluetooth/l2cap.h22
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h359
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/devlink.h37
-rw-r--r--include/net/dsa.h4
-rw-r--r--include/net/dst.h10
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/flow_dissector.h6
-rw-r--r--include/net/gen_stats.h6
-rw-r--r--include/net/genetlink.h2
-rw-r--r--include/net/geneve.h6
-rw-r--r--include/net/gre.h13
-rw-r--r--include/net/icmp.h2
-rw-r--r--include/net/ieee80211_radiotap.h21
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/inet6_hashtables.h5
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/inet_ecn.h18
-rw-r--r--include/net/inet_hashtables.h25
-rw-r--r--include/net/inet_sock.h21
-rw-r--r--include/net/ip.h33
-rw-r--r--include/net/ip6_fib.h27
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_fib.h18
-rw-r--r--include/net/ip_tunnels.h40
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/iucv/af_iucv.h5
-rw-r--r--include/net/l3mdev.h22
-rw-r--r--include/net/llc.h1
-rw-r--r--include/net/mac80211.h150
-rw-r--r--include/net/neighbour.h56
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/br_netfilter.h14
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h13
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h13
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h6
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netfilter/nf_conntrack_count.h19
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h7
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h3
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h75
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_flow_table.h6
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h7
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h78
-rw-r--r--include/net/netfilter/nf_tables.h9
-rw-r--r--include/net/netfilter/nf_tables_core.h4
-rw-r--r--include/net/netfilter/nfnetlink_log.h1
-rw-r--r--include/net/netlink.h161
-rw-r--r--include/net/netns/conntrack.h6
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/xfrm.h2
-rw-r--r--include/net/nfc/hci.h2
-rw-r--r--include/net/pkt_cls.h145
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/protocol.h9
-rw-r--r--include/net/raw.h14
-rw-r--r--include/net/regulatory.h4
-rw-r--r--include/net/route.h5
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h81
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/constants.h7
-rw-r--r--include/net/sctp/sctp.h21
-rw-r--r--include/net/sctp/sm.h6
-rw-r--r--include/net/sctp/structs.h14
-rw-r--r--include/net/sctp/ulpevent.h39
-rw-r--r--include/net/seg6.h1
-rw-r--r--include/net/sock.h105
-rw-r--r--include/net/switchdev.h113
-rw-r--r--include/net/tcp.h142
-rw-r--r--include/net/tls.h113
-rw-r--r--include/net/udp.h60
-rw-r--r--include/net/udp_tunnel.h10
-rw-r--r--include/net/vxlan.h82
-rw-r--r--include/net/xdp.h6
-rw-r--r--include/net/xdp_sock.h127
-rw-r--r--include/net/xfrm.h47
-rw-r--r--include/rdma/ib_addr.h11
-rw-r--r--include/rdma/ib_cache.h24
-rw-r--r--include/rdma/ib_cm.h2
-rw-r--r--include/rdma/ib_fmr_pool.h2
-rw-r--r--include/rdma/ib_mad.h10
-rw-r--r--include/rdma/ib_sa.h38
-rw-r--r--include/rdma/ib_umem.h9
-rw-r--r--include/rdma/ib_umem_odp.h78
-rw-r--r--include/rdma/ib_verbs.h747
-rw-r--r--include/rdma/rdma_cm.h11
-rw-r--r--include/rdma/rdma_netlink.h4
-rw-r--r--include/rdma/rdma_vt.h58
-rw-r--r--include/rdma/rdmavt_qp.h7
-rw-r--r--include/rdma/restrack.h29
-rw-r--r--include/rdma/uverbs_ioctl.h362
-rw-r--r--include/rdma/uverbs_named_ioctl.h13
-rw-r--r--include/rdma/uverbs_std_types.h124
-rw-r--r--include/scsi/scsi_cmnd.h6
-rw-r--r--include/scsi/scsi_dh.h2
-rw-r--r--include/scsi/scsi_driver.h3
-rw-r--r--include/scsi/scsi_host.h38
-rw-r--r--include/scsi/scsi_tcq.h14
-rw-r--r--include/scsi/srp.h26
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h6
-rw-r--r--include/soc/fsl/bman.h8
-rw-r--r--include/soc/fsl/dpaa2-fd.h680
-rw-r--r--include/soc/fsl/dpaa2-global.h192
-rw-r--r--include/soc/fsl/dpaa2-io.h123
-rw-r--r--include/soc/fsl/qe/ucc_fast.h8
-rw-r--r--include/soc/fsl/qman.h40
-rw-r--r--include/soc/mscc/ocelot_hsio.h859
-rw-r--r--include/soc/qcom/cmd-db.h12
-rw-r--r--include/soc/tegra/bpmp-abi.h1188
-rw-r--r--include/soc/tegra/bpmp.h7
-rw-r--r--include/soc/tegra/fuse.h1
-rw-r--r--include/soc/tegra/pmc.h43
-rw-r--r--include/sound/compress_driver.h19
-rw-r--r--include/sound/hda_codec.h535
-rw-r--r--include/sound/hda_component.h11
-rw-r--r--include/sound/hdaudio.h15
-rw-r--r--include/sound/memalloc.h3
-rw-r--r--include/sound/pcm_params.h4
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/sound/simple_card_utils.h33
-rw-r--r--include/sound/soc-acpi-intel-match.h7
-rw-r--r--include/sound/soc-acpi.h15
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc-dpcm.h10
-rw-r--r--include/sound/soc.h59
-rw-r--r--include/target/iscsi/iscsi_target_core.h6
-rw-r--r--include/target/iscsi/iscsi_target_stat.h4
-rw-r--r--include/target/target_core_base.h26
-rw-r--r--include/target/target_core_fabric.h25
-rw-r--r--include/trace/events/afs.h213
-rw-r--r--include/trace/events/bcache.h27
-rw-r--r--include/trace/events/btrfs.h40
-rw-r--r--include/trace/events/cgroup.h47
-rw-r--r--include/trace/events/ext4.h119
-rw-r--r--include/trace/events/f2fs.h1
-rw-r--r--include/trace/events/filelock.h16
-rw-r--r--include/trace/events/hwmon.h71
-rw-r--r--include/trace/events/iscsi.h107
-rw-r--r--include/trace/events/kyber.h96
-rw-r--r--include/trace/events/migrate.h27
-rw-r--r--include/trace/events/mmflags.h1
-rw-r--r--include/trace/events/net.h59
-rw-r--r--include/trace/events/objagg.h228
-rw-r--r--include/trace/events/preemptirq.h23
-rw-r--r--include/trace/events/rcu.h25
-rw-r--r--include/trace/events/rpcrdma.h236
-rw-r--r--include/trace/events/rxrpc.h7
-rw-r--r--include/trace/events/sched.h23
-rw-r--r--include/trace/events/signal.h7
-rw-r--r--include/trace/events/sunrpc.h215
-rw-r--r--include/trace/events/tcp.h7
-rw-r--r--include/uapi/asm-generic/Kbuild.asm1
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h2
-rw-r--r--include/uapi/asm-generic/ioctls.h2
-rw-r--r--include/uapi/asm-generic/siginfo.h193
-rw-r--r--include/uapi/asm-generic/unistd.h10
-rw-r--r--include/uapi/drm/amdgpu_drm.h8
-rw-r--r--include/uapi/drm/drm_fourcc.h63
-rw-r--r--include/uapi/drm/drm_mode.h22
-rw-r--r--include/uapi/drm/i915_drm.h30
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/v3d_drm.h39
-rw-r--r--include/uapi/drm/virtgpu_drm.h13
-rw-r--r--include/uapi/linux/aio_abi.h2
-rw-r--r--include/uapi/linux/android/binder.h10
-rw-r--r--include/uapi/linux/android/binder_ctl.h35
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/auto_fs.h8
-rw-r--r--include/uapi/linux/bcache.h8
-rw-r--r--include/uapi/linux/bfs_fs.h2
-rw-r--r--include/uapi/linux/blkzoned.h3
-rw-r--r--include/uapi/linux/bpf.h330
-rw-r--r--include/uapi/linux/btf.h38
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/btrfs_tree.h1
-rw-r--r--include/uapi/linux/cec.h3
-rw-r--r--include/uapi/linux/cryptouser.h76
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/dns_resolver.h116
-rw-r--r--include/uapi/linux/elf-em.h3
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h19
-rw-r--r--include/uapi/linux/fanotify.h27
-rw-r--r--include/uapi/linux/firewire-cdev.h22
-rw-r--r--include/uapi/linux/fs.h60
-rw-r--r--include/uapi/linux/fuse.h119
-rw-r--r--include/uapi/linux/gen_stats.h1
-rw-r--r--include/uapi/linux/gpio.h2
-rw-r--r--include/uapi/linux/hash_info.h2
-rw-r--r--include/uapi/linux/if_addr.h1
-rw-r--r--include/uapi/linux/if_arp.h18
-rw-r--r--include/uapi/linux/if_bridge.h21
-rw-r--r--include/uapi/linux/if_fddi.h21
-rw-r--r--include/uapi/linux/if_link.h22
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/if_tun.h1
-rw-r--r--include/uapi/linux/if_tunnel.h20
-rw-r--r--include/uapi/linux/in.h10
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/input-event-codes.h17
-rw-r--r--include/uapi/linux/keyctl.h37
-rw-r--r--include/uapi/linux/kfd_ioctl.h45
-rw-r--r--include/uapi/linux/kvm.h47
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/media.h8
-rw-r--r--include/uapi/linux/memfd.h2
-rw-r--r--include/uapi/linux/mman.h2
-rw-r--r--include/uapi/linux/mmc/ioctl.h5
-rw-r--r--include/uapi/linux/mount.h58
-rw-r--r--include/uapi/linux/msdos_fs.h9
-rw-r--r--include/uapi/linux/ncsi.h21
-rw-r--r--include/uapi/linux/ndctl.h52
-rw-r--r--include/uapi/linux/neighbour.h2
-rw-r--r--include/uapi/linux/net_namespace.h2
-rw-r--r--include/uapi/linux/net_tstamp.h4
-rw-r--r--include/uapi/linux/netfilter.h4
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h19
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h60
-rw-r--r--include/uapi/linux/netfilter/xt_cgroup.h16
-rw-r--r--include/uapi/linux/netfilter_bridge.h4
-rw-r--r--include/uapi/linux/netfilter_decnet.h10
-rw-r--r--include/uapi/linux/netfilter_ipv4.h28
-rw-r--r--include/uapi/linux/netfilter_ipv6.h29
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/nl80211.h578
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/perf_event.h4
-rw-r--r--include/uapi/linux/pkt_cls.h9
-rw-r--r--include/uapi/linux/pkt_sched.h82
-rw-r--r--include/uapi/linux/prctl.h9
-rw-r--r--include/uapi/linux/ptp_clock.h12
-rw-r--r--include/uapi/linux/rds.h1
-rw-r--r--include/uapi/linux/sctp.h17
-rw-r--r--include/uapi/linux/seccomp.h40
-rw-r--r--include/uapi/linux/serial.h17
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/shm.h2
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/taskstats.h6
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/udmabuf.h33
-rw-r--r--include/uapi/linux/udp.h2
-rw-r--r--include/uapi/linux/usb/tmc.h41
-rw-r--r--include/uapi/linux/usb/video.h304
-rw-r--r--include/uapi/linux/v4l2-common.h28
-rw-r--r--include/uapi/linux/v4l2-controls.h2
-rw-r--r--include/uapi/linux/vfio.h94
-rw-r--r--include/uapi/linux/vhost.h115
-rw-r--r--include/uapi/linux/vhost_types.h128
-rw-r--r--include/uapi/linux/videodev2.h64
-rw-r--r--include/uapi/linux/virtio_balloon.h8
-rw-r--r--include/uapi/linux/virtio_blk.h54
-rw-r--r--include/uapi/linux/virtio_config.h3
-rw-r--r--include/uapi/linux/virtio_gpu.h18
-rw-r--r--include/uapi/linux/virtio_ring.h52
-rw-r--r--include/uapi/mtd/ubi-user.h18
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h6
-rw-r--r--include/uapi/rdma/hns-abi.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h84
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h16
-rw-r--r--include/uapi/rdma/ib_user_verbs.h38
-rw-r--r--include/uapi/rdma/mlx5-abi.h18
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h22
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h12
-rw-r--r--include/uapi/rdma/rdma_netlink.h6
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h7
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h106
-rw-r--r--include/uapi/sound/asound.h2
-rw-r--r--include/uapi/sound/firewire.h20
-rw-r--r--include/uapi/sound/skl-tplg-interface.h106
-rw-r--r--include/video/imx-ipu-v3.h9
-rw-r--r--include/video/samsung_fimd.h10
-rw-r--r--include/video/udlfb.h9
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/hvm/start_info.h63
-rw-r--r--include/xen/interface/memory.h6
-rw-r--r--include/xen/mem-reservation.h7
-rw-r--r--include/xen/xen-front-pgdir-shbuf.h89
-rw-r--r--include/xen/xen-ops.h137
-rw-r--r--include/xen/xen.h7
1132 files changed, 40532 insertions, 12530 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 012c55cb22ba..53c088247d36 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -89,7 +89,7 @@
/* Maximum object reference count (detects object deletion issues) */
-#define ACPI_MAX_REFERENCE_COUNT 0x1000
+#define ACPI_MAX_REFERENCE_COUNT 0x4000
/* Default page size for use in mapping memory for operation regions */
@@ -173,11 +173,20 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus, GSBus and IPMI bidirectional buffer size */
+/*
+ * SMBus, GSBus and IPMI buffer sizes. All have a 2-byte header,
+ * containing both Status and Length.
+ */
+#define ACPI_SERIAL_HEADER_SIZE 2 /* Common for below. Status and Length fields */
+
+#define ACPI_SMBUS_DATA_SIZE 32
+#define ACPI_SMBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_SMBUS_DATA_SIZE
+
+#define ACPI_IPMI_DATA_SIZE 64
+#define ACPI_IPMI_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_IPMI_DATA_SIZE
-#define ACPI_SMBUS_BUFFER_SIZE 34
-#define ACPI_GSBUS_BUFFER_SIZE 34
-#define ACPI_IPMI_BUFFER_SIZE 66
+#define ACPI_MAX_GSBUS_DATA_SIZE 255
+#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
/* _sx_d and _sx_w control methods */
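The replacement macros make the old magic numbers self-documenting: every serial buffer carries a 2-byte Status/Length header ahead of the protocol payload, so ACPI_SMBUS_BUFFER_SIZE works out to 2 + 32 = 34 and ACPI_IPMI_BUFFER_SIZE to 2 + 64 = 66, the same values as the removed constants, while GSBus transfers may now use the full 2 + 255 = 257 bytes. A rough sketch of the implied layout, assuming the usual header-then-data arrangement (the struct and variable names below are illustrative, not ACPICA types; the macros come from <acpi/acconfig.h>, normally pulled in via <linux/acpi.h>):

#include <linux/types.h>

struct serial_xfer_buffer {			/* illustrative only */
	u8 status;				/* ACPI_SERIAL_HEADER_SIZE covers   */
	u8 length;				/* ...these two header bytes        */
	u8 data[ACPI_MAX_GSBUS_DATA_SIZE];	/* 32 (SMBus), 64 (IPMI), up to 255 (GSBus) */
};

/* worst case for GSBus: ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE = 257 bytes */
static u8 xfer_buf[ACPI_MAX_GSBUS_BUFFER_SIZE];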
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 226e5aeba6c2..09f46050961f 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -59,6 +59,12 @@ struct acpi_exception_info {
#define AE_OK (acpi_status) 0x0000
+#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
+#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
+#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
+#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
+#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
+
/*
* Environmental exceptions
*/
@@ -165,8 +171,10 @@ struct acpi_exception_info {
#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
+#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
+#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
-#define AE_CODE_AML_MAX 0x0023
+#define AE_CODE_AML_MAX 0x0025
/*
* Internal exceptions used for control
@@ -341,7 +349,10 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
- "A target operand of an incorrect type was encountered")
+ "A target operand of an incorrect type was encountered"),
+ EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
+ EXCEP_TXT("AE_AML_BUFFER_LENGTH",
+ "The length of the buffer is invalid/incorrect")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 3a26aa7ead23..6db9a6d40c85 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -73,7 +73,8 @@
#define ACPI_LV_RESOURCES 0x00010000
#define ACPI_LV_USER_REQUESTS 0x00020000
#define ACPI_LV_PACKAGE 0x00040000
-#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS
+#define ACPI_LV_EVALUATION 0x00080000
+#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS
/* Trace verbosity level 2 [Function tracing and memory allocation] */
@@ -141,6 +142,7 @@
#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS)
#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS)
#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE)
+#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION)
#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX)
#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS)
@@ -148,7 +150,7 @@
/* Defaults for debug_level, debug and normal */
-#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
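ACPI_LV_EVALUATION takes the new 0x00080000 bit and gets a matching ACPI_DB_EVALUATION print level; note that ACPI_DEBUG_DEFAULT now enables INIT, DEBUG_OBJECT, EVALUATION and REPAIR output instead of INFO and REPAIR. Inside ACPICA the level would be consumed through the usual double-parenthesis debug wrapper, and at runtime it can be enabled with the existing acpi.debug_level= boot parameter. A hedged sketch, with a made-up module name, message and pathname variable:

#define _COMPONENT	ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")

/* printed only when acpi_dbg_level includes ACPI_LV_EVALUATION */
ACPI_DEBUG_PRINT((ACPI_DB_EVALUATION,
		  "Evaluating control method [%s]\n", pathname));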
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index ba4dd54f2c82..0300374101cd 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -346,10 +346,16 @@ struct acpi_device_physical_node {
bool put_online:1;
};
+struct acpi_device_properties {
+ const guid_t *guid;
+ const union acpi_object *properties;
+ struct list_head list;
+};
+
/* ACPI Device Specific Data (_DSD) */
struct acpi_device_data {
const union acpi_object *pointer;
- const union acpi_object *properties;
+ struct list_head properties;
const union acpi_object *of_compatible;
struct list_head subnodes;
};
@@ -595,7 +601,6 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
u64 *size);
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
-void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
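Device _DSD data moves from a single properties package pointer to a list of struct acpi_device_properties entries, so one device can carry packages for several _DSD GUIDs. Consumers that used to read adev->data.properties directly would now walk the list; a minimal sketch of that walk, assuming adev is a valid struct acpi_device * (the loop body is illustrative):

struct acpi_device_properties *props;

list_for_each_entry(props, &adev->data.properties, list) {
	/* props->guid identifies the _DSD UUID this package was found under;
	 * props->properties points at the decoded ACPI package object.
	 */
}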
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 14499757338f..de1804aeaf69 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -88,7 +88,14 @@ int acpi_pci_link_free_irq(acpi_handle handle);
struct pci_bus;
+#ifdef CONFIG_PCI
struct pci_dev *acpi_get_pci_dev(acpi_handle);
+#else
+static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ return NULL;
+}
+#endif
/* Arch-defined function to add a bus to the system */
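With the !CONFIG_PCI stub returning NULL, callers no longer need their own #ifdef CONFIG_PCI guards around acpi_get_pci_dev(); they can call it unconditionally and treat NULL as "no PCI device". An illustrative caller (the surrounding error handling is made up, and the pci_dev_put() assumes the usual reference-counted return):

struct pci_dev *pdev = acpi_get_pci_dev(handle);

if (!pdev)
	return -ENODEV;	/* PCI not built in, or no PCI device behind this handle */

/* ... use pdev ... */
pci_dev_put(pdev);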
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 88072c92ace2..7aa38b648564 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180629
+#define ACPI_CA_VERSION 0x20181213
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -157,13 +157,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
/*
- * Optionally support group module level code.
- * NOTE, this is essentially obsolete and will be removed soon
- * (01/2018).
- */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
-
-/*
* Optionally support module level code by parsing an entire table as
* a method as it is loaded. Default is TRUE.
* NOTE, this is essentially obsolete and will be removed soon
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 517addd6b11d..0a977eca0a74 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -38,6 +38,7 @@
#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
+#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */
/*
* All tables and structures must be byte-packed to match the ACPI
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 501f341d1d92..ea1ca49c9c1b 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -365,6 +365,29 @@ struct acpi_table_tcpa_server {
*
******************************************************************************/
+/* Revision 3 */
+
+struct acpi_table_tpm23 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 control_address;
+ u32 start_method;
+};
+
+/* Value for start_method above */
+
+#define ACPI_TPM23_ACPI_START_METHOD 2
+
+/*
+ * Optional trailer for revision 3. If start method is 2, there is a 4 byte
+ * reserved area of all zeros.
+ */
+struct acpi_tmp23_trailer {
+ u32 reserved;
+};
+
+/* Revision 4 */
+
struct acpi_table_tpm2 {
struct acpi_table_header header; /* Common ACPI table header */
u16 platform_class;
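The revision-3 layout of the TPM2 table gets its own struct definition so it can be parsed separately from the revision-4 struct acpi_table_tpm2. A hedged sketch of how a consumer might tell the two apart after fetching the table with the standard acpi_get_table()/acpi_put_table() calls (error handling trimmed):

struct acpi_table_header *hdr;
struct acpi_table_tpm23 *tpm23;

if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_TPM2, 1, &hdr)))
	return;

if (hdr->revision == 3) {
	tpm23 = (struct acpi_table_tpm23 *)hdr;
	if (tpm23->start_method == ACPI_TPM23_ACPI_START_METHOD) {
		/* revision-3 ACPI start method; a 4-byte all-zero trailer may follow */
	}
}
acpi_put_table(hdr);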
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 66ceb12ebc63..2590627dbfcc 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -527,6 +527,10 @@ typedef u64 acpi_integer;
#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+/* Support for OEMx signature (x can be any character) */
+#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\
+ strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE)
+
/*
* Algorithm to obtain access bit width.
* Can be used with access_width of struct acpi_generic_address and access_size of
@@ -1273,6 +1277,8 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS1 0x0E
#define ACPI_OSI_WIN_10_RS2 0x0F
#define ACPI_OSI_WIN_10_RS3 0x10
+#define ACPI_OSI_WIN_10_RS4 0x11
+#define ACPI_OSI_WIN_10_RS5 0x12
/* Definitions of getopt */
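ACPI_IS_OEM_SIG() accepts a table signature only when it starts with the 3-byte "OEM" prefix and still fills all ACPI_NAME_SIZE (4) characters, i.e. "OEMx" for any fourth character x. A small worked example of what the predicate accepts (the local variables are illustrative):

char sig_ok[ACPI_NAME_SIZE]  = { 'O', 'E', 'M', '1' };	/* "OEM1" */
char sig_bad[ACPI_NAME_SIZE] = { 'O', 'E', 'M', 0 };	/* "OEM" plus NUL padding */

ACPI_IS_OEM_SIG(sig_ok);	/* true:  prefix matches and strnlen() == 4 */
ACPI_IS_OEM_SIG(sig_bad);	/* false: strnlen() is 3, not ACPI_NAME_SIZE */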
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 8e0b8250a139..4f34734e7f36 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -104,6 +104,7 @@ enum cppc_regs {
* today.
*/
struct cppc_perf_caps {
+ u32 guaranteed_perf;
u32 highest_perf;
u32 nominal_perf;
u32 lowest_perf;
@@ -141,5 +142,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern int acpi_get_psd_map(struct cppc_cpudata **);
extern unsigned int cppc_get_transition_latency(int cpu);
+extern bool cpc_ffh_supported(void);
+extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
+extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
#endif /* _CPPC_ACPI_H*/
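The new guaranteed_perf field is reported alongside the existing highest/nominal/lowest values, and the cpc_*_ffh() hooks let an architecture implement FFH (functional fixed hardware) register access. A brief sketch of reading the capabilities, assuming cpu is a valid CPU number (cppc_get_perf_caps() returns 0 on success):

struct cppc_perf_caps caps;

if (!cppc_get_perf_caps(cpu, &caps))
	pr_info("CPU%d: guaranteed %u, highest %u, nominal %u\n",
		cpu, caps.guaranteed_perf, caps.highest_perf, caps.nominal_perf);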
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 7451b3bca83a..e3d21d014fcc 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -33,6 +33,10 @@
/* Kernel specific ACPICA configuration */
+#ifdef CONFIG_PCI
+#define ACPI_PCI_CONFIGURED
+#endif
+
#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
#define ACPI_REDUCED_HARDWARE 1
#endif
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..bb6cb347018c 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
@@ -26,6 +26,7 @@
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
#define p4d_page(p4d) pgd_page(p4d)
#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
index 62daf940989d..c8455cc28841 100644
--- a/include/asm-generic/bitops/builtin-fls.h
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -9,7 +9,7 @@
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
{
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index 753aecaab641..b168bb10e1be 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -10,7 +10,7 @@
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
{
int r = 32;
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index a7613e1b0c87..20561a60db9c 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -75,9 +75,19 @@ struct bug_entry {
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
- * significant issues that need prompt attention if they should ever
- * appear at runtime. Use the versions with printk format strings
- * to provide better diagnostics.
+ * significant kernel issues that need prompt attention if they should ever
+ * appear at runtime.
+ *
+ * Do not use these macros when checking for invalid external inputs
+ * (e.g. invalid system call arguments, or invalid data coming from
+ * network/devices), and on transient conditions like ENOMEM or EAGAIN.
+ * These macros should be used for recoverable kernel issues only.
+ * For invalid external inputs, transient conditions, etc. use
+ * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
+ * Do not put "BUG"/"WARNING" into such format strings by hand, so that
+ * these conditions stay distinguishable from real kernel issues.
+ *
+ * Use the versions with printk format strings to provide better diagnostics.
*/
#ifndef __WARN_TAINT
extern __printf(3, 4)
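
The expanded comment draws a line between recoverable kernel bugs (the WARN family) and bad external input (a plain, ratelimited printk). A short illustration of the two cases; the device structure, field names and limit are hypothetical.

	#include <linux/bug.h>
	#include <linux/errno.h>
	#include <linux/printk.h>

	#define MAX_FRAME_LEN	2048		/* hypothetical limit */

	struct example_dev {			/* hypothetical device */
		const char *name;
		int refcnt;
	};

	static int example_rx(struct example_dev *dev, unsigned int len)
	{
		/* Kernel-internal invariant violated: warn once, keep running. */
		WARN_ONCE(dev->refcnt < 0, "negative refcount on %s\n", dev->name);

		/* Bad input from outside: not a kernel bug, so no WARN/BUG wording. */
		if (len > MAX_FRAME_LEN) {
			pr_err_ratelimited("dropping frame, bogus length %u\n", len);
			return -EINVAL;
		}
		return 0;
	}
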
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index 28819451b6d1..a86f65bffab8 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -1,3 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
-/* This is an empty stub for 32-bit-only architectures */
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_aio_context_t;
+
+#endif
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index ad2868263867..c13f46109e88 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,16 +4,7 @@
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- /*
- * Use the non-coherent ops if available. If an architecture wants a
- * more fine-grained selection of operations it will have to implement
- * get_arch_dma_ops itself or use the per-device dma_ops.
- */
-#ifdef CONFIG_DMA_NONCOHERENT_OPS
- return &dma_noncoherent_ops;
-#else
- return &dma_direct_ops;
-#endif
+ return NULL;
}
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 296c65442f00..95a159a4137f 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -8,6 +8,7 @@ enum {
EI_ETYPE_NULL, /* Return NULL if failure */
EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
+ EI_ETYPE_TRUE, /* Return true if failure */
};
struct error_injection_entry {
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 68efb950a918..294d6ae785d4 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -5,12 +5,10 @@
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
-#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#else
-#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
@@ -19,6 +17,16 @@
#define KCRC_ALIGN 4
#endif
+.macro __put, val, name
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ .long \val - ., \name - .
+#elif defined(CONFIG_64BIT)
+ .quad \val, \name
+#else
+ .long \val, \name
+#endif
+.endm
+
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
@@ -51,16 +59,19 @@ __kcrctab_\name:
.endm
#undef __put
-#if defined(__KSYM_DEPS__)
-
-#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
-
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#if defined(CONFIG_TRIM_UNUSED_KSYMS)
#include <linux/kconfig.h>
#include <generated/autoksyms.h>
+.macro __ksym_marker sym
+ .section ".discard.ksym","a"
+__ksym_marker_\sym:
+ .previous
+.endm
+
#define __EXPORT_SYMBOL(sym, val, sec) \
+ __ksym_marker sym; \
__cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf) \
___cond_export_sym(sym, val, sec, conf)
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3bbc7a..8cc7b09c1bc7 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_FIXMAP_H
#include <linux/bug.h>
+#include <linux/mm_types.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 9d0cde8ab716..71d7b77eea50 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -32,7 +32,7 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
-#ifndef huge_pte_clear
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
@@ -40,4 +40,90 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
}
#endif
+#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+#endif
+
#endif /* _ASM_GENERIC_HUGETLB_H */
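
Each helper above is a generic fallback that an architecture opts out of by defining the matching __HAVE_ARCH_* symbol before this header is included. A sketch of an arch's asm/hugetlb.h overriding a single helper while keeping the rest; arch_sync_huge_mapping() is a made-up hook, used only for illustration.

	#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
	static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
	{
		arch_sync_huge_mapping(mm, addr);	/* hypothetical arch hook */
		set_pte_at(mm, addr, ptep, pte);
	}

	#include <asm-generic/hugetlb.h>	/* generic fallbacks for everything else */
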
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 66d1d45fa2e1..d356f802945a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
+ port &= IO_SPACE_LIMIT;
+ return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif
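
With this change ioport_map() no longer silently wraps out-of-range port numbers into the MMIO window: after masking with IO_SPACE_LIMIT, anything above MMIO_UPPER_LIMIT yields NULL. Callers are expected to check for that, roughly as in this sketch (the port number and access are illustrative):

	#include <linux/errno.h>
	#include <linux/io.h>

	static int example_probe_port(unsigned long port, unsigned int len)
	{
		void __iomem *base = ioport_map(port, len);

		if (!base)		/* port beyond the mapped I/O window */
			return -ENODEV;

		writeb(0x00, base);	/* illustrative access */
		ioport_unmap(base);
		return 0;
	}
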
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a8415a5e..c2de013b2cf4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -62,10 +62,6 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_ATTRIBUTES
#endif
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
-
#define raw_cpu_generic_read(pcp) \
({ \
*raw_cpu_ptr(&(pcp)); \
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..829bdb0d6327 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
@@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
#define pgd_populate(mm, pgd, pud) do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..aebab905e6cd 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
@@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
#define pgd_populate(mm, pgd, p4d) do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
/*
* (p4ds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..c77a1d301155 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
@@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { }
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
#define p4d_populate(mm, p4d, pud) do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
/*
* (puds are folded into p4ds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 88ebc6102c7c..05e61e6c843f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte)
#endif
#ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
- BUILD_BUG();
- return 0;
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
}
+#endif
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
- BUILD_BUG();
- return 0;
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
* Some architectures support metadata associated with a page. When a
@@ -757,7 +795,7 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
/*
* Interfaces that can be used by architecture code to keep track of
* memory type of pfn mappings specified by the remap_pfn_range,
- * vm_insert_pfn.
+ * vmf_insert_pfn.
*/
/*
@@ -773,7 +811,7 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
/*
* track_pfn_insert is called when a _new_ single pfn is established
- * by vm_insert_pfn().
+ * by vmf_insert_pfn().
*/
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
pfn_t pfn)
@@ -1019,6 +1057,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -1046,6 +1085,10 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
@@ -1127,4 +1170,20 @@ static inline bool arch_has_pfn_modify_check(void)
#endif
#endif
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
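
The set_p*_safe() helpers wrap the plain setters with a WARN_ON_ONCE() that fires only if a present entry is being changed to a different value, i.e. exactly the case that would have required a TLB flush; the new mm_p4d_folded()/mm_pud_folded()/mm_pmd_folded() defaults simply report the compile-time fold state unless an architecture overrides them per-mm. A sketch of the intended set_p*_safe() use when pre-building page tables; the page-flag bits are x86-flavoured and purely illustrative.

	/* Fill an early/identity mapping whose slots are either still empty or
	 * rewritten with the same value, so no TLB flush is due. */
	static void example_ident_map_pmd(pmd_t *pmd, unsigned long paddr)
	{
		pmd_t entry = __pmd(paddr | _PAGE_PRESENT | _PAGE_PSE);

		/* Warns once if this would change a live, different entry. */
		set_pmd_safe(pmd, entry);
	}
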
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
if (unlikely(cnts))
return 0;
- return likely(atomic_cmpxchg_acquire(&lock->cnts,
- cnts, cnts | _QW_LOCKED) == cnts);
+ return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+ _QW_LOCKED));
}
/**
* queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
+ u32 cnts = 0;
/* Optimize for the unfair lock case where the fair flag is 0. */
- if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+ if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
return;
queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
*/
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
- if (!atomic_read(&lock->val) &&
- (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
- return 1;
- return 0;
+ u32 val = atomic_read(&lock->val);
+
+ if (unlikely(val))
+ return 0;
+
+ return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
*/
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
- u32 val;
+ u32 val = 0;
- val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
- if (likely(val == 0))
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
return;
+
queued_spin_lock_slowpath(lock, val);
}
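
Both fast paths above switch from atomic_cmpxchg_acquire() to atomic_try_cmpxchg_acquire(), which returns a success boolean and writes the value it actually observed back through its second argument, so the caller neither re-reads nor re-compares. A minimal sketch of that idiom:

	#include <linux/atomic.h>

	static bool example_trylock(atomic_t *v)
	{
		int expected = 0;

		/*
		 * True only if *v was 0 and is now 1. On failure, 'expected'
		 * holds the value that was seen, ready for a slow path to use
		 * without another atomic_read().
		 */
		return atomic_try_cmpxchg_acquire(v, &expected, 1);
	}
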
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 849cd8eb5ca0..d79abca81a52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -141,4 +141,18 @@ static inline bool init_section_intersects(void *virt, size_t size)
return memory_intersects(__init_begin, __init_end, virt, size);
}
+/**
+ * is_kernel_rodata - checks if the pointer address is located in the
+ * .rodata section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .rodata, false otherwise.
+ */
+static inline bool is_kernel_rodata(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_rodata &&
+ addr < (unsigned long)__end_rodata;
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
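
A typical use of is_kernel_rodata() is deciding whether a caller-supplied string can be kept by reference because it lives in the kernel image's read-only data, or has to be duplicated. A kstrdup_const()-style sketch:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <asm/sections.h>

	static const char *example_keep_name(const char *name, gfp_t gfp)
	{
		if (is_kernel_rodata((unsigned long)name))
			return name;		/* string literal: keep the pointer */

		return kstrdup(name, gfp);	/* heap/stack string: take a copy */
	}
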
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e811ef7b8350..6be86c1c5c58 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,10 +15,13 @@
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H
+#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MMU
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* Semi RCU freeing of the page directories.
@@ -96,12 +99,30 @@ struct mmu_gather {
#endif
unsigned long start;
unsigned long end;
- /* we are in the middle of an operation to clear
- * a full mm and can make some optimizations */
- unsigned int fullmm : 1,
- /* we have performed an operation which
- * requires a complete flush of the tlb */
- need_flush_all : 1;
+ /*
+ * we are in the middle of an operation to clear
+ * a full mm and can make some optimizations
+ */
+ unsigned int fullmm : 1;
+
+ /*
+ * we have performed an operation which
+ * requires a complete flush of the tlb
+ */
+ unsigned int need_flush_all : 1;
+
+ /*
+ * we have removed page directories
+ */
+ unsigned int freed_tables : 1;
+
+ /*
+ * at which levels have we cleared entries?
+ */
+ unsigned int cleared_ptes : 1;
+ unsigned int cleared_pmds : 1;
+ unsigned int cleared_puds : 1;
+ unsigned int cleared_p4ds : 1;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
@@ -117,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
+void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
@@ -136,6 +158,21 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->start = TASK_SIZE;
tlb->end = 0;
}
+ tlb->freed_tables = 0;
+ tlb->cleared_ptes = 0;
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
+}
+
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ if (!tlb->end)
+ return;
+
+ tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+ __tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
@@ -175,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
}
#endif
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+ if (tlb->cleared_ptes)
+ return PAGE_SHIFT;
+ if (tlb->cleared_pmds)
+ return PMD_SHIFT;
+ if (tlb->cleared_puds)
+ return PUD_SHIFT;
+ if (tlb->cleared_p4ds)
+ return P4D_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+ return 1UL << tlb_get_unmap_shift(tlb);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -186,10 +242,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define __tlb_end_vma(tlb, vma) \
do { \
- if (!tlb->fullmm && tlb->end) { \
- tlb_flush(tlb); \
- __tlb_reset_range(tlb); \
- } \
+ if (!tlb->fullmm) \
+ tlb_flush_mmu_tlbonly(tlb); \
} while (0)
#ifndef tlb_end_vma
@@ -210,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->cleared_ptes = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- do { \
- __tlb_adjust_range(tlb, address, huge_page_size(h)); \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ unsigned long _sz = huge_page_size(h); \
+ __tlb_adjust_range(tlb, address, _sz); \
+ if (_sz == PMD_SIZE) \
+ tlb->cleared_pmds = 1; \
+ else if (_sz == PUD_SIZE) \
+ tlb->cleared_puds = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
/**
@@ -230,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ tlb->cleared_pmds = 1; \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -244,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ tlb->cleared_puds = 1; \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -269,6 +331,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -276,7 +340,9 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -286,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -295,22 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#endif
-#define tlb_migrate_finish(mm) do {} while (0)
+#endif /* CONFIG_MMU */
-/*
- * Used to flush the TLB when page tables are removed, when lazy
- * TLB mode may cause a CPU to retain intermediate translations
- * pointing to about-to-be-freed page table memory.
- */
-#ifndef HAVE_TLB_FLUSH_REMOVE_TABLES
-#define tlb_flush_remove_tables(mm) do {} while (0)
-#define tlb_flush_remove_tables_local(mm) do {} while (0)
-#endif
+#define tlb_migrate_finish(mm) do {} while (0)
#endif /* _ASM_GENERIC__TLB_H */
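
The new freed_tables and cleared_* bits, together with tlb_get_unmap_shift()/tlb_get_unmap_size(), let an architecture's tlb_flush() match the flush to what was actually unmapped instead of always assuming the worst case. A sketch of how such a hook might consume the state; arch_flush_tlb_mm() and arch_flush_tlb_range() are hypothetical primitives, not part of this patch.

	void arch_flush_tlb_mm(struct mm_struct *mm);				/* hypothetical */
	void arch_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end, unsigned long page_shift);	/* hypothetical */

	static inline void example_tlb_flush(struct mmu_gather *tlb)
	{
		if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) {
			/* Page tables were freed: also drop cached intermediate walks. */
			arch_flush_tlb_mm(tlb->mm);
		} else {
			unsigned long shift = tlb_get_unmap_shift(tlb);

			/* Flush only [start, end) at the granularity that was unmapped. */
			arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end, shift);
		}
	}
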
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6b2e63df2739..d82c78a79da5 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -35,7 +35,7 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
-#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
/*
* The architecture should really override this if possible, at least
@@ -78,7 +78,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
({ \
void __user *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \
+ access_ok(__p, sizeof(*ptr)) ? \
__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \
-EFAULT; \
})
@@ -140,7 +140,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
({ \
const void __user *__p = (ptr); \
might_fault(); \
- access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
+ access_ok(__p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
@@ -175,7 +175,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count)
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
- if (!access_ok(VERIFY_READ, src, 1))
+ if (!access_ok(src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
@@ -196,7 +196,7 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*/
static inline long strnlen_user(const char __user *src, long n)
{
- if (!access_ok(VERIFY_READ, src, 1))
+ if (!access_ok(src, 1))
return 0;
return __strnlen_user(src, n);
}
@@ -217,7 +217,7 @@ static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
might_fault();
- if (!access_ok(VERIFY_WRITE, to, n))
+ if (!access_ok(to, n))
return n;
return __clear_user(to, n);
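
The VERIFY_READ/VERIFY_WRITE argument is dropped from access_ok() throughout; as the old generic definition above shows, it was never even looked at. Caller-side the change is mechanical, e.g.:

	#include <linux/uaccess.h>

	static int example_copy_out(void __user *ubuf, const void *kbuf, size_t len)
	{
		/* previously: if (!access_ok(VERIFY_WRITE, ubuf, len)) ... */
		if (!access_ok(ubuf, len))
			return -EFAULT;

		return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
	}
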
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
deleted file mode 100644
index cdf904265caf..000000000000
--- a/include/asm-generic/unistd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <uapi/asm-generic/unistd.h>
-#include <linux/export.h>
-
-/*
- * These are required system calls, we should
- * invert the logic eventually and let them
- * be selected by default.
- */
-#if __BITS_PER_LONG == 32
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_LLSEEK
-#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f173b5f30dbe..3d7a6a9c2370 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -54,8 +54,6 @@
#define LOAD_OFFSET 0
#endif
-#include <linux/export.h>
-
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
@@ -70,7 +68,7 @@
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
@@ -205,6 +203,15 @@
#define EARLYCON_TABLE()
#endif
+#ifdef CONFIG_SECURITY
+#define LSM_TABLE() . = ALIGN(8); \
+ __start_lsm_info = .; \
+ KEEP(*(.lsm_info.init)) \
+ __end_lsm_info = .;
+#else
+#define LSM_TABLE()
+#endif
+
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
@@ -255,10 +262,6 @@
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
- . = ALIGN(8); \
- __start___jump_table = .; \
- KEEP(*(__jump_table)) \
- __stop___jump_table = .; \
. = ALIGN(8); \
__start___verbose = .; \
KEEP(*(__verbose)) \
@@ -302,6 +305,12 @@
. = __start_init_task + THREAD_SIZE; \
__end_init_task = .;
+#define JUMP_TABLE_DATA \
+ . = ALIGN(8); \
+ __start___jump_table = .; \
+ KEEP(*(__jump_table)) \
+ __stop___jump_table = .;
+
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
@@ -310,6 +319,7 @@
#define RO_AFTER_INIT_DATA \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
+ JUMP_TABLE_DATA \
__end_ro_after_init = .;
#endif
@@ -475,13 +485,6 @@
#define RODATA RO_DATA_SECTION(4096)
#define RO_DATA(align) RO_DATA_SECTION(align)
-#define SECURITY_INIT \
- .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
- __security_initcall_start = .; \
- KEEP(*(.security_initcall.init)) \
- __security_initcall_end = .; \
- }
-
/*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
@@ -606,7 +609,8 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
- EARLYCON_TABLE()
+ EARLYCON_TABLE() \
+ LSM_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -615,8 +619,8 @@
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
- *(.fini_array) \
- *(.dtors) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
@@ -795,11 +799,6 @@
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
-#define SECURITY_INITCALL \
- __security_initcall_start = .; \
- KEEP(*(.security_initcall.init)) \
- __security_initcall_end = .;
-
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
@@ -966,7 +965,6 @@
INIT_SETUP(initsetup_align) \
INIT_CALLS \
CON_INITCALL \
- SECURITY_INITCALL \
INIT_RAM_FS \
}
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..a3e766dff917 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -246,8 +246,14 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
+ int ret;
- return tfm->compress(req);
+ crypto_stats_get(alg);
+ ret = tfm->compress(req);
+ crypto_stats_compress(slen, ret, alg);
+ return ret;
}
/**
@@ -262,8 +268,14 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
+ int ret;
- return tfm->decompress(req);
+ crypto_stats_get(alg);
+ ret = tfm->decompress(req);
+ crypto_stats_decompress(slen, ret, alg);
+ return ret;
}
#endif
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..9ad595f97c65 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -115,7 +115,6 @@ struct aead_request {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
- * @geniv: see struct skcipher_alg
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -142,8 +141,6 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
- const char *geniv;
-
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
@@ -328,11 +325,17 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
+ int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_aead_alg(aead)->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = crypto_aead_alg(aead)->encrypt(req);
+ crypto_stats_aead_encrypt(cryptlen, alg, ret);
+ return ret;
}
/**
@@ -360,14 +363,19 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
+ int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- if (req->cryptlen < crypto_aead_authsize(aead))
- return -EINVAL;
-
- return crypto_aead_alg(aead)->decrypt(req);
+ ret = -ENOKEY;
+ else if (req->cryptlen < crypto_aead_authsize(aead))
+ ret = -EINVAL;
+ else
+ ret = crypto_aead_alg(aead)->decrypt(req);
+ crypto_stats_aead_decrypt(cryptlen, alg, ret);
+ return ret;
}
/**
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..2d690494568c 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -285,8 +285,14 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- return alg->encrypt(req);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
+ int ret;
+
+ crypto_stats_get(calg);
+ ret = alg->encrypt(req);
+ crypto_stats_akcipher_encrypt(src_len, ret, calg);
+ return ret;
}
/**
@@ -303,8 +309,14 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
-
- return alg->decrypt(req);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
+ int ret;
+
+ crypto_stats_get(calg);
+ ret = alg->decrypt(req);
+ crypto_stats_akcipher_decrypt(src_len, ret, calg);
+ return ret;
}
/**
@@ -321,8 +333,13 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ int ret;
- return alg->sign(req);
+ crypto_stats_get(calg);
+ ret = alg->sign(req);
+ crypto_stats_akcipher_sign(ret, calg);
+ return ret;
}
/**
@@ -339,8 +356,13 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ int ret;
- return alg->verify(req);
+ crypto_stats_get(calg);
+ ret = alg->verify(req);
+ crypto_stats_akcipher_verify(ret, calg);
+ return ret;
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index bd5e8ccf1687..4a5ad10e75f0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -20,8 +20,10 @@
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
- * ciphers and architectures.
+ * algs and architectures. Ciphers have a lower maximum size.
*/
+#define MAX_ALGAPI_BLOCKSIZE 160
+#define MAX_ALGAPI_ALIGNMASK 63
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
@@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags)
#endif
}
+int crypto_register_notifier(struct notifier_block *nb);
+int crypto_unregister_notifier(struct notifier_block *nb);
+
+/* Crypto notification events. */
+enum {
+ CRYPTO_MSG_ALG_REQUEST,
+ CRYPTO_MSG_ALG_REGISTER,
+ CRYPTO_MSG_ALG_LOADED,
+};
+
#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
new file mode 100644
index 000000000000..48198c36d6b9
--- /dev/null
+++ b/include/crypto/asym_tpm_subtype.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
+#define _LINUX_ASYM_TPM_SUBTYPE_H
+
+#include <linux/keyctl.h>
+
+struct tpm_key {
+ void *blob;
+ u32 blob_len;
+ uint16_t key_len; /* Size in bits of the key */
+ const void *pub_key; /* pointer inside blob to the public key bytes */
+ uint16_t pub_key_len; /* length of the public key */
+};
+
+struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
+
+extern struct asymmetric_key_subtype asym_tpm_subtype;
+
+#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
index f5b8bfc22e6d..3bf28beefa33 100644
--- a/include/crypto/cbc.h
+++ b/include/crypto/cbc.h
@@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace(
unsigned int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
+ u8 last_iv[MAX_CIPHER_BLOCKSIZE];
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
new file mode 100644
index 000000000000..1fc70a69d550
--- /dev/null
+++ b/include/crypto/chacha.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the ChaCha and XChaCha stream ciphers.
+ *
+ * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's
+ * security. Here they share the same key size, tfm context, and setkey
+ * function; only their IV size and encrypt/decrypt function differ.
+ *
+ * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is
+ * recommended to use the 20-round variant ChaCha20. However, the other
+ * variants can be needed in some performance-sensitive scenarios. The generic
+ * ChaCha code currently allows only the 20 and 12-round variants.
+ */
+
+#ifndef _CRYPTO_CHACHA_H
+#define _CRYPTO_CHACHA_H
+
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
+#define CHACHA_IV_SIZE 16
+
+#define CHACHA_KEY_SIZE 32
+#define CHACHA_BLOCK_SIZE 64
+#define CHACHAPOLY_IV_SIZE 12
+
+/* 192-bit nonce, then 64-bit stream position */
+#define XCHACHA_IV_SIZE 32
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+void chacha_block(u32 *state, u8 *stream, int nrounds);
+static inline void chacha20_block(u32 *state, u8 *stream)
+{
+ chacha_block(state, stream, 20);
+}
+void hchacha_block(const u32 *in, u32 *out, int nrounds);
+
+void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv);
+
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+int crypto_chacha_crypt(struct skcipher_request *req);
+int crypto_xchacha_crypt(struct skcipher_request *req);
+
+#endif /* _CRYPTO_CHACHA_H */
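
The new header covers the 20- and 12-round ChaCha variants plus XChaCha, with chacha20_block() kept as a thin wrapper around chacha_block(). A sketch of producing one keystream block through the low-level interface; the context (key already set, nrounds chosen) and the IV are assumed to exist.

	#include <crypto/chacha.h>

	static void example_keystream_block(struct chacha_ctx *ctx, u8 *iv,
					    u8 stream[CHACHA_BLOCK_SIZE])
	{
		u32 state[16];

		crypto_chacha_init(state, ctx, iv);	   /* constants + key + counter/nonce */
		chacha_block(state, stream, ctx->nrounds); /* also advances the block counter */
	}
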
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
deleted file mode 100644
index b83d66073db0..000000000000
--- a/include/crypto/chacha20.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values for the ChaCha20 algorithm
- */
-
-#ifndef _CRYPTO_CHACHA20_H
-#define _CRYPTO_CHACHA20_H
-
-#include <crypto/skcipher.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-
-#define CHACHA20_IV_SIZE 16
-#define CHACHA20_KEY_SIZE 32
-#define CHACHA20_BLOCK_SIZE 64
-#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
-
-struct chacha20_ctx {
- u32 key[8];
-};
-
-void chacha20_block(u32 *state, u32 *stream);
-void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha20_crypt(struct skcipher_request *req);
-
-#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..3b31c1b349ae 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -151,9 +151,13 @@ struct shash_desc {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define HASH_MAX_DIGESTSIZE 64
+#define HASH_MAX_DESCSIZE 360
+#define HASH_MAX_STATESIZE 512
+
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + \
- crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -522,7 +526,15 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->update(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
+ int ret;
+
+ crypto_stats_get(alg);
+ ret = crypto_ahash_reqtfm(req)->update(req);
+ crypto_stats_ahash_update(nbytes, ret, alg);
+ return ret;
}
/**
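
SHASH_DESC_ON_STACK() now reserves a fixed HASH_MAX_DESCSIZE bytes rather than a per-algorithm, variable amount, so the stack frame stays constant-sized (and VLA-free). Usage is unchanged; a sketch, assuming a crypto_shash that has already been allocated and that the desc->flags field still exists in this kernel generation:

	#include <crypto/hash.h>

	static int example_digest(struct crypto_shash *tfm,
				  const u8 *data, unsigned int len, u8 *out)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		desc->flags = 0;	/* i.e. no CRYPTO_TFM_REQ_MAY_SLEEP */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
		return err;
	}
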
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index 56f217d41f12..91786b68dbdb 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -15,6 +15,7 @@
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/streebog.h>
#include <uapi/linux/hash_info.h>
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..40623f4457df
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs)
+{
+ return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 2bcfb931bc5b..71be24cd59bd 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_skcipher *sknull;
+ struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index e42f7063f245..453e867b4bd9 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -70,8 +70,6 @@ struct skcipher_walk {
unsigned int alignmask;
};
-extern const struct crypto_type crypto_givcipher_type;
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..1a97e1601422 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -287,8 +287,13 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ int ret;
- return alg->set_secret(tfm, buffer, len);
+ crypto_stats_get(calg);
+ ret = alg->set_secret(tfm, buffer, len);
+ crypto_stats_kpp_set_secret(calg, ret);
+ return ret;
}
/**
@@ -308,8 +313,13 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ int ret;
- return alg->generate_public_key(req);
+ crypto_stats_get(calg);
+ ret = alg->generate_public_key(req);
+ crypto_stats_kpp_generate_public_key(calg, ret);
+ return ret;
}
/**
@@ -326,8 +336,13 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ int ret;
- return alg->compute_shared_secret(req);
+ crypto_stats_get(calg);
+ ret = alg->compute_shared_secret(req);
+ crypto_stats_kpp_compute_shared_secret(calg, ret);
+ return ret;
}
/**
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404fc4b34..000000000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index b26dd70efd9a..ba782e10065e 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus1280_##id##_algs[] = {\
+ static struct aead_alg crypto_morus1280_##id##_algs[] = {\
{ \
.setkey = crypto_morus1280_glue_setkey, \
.setauthsize = crypto_morus1280_glue_setauthsize, \
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 90c8db07e740..27fa790a2362 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus640_##id##_algs[] = {\
+ static struct aead_alg crypto_morus640_##id##_algs[] = {\
{ \
.setkey = crypto_morus640_glue_setkey, \
.setauthsize = crypto_morus640_glue_setauthsize, \
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
new file mode 100644
index 000000000000..53c04423c582
--- /dev/null
+++ b/include/crypto/nhpoly1305.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the NHPoly1305 hash function.
+ */
+
+#ifndef _NHPOLY1305_H
+#define _NHPOLY1305_H
+
+#include <crypto/hash.h>
+#include <crypto/poly1305.h>
+
+/* NH parameterization: */
+
+/* Endianness: little */
+/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
+
+/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
+#define NH_PAIR_STRIDE 2
+#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
+
+/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
+#define NH_NUM_PASSES 4
+#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
+
+/* Max message size: 1024 bytes (32x compression factor) */
+#define NH_NUM_STRIDES 64
+#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
+#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
+#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
+ NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
+#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
+
+#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
+
+struct nhpoly1305_key {
+ struct poly1305_key poly_key;
+ u32 nh_key[NH_KEY_WORDS];
+};
+
+struct nhpoly1305_state {
+
+ /* Running total of polynomial evaluation */
+ struct poly1305_state poly_state;
+
+ /* Partial block buffer */
+ u8 buffer[NH_MESSAGE_UNIT];
+ unsigned int buflen;
+
+ /*
+ * Number of bytes remaining until the current NH message reaches
+ * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
+ */
+ unsigned int nh_remaining;
+
+ __le64 nh_hash[NH_NUM_PASSES];
+};
+
+typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES]);
+
+int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen);
+
+int crypto_nhpoly1305_init(struct shash_desc *desc);
+int crypto_nhpoly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen);
+int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen,
+ nh_t nh_fn);
+int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
+int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
+ nh_t nh_fn);
+
+#endif /* _NHPOLY1305_H */
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 15aeef6e30ef..0ef577cc00e3 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_skcipher *crypto_get_default_null_skcipher(void);
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f718a19da82f..34317ed2071e 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,13 +13,21 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+struct poly1305_key {
+ u32 r[5]; /* key, base 2^26 */
+};
+
+struct poly1305_state {
+ u32 h[5]; /* accumulator, base 2^26 */
+};
+
struct poly1305_desc_ctx {
/* key */
- u32 r[5];
+ struct poly1305_key r;
/* finalize key */
u32 s[4];
/* accumulator */
- u32 h[5];
+ struct poly1305_state h;
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
@@ -30,6 +38,22 @@ struct poly1305_desc_ctx {
bool sset;
};
+/*
+ * Poly1305 core functions. These implement the ε-almost-∆-universal hash
+ * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
+ * ("s key") at the end. They also only support block-aligned inputs.
+ */
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ memset(state->h, 0, sizeof(state->h));
+}
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key,
+ const void *src, unsigned int nblocks);
+void poly1305_core_emit(const struct poly1305_state *state, void *dst);
+
+/* Crypto API helper functions for the Poly1305 MAC */
int crypto_poly1305_init(struct shash_desc *desc);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
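
The poly1305_core_*() functions expose the nonce-free core of Poly1305 for users such as NHPoly1305: block-aligned input only, and no encrypted nonce ("s" key) folded in at the end. A sketch of driving them directly; the buffer names are illustrative and raw_key is only the 16-byte "r" portion.

	#include <crypto/poly1305.h>

	static void example_poly1305_core(const u8 raw_key[POLY1305_BLOCK_SIZE],
					  const void *data, unsigned int nblocks,
					  void *out16)
	{
		struct poly1305_key key;
		struct poly1305_state state;

		poly1305_core_setkey(&key, raw_key);	/* clamp and split into base 2^26 */
		poly1305_core_init(&state);
		poly1305_core_blocks(&state, &key, data, nblocks);
		poly1305_core_emit(&state, out16);	/* 16 bytes, no 's' added */
	}
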
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index e0b681a717ba..be626eac9113 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -14,6 +14,8 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
+#include <linux/keyctl.h>
+
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
*
@@ -23,6 +25,7 @@
struct public_key {
void *key;
u32 keylen;
+ bool key_is_private;
const char *id_type;
const char *pkey_algo;
};
@@ -40,6 +43,7 @@ struct public_key_signature {
u8 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
+ const char *encoding;
};
extern void public_key_signature_free(struct public_key_signature *sig);
@@ -65,8 +69,14 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
const union key_payload *payload,
struct key *trusted);
-extern int verify_signature(const struct key *key,
- const struct public_key_signature *sig);
+extern int query_asymmetric_key(const struct kernel_pkey_params *,
+ struct kernel_pkey_query *);
+
+extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int create_signature(struct kernel_pkey_params *, const void *, void *);
+extern int verify_signature(const struct key *,
+ const struct public_key_signature *);
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..022a1b896b47 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -140,7 +140,13 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ int ret;
+
+ crypto_stats_get(alg);
+ ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ crypto_stats_rng_generate(alg, dlen, ret);
+ return ret;
}
/**
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..e555294ed77f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -39,19 +39,6 @@ struct skcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct skcipher_givcrypt_request - Crypto request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @creq: The crypto request itself
- */
-struct skcipher_givcrypt_request {
- u64 seq;
- u8 *giv;
-
- struct ablkcipher_request creq;
-};
-
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
@@ -65,6 +52,10 @@ struct crypto_skcipher {
struct crypto_tfm base;
};
+struct crypto_sync_skcipher {
+ struct crypto_skcipher base;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
@@ -139,9 +130,17 @@ struct skcipher_alg {
struct crypto_alg base;
};
-#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define MAX_SYNC_SKCIPHER_REQSIZE 384
+/*
+ * This performs a type-check against the "tfm" argument to make sure
+ * all users have the correct skcipher tfm for doing on-stack requests.
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ MAX_SYNC_SKCIPHER_REQSIZE + \
+ (!(sizeof((struct crypto_sync_skcipher *)1 == \
+ (typeof(tfm))1))) \
+ ] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
/**
@@ -197,6 +196,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
@@ -212,6 +214,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+ crypto_free_skcipher(&tfm->base);
+}
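For illustration, a sketch of how the new synchronous wrappers combine; "cbc(aes)" is just an example algorithm, and key, keylen, sg, len and iv are assumed to be provided by the caller:

    struct crypto_sync_skcipher *tfm;
    int err;

    tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    err = crypto_sync_skcipher_setkey(tfm, key, keylen);
    if (!err) {
            /* Bounded by MAX_SYNC_SKCIPHER_REQSIZE, so safe on the stack. */
            SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

            skcipher_request_set_sync_tfm(req, tfm);
            skcipher_request_set_callback(req, 0, NULL, NULL);
            skcipher_request_set_crypt(req, sg, sg, len, iv);
            err = crypto_skcipher_encrypt(req);
            skcipher_request_zero(req);
    }
    crypto_free_sync_skcipher(tfm);
    return err;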
+
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -280,6 +287,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
return tfm->ivsize;
}
+static inline unsigned int crypto_sync_skcipher_ivsize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_ivsize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
@@ -356,6 +369,12 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
@@ -379,6 +398,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
+static inline u32 crypto_sync_skcipher_get_flags(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_skcipher_set_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_skcipher_clear_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -401,6 +438,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
+static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_skcipher_setkey(&tfm->base, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -422,6 +465,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
return __crypto_skcipher_cast(req->base.tfm);
}
+static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_skcipher, base);
+}
+
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -436,11 +487,17 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
+ int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->encrypt(req);
+ crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
+ return ret;
}
/**
@@ -457,11 +514,17 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
+ int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->decrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->decrypt(req);
+ crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
+ return ret;
}
/**
@@ -500,6 +563,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
req->base.tfm = crypto_skcipher_tfm(tfm);
}
+static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
+ struct crypto_sync_skcipher *tfm)
+{
+ skcipher_request_set_tfm(req, &tfm->base);
+}
+
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common values for the Speck algorithm
- */
-
-#ifndef _CRYPTO_SPECK_H
-#define _CRYPTO_SPECK_H
-
-#include <linux/types.h>
-
-/* Speck128 */
-
-#define SPECK128_BLOCK_SIZE 16
-
-#define SPECK128_128_KEY_SIZE 16
-#define SPECK128_128_NROUNDS 32
-
-#define SPECK128_192_KEY_SIZE 24
-#define SPECK128_192_NROUNDS 33
-
-#define SPECK128_256_KEY_SIZE 32
-#define SPECK128_256_NROUNDS 34
-
-struct speck128_tfm_ctx {
- u64 round_keys[SPECK128_256_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-/* Speck64 */
-
-#define SPECK64_BLOCK_SIZE 8
-
-#define SPECK64_96_KEY_SIZE 12
-#define SPECK64_96_NROUNDS 26
-
-#define SPECK64_128_KEY_SIZE 16
-#define SPECK64_128_NROUNDS 27
-
-struct speck64_tfm_ctx {
- u32 round_keys[SPECK64_128_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SPECK_H */
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
new file mode 100644
index 000000000000..4af119f7e07b
--- /dev/null
+++ b/include/crypto/streebog.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */
+/*
+ * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org>
+ * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_STREEBOG_H_
+#define _CRYPTO_STREEBOG_H_
+
+#include <linux/types.h>
+
+#define STREEBOG256_DIGEST_SIZE 32
+#define STREEBOG512_DIGEST_SIZE 64
+#define STREEBOG_BLOCK_SIZE 64
+
+struct streebog_uint512 {
+ u64 qword[8];
+};
+
+struct streebog_state {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 hash;
+ struct streebog_uint512 h;
+ struct streebog_uint512 N;
+ struct streebog_uint512 Sigma;
+ size_t fillsize;
+};
+
+#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ccb5aa8468e0..9c56412bb2cf 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -133,6 +133,7 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
void *phy_data;
+ unsigned int phy_force_vendor;
/* Synopsys PHY support */
const struct dw_hdmi_mpll_config *mpll_cfg;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index d9c6d549f971..48a671e782ca 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
};
+struct dw_mipi_dsi_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
struct dw_mipi_dsi_plat_data {
void __iomem *base;
unsigned int max_data_lanes;
@@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data {
const struct drm_display_mode *mode);
const struct dw_mipi_dsi_phy_ops *phy_ops;
+ const struct dw_mipi_dsi_host_ops *host_ops;
void *priv_data;
};
@@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data
*plat_data);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
-struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
- struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data
- *plat_data);
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f7a19c2a7a80..bdb0d5548f39 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
@@ -110,7 +109,10 @@ static inline bool drm_can_sleep(void)
return true;
}
-/* helper for handling conditionals in various for_each macros */
-#define for_each_if(condition) if (!(condition)) {} else
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index da9d95a19580..f9b35834c45d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -29,6 +29,7 @@
#define DRM_ATOMIC_H_
#include <drm/drm_crtc.h>
+#include <drm/drm_util.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -153,6 +154,17 @@ struct __drm_planes_state {
struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @commit:
+ *
+ * A reference to the CRTC commit object that is kept for use by
+ * drm_atomic_helper_wait_for_flip_done() after
+ * drm_atomic_helper_commit_hw_done() is called. This ensures that a
+ * concurrent commit won't free a commit object that is still in use.
+ */
+ struct drm_crtc_commit *commit;
+
s32 __user *out_fence_ptr;
u64 last_vblank_count;
};
@@ -253,7 +265,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
@@ -272,6 +283,15 @@ struct drm_atomic_state {
struct kref ref;
struct drm_device *dev;
+
+ /**
+ * @allow_modeset:
+ *
+ * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+ * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+ * never consult this flag, instead looking at the output of
+ * drm_atomic_crtc_needs_modeset().
+ */
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool async_update : 1;
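A one-line illustration of the guidance above, assuming a driver atomic_check with a new_crtc_state pointer and a hypothetical my_validate_full_modeset() helper:

    /* Decide based on the computed state, never on state->allow_modeset. */
    if (drm_atomic_crtc_needs_modeset(new_crtc_state))
            return my_validate_full_modeset(new_crtc_state);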
@@ -373,9 +393,6 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state);
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *state, struct drm_property *property,
- uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
@@ -587,25 +604,6 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
}
int __must_check
-drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
- const struct drm_display_mode *mode);
-int __must_check
-drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
- struct drm_property_blob *blob);
-int __must_check
-drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
- struct drm_crtc *crtc);
-void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
- struct drm_framebuffer *fb);
-void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence);
-int __must_check
-drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
- struct drm_crtc *crtc);
-int drm_atomic_set_writeback_fb_for_connector(
- struct drm_connector_state *conn_state,
- struct drm_framebuffer *fb);
-int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 99e2a5297c69..58214be3bf3d 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_util.h>
struct drm_atomic_state;
struct drm_private_obj;
@@ -125,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
@@ -143,49 +148,10 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector);
-
-/* default implementations for state handling */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-
-void drm_atomic_helper_plane_reset(struct drm_plane *plane);
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-
-void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state);
-void drm_atomic_helper_connector_reset(struct drm_connector *connector);
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state);
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
new file mode 100644
index 000000000000..66c92cbd8e16
--- /dev/null
+++ b/include/drm/drm_atomic_state_helper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_private_obj;
+struct drm_private_state;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state);
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
new file mode 100644
index 000000000000..8cec52ad1277
--- /dev/null
+++ b/include/drm/drm_atomic_uapi.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_UAPI_H_
+#define DRM_ATOMIC_UAPI_H_
+
+struct drm_crtc_state;
+struct drm_display_mode;
+struct drm_property_blob;
+struct drm_plane_state;
+struct drm_crtc;
+struct drm_connector_state;
+struct dma_fence;
+struct drm_framebuffer;
+
+int __must_check
+drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
+ const struct drm_display_mode *mode);
+int __must_check
+drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ struct drm_property_blob *blob);
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+
+#endif
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 330c561c4c11..88bdfec3bd88 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -27,6 +27,10 @@
#include <linux/ctype.h>
#include <drm/drm_mode.h>
+#define DRM_MODE_BLEND_PREMULTI 0
+#define DRM_MODE_BLEND_COVERAGE 1
+#define DRM_MODE_BLEND_PIXEL_NONE 2
+
struct drm_device;
struct drm_atomic_state;
struct drm_plane;
@@ -52,4 +56,6 @@ int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
unsigned int zpos);
int drm_atomic_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_plane_create_blend_mode_property(struct drm_plane *plane,
+ unsigned int supported_modes);
#endif
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 989f8e52864d..971bb7853776 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -87,9 +87,10 @@ struct drm_client_dev {
struct drm_file *file;
};
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
- const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);
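The split of drm_client_new() into drm_client_init() and drm_client_add() lets a client finish its own setup before it is published to the device. A sketch, with my_client and my_client_funcs assumed:

    int ret;

    ret = drm_client_init(dev, &my_client, "my-client", &my_client_funcs);
    if (ret)
            return ret;

    /* ... allocate buffers and anything else that may fail ... */

    /* Only publish the client once setup can no longer fail, so that
     * hotplug/unregister callbacks never see a half-initialized client. */
    drm_client_add(&my_client);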
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 44f04233e3db..90ef9996d9a4 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
+#include <drm/drm_property.h>
struct drm_crtc;
struct drm_plane;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 97ea41dc678f..9be2181b3ed7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -28,6 +28,7 @@
#include <linux/ctype.h>
#include <linux/hdmi.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_util.h>
#include <uapi/drm/drm_mode.h>
@@ -81,6 +82,53 @@ enum drm_connector_status {
connector_status_unknown = 3,
};
+/**
+ * enum drm_connector_registration_state - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+ /**
+ * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+ * but has yet to be exposed to userspace. There should be no
+ * additional restrictions to how the state of this connector may be
+ * modified.
+ */
+ DRM_CONNECTOR_INITIALIZING = 0,
+
+ /**
+ * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+ * and registered with sysfs, as such it has been exposed to
+ * userspace. There should be no additional restrictions to how the
+ * state of this connector may be modified.
+ */
+ DRM_CONNECTOR_REGISTERED = 1,
+
+ /**
+ * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+ * to userspace and has since been unregistered and removed from
+ * userspace, or the connector was unregistered before it had a chance
+ * to be exposed to userspace (e.g. still in the
+ * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+ * unregistered, there are additional restrictions to how its state
+ * may be modified:
+ *
+ * - An unregistered connector may only have its DPMS changed from
+ * On->Off. Once DPMS is changed to Off, it may not be switched back
+ * to On.
+ * - Modesets are not allowed on unregistered connectors, unless they
+ * would result in disabling its assigned CRTCs. This means
+ * disabling a CRTC on an unregistered connector is OK, but enabling
+ * one is not.
+ * - Removing a CRTC from an unregistered connector is OK, but new
+ * CRTCs may never be assigned to an unregistered connector.
+ */
+ DRM_CONNECTOR_UNREGISTERED = 2,
+};
+
enum subpixel_order {
SubPixelUnknown = 0,
SubPixelHorizontalRGB,
@@ -460,6 +508,18 @@ struct drm_connector_state {
* drm_writeback_signal_completion()
*/
struct drm_writeback_job *writeback_job;
+
+ /**
+ * @max_requested_bpc: Connector property to limit the maximum bit
+ * depth of the pixels.
+ */
+ u8 max_requested_bpc;
+
+ /**
+ * @max_bpc: Connector max_bpc based on the requested max_bpc property
+ * and the connector bpc limitations obtained from the EDID.
+ */
+ u8 max_bpc;
};
/**
@@ -852,10 +912,12 @@ struct drm_connector {
bool ycbcr_420_allowed;
/**
- * @registered: Is this connector exposed (registered) with userspace?
+ * @registration_state: Is this connector initializing, exposed
+ * (registered) with userspace, or unregistered?
+ *
* Protected by @mutex.
*/
- bool registered;
+ enum drm_connector_registration_state registration_state;
/**
* @modes:
@@ -910,6 +972,17 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @vrr_capable_property: Optional property to help userspace
+ * query hardware support for variable refresh rate on a connector.
+ * Drivers can add the property to a connector by
+ * calling drm_connector_attach_vrr_capable_property().
+ *
+ * This should be updated only by calling
+ * drm_connector_set_vrr_capable_property().
+ */
+ struct drm_property *vrr_capable_property;
+
+ /**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
@@ -923,6 +996,12 @@ struct drm_connector {
*/
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @max_bpc_property: Default connector property for the max bpc to be
+ * driven out of the connector.
+ */
+ struct drm_property *max_bpc_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1083,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
@@ -1142,27 +1222,21 @@ static inline void drm_connector_put(struct drm_connector *connector)
}
/**
- * drm_connector_reference - acquire a connector reference
+ * drm_connector_is_unregistered - has the connector been unregistered from
+ * userspace?
* @connector: DRM connector
*
- * This is a compatibility alias for drm_connector_get() and should not be
- * used by new code.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
- drm_connector_get(connector);
-}
-
-/**
- * drm_connector_unreference - release a connector reference
- * @connector: DRM connector
+ * Checks whether or not @connector has been unregistered from userspace.
*
- * This is a compatibility alias for drm_connector_put() and should not be
- * used by new code.
+ * Returns:
+ * True if the connector was unregistered, false if the connector is
+ * registered or has not yet been registered with userspace.
*/
-static inline void drm_connector_unreference(struct drm_connector *connector)
+static inline bool
+drm_connector_is_unregistered(struct drm_connector *connector)
{
- drm_connector_put(connector);
+ return READ_ONCE(connector->registration_state) ==
+ DRM_CONNECTOR_UNREGISTERED;
}
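A small illustrative guard (not from this patch) for a driver detect or hotplug path:

    /* Userspace has already lost this connector; don't bother probing it. */
    if (drm_connector_is_unregistered(connector))
            return connector_status_disconnected;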
const char *drm_get_connector_status_name(enum drm_connector_status status);
@@ -1182,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1198,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max);
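A hedged sketch of wiring up the new connector properties in a driver; the 8..12 bpc range and the sink_supports_vrr flag are illustrative assumptions:

    /* At connector creation time: expose "max bpc" and "vrr_capable". */
    drm_connector_attach_max_bpc_property(connector, 8, 12);
    drm_connector_attach_vrr_capable_property(connector);

    /* Later, e.g. after parsing sink capabilities in the detect path: */
    drm_connector_set_vrr_capable_property(connector, sink_supports_vrr);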
/**
* struct drm_tile_group - Tile group metadata
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 92e7fc7f05a4..39c3900aab3c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
u32 pageflip_flags;
/**
+ * @vrr_enabled:
+ *
+ * Indicates if variable refresh rate should be enabled for the CRTC.
+ * Support for the requested VRR state will depend on driver and
+ * hardware capability - lacking support is not treated as failure.
+ */
+ bool vrr_enabled;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -744,8 +753,45 @@ struct drm_crtc_funcs {
*
* 0 on success or a negative error code on failure.
*/
- int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
- size_t *values_cnt);
+ int (*set_crc_source)(struct drm_crtc *crtc, const char *source);
+ /**
+ * @verify_crc_source:
+ *
+ * Verifies the source of frame CRC checksums before it is set as the CRC
+ * source and during CRC open. The source parameter can be NULL while the
+ * CRC source is being disabled.
+ *
+ * This callback is optional if the driver does not support any CRC
+ * generation functionality.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*verify_crc_source)(struct drm_crtc *crtc, const char *source,
+ size_t *values_cnt);
+ /**
+ * @get_crc_sources:
+ *
+ * Driver callback for getting a list of all the available sources for
+ * CRC generation. This callback depends upon @verify_crc_source, so the
+ * @verify_crc_source callback should be implemented before this one. The
+ * driver can pass the full list of available CRC sources; each source is
+ * verified via @verify_crc_source before being exposed to userspace.
+ *
+ * This callback is optional if the driver does not support exporting a
+ * list of possible CRC sources.
+ *
+ * RETURNS:
+ *
+ * a constant character pointer to the list of all the available CRC
+ * sources. On failure the driver should return NULL. @count should be
+ * updated with the number of sources in the list; if it is zero, no
+ * source from the list is processed.
+ */
+ const char *const *(*get_crc_sources)(struct drm_crtc *crtc,
+ size_t *count);
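A minimal hypothetical implementation of the three CRC hooks; the source list, the my_crtc_* names and the my_hw_enable_crc() helper are assumptions, not part of this patch:

    static const char * const my_crc_sources[] = { "auto", "none" };

    static const char *const *my_crtc_get_crc_sources(struct drm_crtc *crtc,
                                                      size_t *count)
    {
            *count = ARRAY_SIZE(my_crc_sources);
            return my_crc_sources;
    }

    static int my_crtc_verify_crc_source(struct drm_crtc *crtc,
                                         const char *source,
                                         size_t *values_cnt)
    {
            /* NULL means "disable CRC generation" and is always valid. */
            if (source && strcmp(source, "auto") && strcmp(source, "none"))
                    return -EINVAL;

            *values_cnt = 1; /* one CRC value per frame */
            return 0;
    }

    static int my_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
    {
            return my_hw_enable_crc(crtc, source != NULL);
    }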
/**
* @atomic_print_state:
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 6914633037a5..d65f034843ce 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
void drm_helper_resume_force_mode(struct drm_device *dev);
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb);
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb);
-
/* drm_probe_helper.c */
int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 000000000000..4487660b26b8
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Note that if the first call to the iterator macro returns false, no plane
+ * update is needed. The iterator returns the full plane src when no damage is
+ * passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+ while (drm_atomic_helper_damage_iter_next(iter, rect))
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+ /* private: Plane src in whole-number fb coordinates. */
+ struct drm_rect plane_src;
+ /* private: Rectangles in plane damage blob. */
+ const struct drm_rect *clips;
+ /* private: Number of rectangles in plane damage blob. */
+ uint32_t num_clips;
+ /* private: Current clip the iterator is advancing on. */
+ uint32_t curr_clip;
+ /* private: Whether a full plane update is needed. */
+ bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles as internal &drm_rect. Currently &drm_rect
+ * can be obtained by simply typecasting &drm_mode_rect, since both use signed
+ * 32-bit fields and drm_atomic_check_only() verifies that the damage clips are
+ * inside the fb.
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
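For orientation, a sketch of a plane update walking the damage clips; my_plane_atomic_update() and my_upload_rect() are assumed driver helpers:

    static void my_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state)
    {
            struct drm_plane_state *new_state = plane->state;
            struct drm_atomic_helper_damage_iter iter;
            struct drm_rect clip;

            drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
            drm_atomic_for_each_plane_damage(&iter, &clip) {
                    /* Upload only the damaged rectangle; the iterator hands back
                     * the full plane src when userspace sent no damage. */
                    my_upload_rect(plane, &clip);
            }
    }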
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index f9c6e0e3aec7..42411b3ea0c8 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -46,6 +46,16 @@ struct drm_device {
struct drm_master *master;
/**
+ * @driver_features: per-device driver features
+ *
+ * Drivers can clear specific flags here to disallow
+ * certain features on a per-device basis while still
+ * sharing a single &struct drm_driver instance across
+ * all devices.
+ */
+ u32 driver_features;
+
+ /**
* @unplugged:
*
* Flag to tell if the device has been unplugged.
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 05cc31b5db16..5736c942c85b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -123,8 +123,9 @@
# define DP_FRAMING_CHANGE_CAP (1 << 1)
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
-#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
-# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */
+# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
@@ -230,6 +231,8 @@
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -278,6 +281,8 @@
# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
#define DP_DSC_MAX_SLICE_WIDTH 0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
#define DP_DSC_SLICE_CAP_2 0x06D
# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
@@ -476,6 +481,7 @@
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
+# define DP_DECOMPRESSION_EN (1 << 0)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
@@ -684,6 +690,8 @@
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
+# define DP_EDP_14a 0x04 /* eDP 1.4a */
+# define DP_EDP_14b 0x05 /* eDP 1.4b */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -904,6 +912,57 @@
#define DP_AUX_HDCP_KSV_FIFO 0x6802C
#define DP_AUX_HDCP_AINFO 0x6803B
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+ DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN 1
+#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -962,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
@@ -992,6 +1052,7 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
struct edp_vsc_psr {
struct dp_sdp_header sdp_header;
@@ -1058,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
}
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) <<
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max Slicewidth = Number of Pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
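A hedged example of consuming these helpers, assuming a struct drm_dp_aux pointer named aux and omitting detailed error handling:

    u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

    if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd, sizeof(dsc_dpcd)) < 0)
            return;

    if (drm_dp_sink_supports_dsc(dsc_dpcd)) {
            u8 max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
            u32 max_slice_width = drm_dp_dsc_sink_max_slice_width(dsc_dpcd);

            /* ... pick a slice count/width for the mode within these limits ... */
    }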
+
/*
* DisplayPort AUX channel
*/
@@ -1260,12 +1359,12 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
*/
enum drm_dp_quirk {
/**
- * @DP_DPCD_QUIRK_LIMITED_M_N:
+ * @DP_DPCD_QUIRK_CONSTANT_N:
*
* The device requires main link attributes Mvid and Nvid to be limited
- * to 16 bits.
+ * to 16 bits. A constant value (0x8000) is therefore used for compatibility.
*/
- DP_DPCD_QUIRK_LIMITED_M_N,
+ DP_DPCD_QUIRK_CONSTANT_N,
};
/**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26a0766..59f005b419cf 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
- struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr {
int pbn_div;
/**
- * @state: State information for topology manager
- */
- struct drm_dp_mst_topology_state *state;
-
- /**
* @funcs: Atomic helper callbacks
*/
const struct drm_private_state_funcs *funcs;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..35af23f5fa0d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -56,7 +56,6 @@ struct drm_printer;
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
#define DRIVER_SYNCOBJ 0x40000
-#define DRIVER_PREFER_XBGR_30BPP 0x80000
/**
* struct drm_driver - DRM driver structure
@@ -472,6 +471,8 @@ struct drm_driver {
* @gem_prime_export:
*
* export GEM -> dmabuf
+ *
+ * This defaults to drm_gem_prime_export() if not set.
*/
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
@@ -479,6 +480,8 @@ struct drm_driver {
* @gem_prime_import:
*
* import dmabuf -> GEM
+ *
+ * This defaults to drm_gem_prime_import() if not set.
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -524,8 +527,10 @@ struct drm_driver {
* @dumb_map_offset:
*
* Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer. GEM-based drivers must use
- * drm_gem_create_mmap_offset() to implement this.
+ * memory map a dumb buffer.
+ *
+ * The default implementation is drm_gem_create_mmap_offset(). GEM-based
+ * drivers must not override this.
*
* Called by the user via ioctl.
*
@@ -545,6 +550,9 @@ struct drm_driver {
*
* Called by the user via ioctl.
*
+ * The default implementation is drm_gem_dumb_destroy(). GEM-based drivers
+ * must not override this.
+ *
* Returns:
*
* Zero on success, negative errno on failure.
@@ -622,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev);
void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
@@ -654,14 +661,14 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
* @dev: DRM device to check
* @feature: feature flag
*
- * This checks @dev for driver features, see &drm_driver.driver_features and the
- * various DRIVER_\* flags.
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various DRIVER_\* flags.
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature)
{
- return dev->driver->driver_features & feature;
+ return dev->driver->driver_features & dev->driver_features & feature;
}
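To illustrate the per-device mask, a hypothetical probe snippet (hw_lacks_atomic() and setup_atomic_paths() are assumed helpers):

    /* One shared &struct drm_driver advertises DRIVER_ATOMIC, but a given
     * device can opt out before registration. */
    if (hw_lacks_atomic(pdev))
            ddev->driver_features &= ~DRIVER_ATOMIC;

    /* Core and driver code then test the effective (ANDed) mask: */
    if (drm_core_check_feature(ddev, DRIVER_ATOMIC))
            setup_atomic_paths(ddev);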
/**
@@ -675,7 +682,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
- dev->mode_config.funcs->atomic_commit != NULL;
+ (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 000000000000..d03f1b83421a
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES 15
+#define DSC_MUX_WORD_SIZE_8_10_BPC 48
+#define DSC_MUX_WORD_SIZE_12_BPC 64
+#define DSC_RC_PIXELS_PER_GROUP 3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095
+#define DSC_RANGE_BPG_OFFSET_MASK 0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST 8192
+#define DSC_RC_EDGE_FACTOR_CONST 6
+#define DSC_RC_TGT_OFFSET_HI_CONST 3
+#define DSC_RC_TGT_OFFSET_LO_CONST 3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT 4
+#define DSC_PPS_BPC_SHIFT 4
+#define DSC_PPS_MSB_SHIFT 8
+#define DSC_PPS_LSB_MASK (0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT 2
+#define DSC_PPS_SIMPLE422_SHIFT 3
+#define DSC_PPS_CONVERT_RGB_SHIFT 4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
+#define DSC_PPS_NATIVE_420_SHIFT 1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+ /* Min Quantization Parameters allowed for this range */
+ u8 range_min_qp;
+ /* Max Quantization Parameters allowed for this range */
+ u8 range_max_qp;
+ /* Bits/group offset to apply to target for this group */
+ u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+ /* Bits / component for previous reconstructed line buffer */
+ u8 line_buf_depth;
+ /* Bits per component to code (must be 8, 10, or 12) */
+ u8 bits_per_component;
+ /*
+ * Flag indicating to do RGB - YCoCg conversion
+ * and back (should be 1 for RGB input)
+ */
+ bool convert_rgb;
+ u8 slice_count;
+ /* Slice Width */
+ u16 slice_width;
+ /* Slice Height */
+ u16 slice_height;
+ /*
+ * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+ * outside of DSC encode/decode algorithm)
+ */
+ bool enable422;
+ /* Picture Width */
+ u16 pic_width;
+ /* Picture Height */
+ u16 pic_height;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_high;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_low;
+ /* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+ u16 bits_per_pixel;
+ /*
+ * Factor to determine if an edge is present based
+ * on the bits produced
+ */
+ u8 rc_edge_factor;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit1;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit0;
+ /* Number of pixels to delay the initial transmission */
+ u16 initial_xmit_delay;
+ /* Number of pixels to delay the VLD on the decoder, not including SSM */
+ u16 initial_dec_delay;
+ /* Block prediction enable */
+ bool block_pred_enable;
+ /* Bits/group offset to use for first line of the slice */
+ u8 first_line_bpg_offset;
+ /* Value to use for RC model offset at slice start */
+ u16 initial_offset;
+ /* Thresholds defining each of the buffer ranges */
+ u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /* Parameters for each of the RC ranges */
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+ /* Total size of RC model */
+ u16 rc_model_size;
+ /* Minimum QP where flatness information is sent */
+ u8 flatness_min_qp;
+ /* Maximum QP where flatness information is sent */
+ u8 flatness_max_qp;
+ /* Initial value for scale factor */
+ u8 initial_scale_value;
+ /* Decrement scale factor every scale_decrement_interval groups */
+ u16 scale_decrement_interval;
+ /* Increment scale factor every scale_increment_interval groups */
+ u16 scale_increment_interval;
+ /* Non-first line BPG offset to use */
+ u16 nfl_bpg_offset;
+ /* BPG offset used to enforce slice bit */
+ u16 slice_bpg_offset;
+ /* Final RC linear transformation offset value */
+ u16 final_offset;
+ /* Enable on-off VBR (i.e., disable stuffing bits) */
+ bool vbr_enable;
+ /* Mux word size (in bits) for SSM mode */
+ u8 mux_word_size;
+ /*
+ * The (max) size in bytes of the "chunks" that are
+ * used in slice multiplexing
+ */
+ u16 slice_chunk_size;
+ /* Rate Control buffer size in bits */
+ u16 rc_bits;
+ /* DSC Minor Version */
+ u8 dsc_version_minor;
+ /* DSC Major version */
+ u8 dsc_version_major;
+ /* Native 4:2:2 support */
+ bool native_422;
+ /* Native 4:2:0 support */
+ bool native_420;
+ /* Additional bits/group for the second line of a slice for native 4:2:0 */
+ u8 second_line_bpg_offset;
+ /* Number of bits deallocated for each group that is not in the second line of a slice */
+ u16 nsl_bpg_offset;
+ /* Offset adjustment for the second line in native 4:2:0 mode */
+ u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines the picture parameter set (PPS), which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 of the VESA DSC specification v1.1/v1.2.
+ * PPS fields that span more than one byte should be stored in big-endian
+ * format.
+ */
+struct drm_dsc_picture_parameter_set {
+ /**
+ * @dsc_version:
+ * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+ * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+ */
+ u8 dsc_version;
+ /**
+ * @pps_identifier:
+ * PPS1[7:0] - Application specific identifier that can be
+ * used to differentiate between different PPS tables.
+ */
+ u8 pps_identifier;
+ /**
+ * @pps_reserved:
+ * PPS2[7:0]- RESERVED Byte
+ */
+ u8 pps_reserved;
+ /**
+ * @pps_3:
+ * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+ * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+ * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+ * 0xE - 14 bits for DSC 1.2, 0xF - 14 bits for DSC 1.2).
+ * PPS3[7:4] - bits_per_component: Bits per component for the original
+ * pixels of the encoded picture.
+ * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+ * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+ * allowed only when dsc_minor_version = 0x2)
+ */
+ u8 pps_3;
+ /**
+ * @pps_4:
+ * PPS4[1:0] - These are the most significant 2 bits of
+ * compressed BPP bits_per_pixel[9:0] syntax element.
+ * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+ * PPS4[3] - simple_422: Indicates if decoder drops samples to
+ * reconstruct the 4:2:2 picture.
+ * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is
+ * active.
+ * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+ * groups in the picture
+ * PPS4[7:6] - Reserved bits
+ */
+ u8 pps_4;
+ /**
+ * @bits_per_pixel_low:
+ * PPS5[7:0] - This indicates the lower significant 8 bits of
+ * the compressed BPP bits_per_pixel[9:0] element.
+ */
+ u8 bits_per_pixel_low;
+ /**
+ * @pic_height:
+ * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+ * within the raster.
+ */
+ __be16 pic_height;
+ /**
+ * @pic_width:
+ * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+ * the raster.
+ */
+ __be16 pic_width;
+ /**
+ * @slice_height:
+ * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+ */
+ __be16 slice_height;
+ /**
+ * @slice_width:
+ * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+ */
+ __be16 slice_width;
+ /**
+ * @chunk_size:
+ * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+ * that are used for slice multiplexing.
+ */
+ __be16 chunk_size;
+ /**
+ * @initial_xmit_delay_high:
+ * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+ * It specifies the number of pixel times that the encoder waits before
+ * transmitting data from its rate buffer.
+ * PPS16[7:2] - Reserved
+ */
+ u8 initial_xmit_delay_high;
+ /**
+ * @initial_xmit_delay_low:
+ * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+ */
+ u8 initial_xmit_delay_low;
+ /**
+ * @initial_dec_delay:
+ *
+ * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+ * of pixel times that the decoder accumulates data in its rate buffer
+ * before starting to decode and output pixels.
+ */
+ __be16 initial_dec_delay;
+ /**
+ * @pps20_reserved:
+ *
+ * PPS20[7:0] - Reserved
+ */
+ u8 pps20_reserved;
+ /**
+ * @initial_scale_value:
+ * PPS21[5:0] - Initial rcXformScale factor used at beginning
+ * of a slice.
+ * PPS21[7:6] - Reserved
+ */
+ u8 initial_scale_value;
+ /**
+ * @scale_increment_interval:
+ * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+ * the rcXformScale factor at end of a slice.
+ */
+ __be16 scale_increment_interval;
+ /**
+ * @scale_decrement_interval_high:
+ * PPS24[3:0] - Higher 4 bits indicating number of group times between
+ * decrementing the rcXformScale factor at beginning of a slice.
+ * PPS24[7:4] - Reserved
+ */
+ u8 scale_decrement_interval_high;
+ /**
+ * @scale_decrement_interval_low:
+ * PPS25[7:0] - Lower 8 bits of scale decrement interval
+ */
+ u8 scale_decrement_interval_low;
+ /**
+ * @pps26_reserved:
+ * PPS26[7:0]
+ */
+ u8 pps26_reserved;
+ /**
+ * @first_line_bpg_offset:
+ * PPS27[4:0] - Number of additional bits that are allocated
+ * for each group on first line of a slice.
+ * PPS27[7:5] - Reserved
+ */
+ u8 first_line_bpg_offset;
+ /**
+ * @nfl_bpg_offset:
+ * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+ * deallocated for each group for groups after the first line of slice.
+ */
+ __be16 nfl_bpg_offset;
+ /**
+ * @slice_bpg_offset:
+ * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+ * group to enforce the slice constraint.
+ */
+ __be16 slice_bpg_offset;
+ /**
+ * @initial_offset:
+ * PPS32,33[7:0] - Initial value for rcXformOffset
+ */
+ __be16 initial_offset;
+ /**
+ * @final_offset:
+ * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+ */
+ __be16 final_offset;
+ /**
+ * @flatness_min_qp:
+ * PPS36[4:0] - Minimum QP at which flatness is signaled and
+ * flatness QP adjustment is made.
+ * PPS36[7:5] - Reserved
+ */
+ u8 flatness_min_qp;
+ /**
+ * @flatness_max_qp:
+ * PPS37[4:0] - Max QP at which flatness is signalled and
+ * the flatness adjustment is made.
+ * PPS37[7:5] - Reserved
+ */
+ u8 flatness_max_qp;
+ /**
+ * @rc_model_size:
+ * PPS38,39[7:0] - Number of bits within RC Model.
+ */
+ __be16 rc_model_size;
+ /**
+ * @rc_edge_factor:
+ * PPS40[3:0] - Ratio of current activity vs. previous
+ * activity to determine the presence of an edge.
+ * PPS40[7:4] - Reserved
+ */
+ u8 rc_edge_factor;
+ /**
+ * @rc_quant_incr_limit0:
+ * PPS41[4:0] - QP threshold used in short term RC
+ * PPS41[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit0;
+ /**
+ * @rc_quant_incr_limit1:
+ * PPS42[4:0] - QP threshold used in short term RC
+ * PPS42[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit1;
+ /**
+ * @rc_tgt_offset:
+ * PPS43[3:0] - Lower end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ * PPS43[7:4] - Upper end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ */
+ u8 rc_tgt_offset;
+ /**
+ * @rc_buf_thresh:
+ * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+ * the 15 ranges defined by 14 thresholds.
+ */
+ u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /**
+ * @rc_range_parameters:
+ * PPS58[7:0] - PPS87[7:0]
+ * Parameters that correspond to each of the 15 ranges.
+ */
+ __be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+ /**
+ * @native_422_420:
+ * PPS88[0] - 0 = Native 4:2:2 not used
+ * 1 = Native 4:2:2 used
+ * PPS88[1] - 0 = Native 4:2:0 not used
+ * 1 = Native 4:2:0 used
+ * PPS88[7:2] - Reserved 6 bits
+ */
+ u8 native_422_420;
+ /**
+ * @second_line_bpg_offset:
+ * PPS89[4:0] - Additional bits/group budget for the
+ * second line of a slice in Native 4:2:0 mode.
+ * Set to 0 if DSC minor version is 1 or native420 is 0.
+ * PPS89[7:5] - Reserved
+ */
+ u8 second_line_bpg_offset;
+ /**
+ * @nsl_bpg_offset:
+ * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+ * for each group that is not in the second line of a slice.
+ */
+ __be16 nsl_bpg_offset;
+ /**
+ * @second_line_offset_adj:
+ * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+ * line in Native 4:2:0 mode.
+ */
+ __be16 second_line_offset_adj;
+ /**
+ * @pps_long_94_reserved:
+ * PPS 94, 95, 96, 97 - Reserved
+ */
+ u32 pps_long_94_reserved;
+ /**
+ * @pps_long_98_reserved:
+ * PPS 98, 99, 100, 101 - Reserved
+ */
+ u32 pps_long_98_reserved;
+ /**
+ * @pps_long_102_reserved:
+ * PPS 102, 103, 104, 105 - Reserved
+ */
+ u32 pps_long_102_reserved;
+ /**
+ * @pps_long_106_reserved:
+ * PPS 106, 107, 108, 109 - reserved
+ */
+ u32 pps_long_106_reserved;
+ /**
+ * @pps_long_110_reserved:
+ * PPS 110, 111, 112, 113 - reserved
+ */
+ u32 pps_long_110_reserved;
+ /**
+ * @pps_long_114_reserved:
+ * PPS 114 - 117 - reserved
+ */
+ u32 pps_long_114_reserved;
+ /**
+ * @pps_long_118_reserved:
+ * PPS 118 - 121 - reserved
+ */
+ u32 pps_long_118_reserved;
+ /**
+ * @pps_long_122_reserved:
+ * PPS 122- 125 - reserved
+ */
+ u32 pps_long_122_reserved;
+ /**
+ * @pps_short_126_reserved:
+ * PPS 126, 127 - reserved
+ */
+ __be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe used to send the Picture
+ * Parameter Set metadata that is required before enabling VESA Display Stream
+ * Compression. It is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in drm_dp_helper.h and the PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+ struct dp_sdp_header pps_header;
+ struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+
+#endif /* _DRM_DSC_H_ */
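A minimal usage sketch for the two helpers declared above (not part of the patch): it assumes the driver has already filled out a struct drm_dsc_config and supplies a transport-specific send_sdp() callback, both of which are placeholders here.

/* Sketch only: example_send_dsc_pps() and send_sdp() are illustrative. */
#include <linux/types.h>
#include <drm/drm_dsc.h>

static void example_send_dsc_pps(const struct drm_dsc_config *dsc_cfg,
				 void (*send_sdp)(const void *buf, size_t len))
{
	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);          /* fill the DP SDP header */
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg); /* pack the 128-byte PPS payload */

	send_sdp(&pps_sdp, sizeof(pps_sdp));           /* transport-specific write */
}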
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b25d12ef120a..e3c404833115 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -214,9 +214,9 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
/* YCBCR 420 deep color modes */
-#define DRM_EDID_YCBCR420_DC_48 (1 << 6)
-#define DRM_EDID_YCBCR420_DC_36 (1 << 5)
-#define DRM_EDID_YCBCR420_DC_30 (1 << 4)
+#define DRM_EDID_YCBCR420_DC_48 (1 << 2)
+#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
+#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 4f597c0730b4..70cfca03d812 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -28,6 +28,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_util.h>
struct drm_encoder;
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 96e26e3b9a0c..8dbbe1eece1b 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,9 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state);
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5db08c8f1d25..bb9acea61369 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -604,6 +604,16 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
+/**
+ * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
+ * @a: memory range whose users are to be removed
+ * @name: requesting driver name
+ * @primary: also kick vga16fb if present
+ *
+ * This function removes framebuffer devices (initialized by firmware/bootloader)
+ * which use the memory range described by @a. If @a is NULL, all such devices
+ * are removed.
+ */
static inline int
drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
@@ -615,4 +625,28 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
#endif
}
+/**
+ * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
+ * @pdev: PCI device
+ * @resource_id: index of PCI BAR configuring framebuffer memory
+ * @name: requesting driver name
+ *
+ * This function removes framebuffer devices (e.g. initialized by firmware)
+ * using the memory range configured for @pdev's BAR @resource_id.
+ *
+ * The function assumes that a PCI device with a shadowed ROM drives the
+ * primary display and so kicks out vga16fb.
+ */
+static inline int
+drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ int resource_id,
+ const char *name)
+{
+#if IS_REACHABLE(CONFIG_FB)
+ return remove_conflicting_pci_framebuffers(pdev, resource_id, name);
+#else
+ return 0;
+#endif
+}
+
#endif
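A hedged sketch of how a PCI driver probe path might use the new helper above; the BAR index 0 and the "exampledrm" name are assumptions for illustration only.

/* Sketch only: BAR 0 and the driver name are illustrative assumptions. */
#include <linux/pci.h>
#include <drm/drm_fb_helper.h>

static int example_kick_out_firmware_fb(struct pci_dev *pdev)
{
	return drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
								 "exampledrm");
}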
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 26485acc51d7..84ac79219e4c 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -164,14 +164,14 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned authenticated :1;
+ bool authenticated;
/**
* @stereo_allowed:
*
* True when the client has asked us to expose stereo 3D mode flags.
*/
- unsigned stereo_allowed :1;
+ bool stereo_allowed;
/**
* @universal_planes:
@@ -179,10 +179,10 @@ struct drm_file {
* True if client understands CRTC primary planes and cursor planes
* in the plane list. Automatically set when @atomic is set.
*/
- unsigned universal_planes:1;
+ bool universal_planes;
/** @atomic: True if client understands atomic properties. */
- unsigned atomic:1;
+ bool atomic;
/**
* @aspect_ratio_allowed:
@@ -190,14 +190,14 @@ struct drm_file {
* True, if client can handle picture aspect ratios, and has requested
* to pass this information along with the mode.
*/
- unsigned aspect_ratio_allowed:1;
+ bool aspect_ratio_allowed;
/**
* @writeback_connectors:
*
* True if client understands writeback connectors
*/
- unsigned writeback_connectors:1;
+ bool writeback_connectors;
/**
* @is_master:
@@ -208,7 +208,7 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned is_master:1;
+ bool is_master;
/**
* @master:
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index f9c15845f465..bcb389f04618 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,30 +25,113 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/*
+ * DRM formats are little endian. Define host endian variants for the
+ * most common formats here, to reduce the #ifdefs needed in drivers.
+ *
+ * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in
+ * case the format can't be specified otherwise, so we don't end up
+ * with two values describing the same format.
+ */
+#ifdef __BIG_ENDIAN
+# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \
+ DRM_FORMAT_BIG_ENDIAN)
+# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \
+ DRM_FORMAT_BIG_ENDIAN)
+# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888
+# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888
+#else
+# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555
+# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565
+# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888
+# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888
+#endif
+
struct drm_device;
struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
- * @format: 4CC format identifier (DRM_FORMAT_*)
- * @depth: Color depth (number of bits per pixel excluding padding bits),
- * valid for a subset of RGB formats only. This is a legacy field, do not
- * use in new code and set to 0 for new formats.
- * @num_planes: Number of color planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @has_alpha: Does the format embeds an alpha component?
- * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
+ /** @format: 4CC format identifier (DRM_FORMAT_*) */
u32 format;
+
+ /**
+ * @depth:
+ *
+ * Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do
+ * not use in new code and set to 0 for new formats.
+ */
u8 depth;
+
+ /** @num_planes: Number of color planes (1 to 3) */
u8 num_planes;
- u8 cpp[3];
+
+ union {
+ /**
+ * @cpp:
+ *
+ * Number of bytes per pixel (per plane), this is aliased with
+ * @char_per_block. It is deprecated in favour of using the
+ * triplet @char_per_block, @block_w, @block_h for better
+ * describing the pixel format.
+ */
+ u8 cpp[3];
+
+ /**
+ * @char_per_block:
+ *
+ * Number of bytes per block (per plane), where blocks are
+ * defined as a rectangle of pixels which are stored next to
+ * each other in a byte aligned memory region. Together with
+ * @block_w and @block_h this is used to properly describe tiles
+ * in tiled formats or to describe groups of pixels in packed
+ * formats for which the memory needed for a single pixel is not
+ * byte aligned.
+ *
+ * @cpp has been kept for historical reasons because there are
+ * a lot of places in drivers where it's used. In drm core for
+ * generic code paths the preferred way is to use
+ * @char_per_block, drm_format_info_block_width() and
+ * drm_format_info_block_height() which allows handling both
+ * block and non-block formats in the same way.
+ *
+ * For formats that are intended to be used only with non-linear
+ * modifiers both @cpp and @char_per_block must be 0 in the
+ * generic format table. Drivers could supply accurate
+ * information from their drm_mode_config.get_format_info hook
+ * if they want the core to validate the pitch.
+ */
+ u8 char_per_block[3];
+ };
+
+ /**
+ * @block_w:
+ *
+ * Block width in pixels, this is intended to be accessed through
+ * drm_format_info_block_width()
+ */
+ u8 block_w[3];
+
+ /**
+ * @block_h:
+ *
+ * Block height in pixels, this is intended to be accessed through
+ * drm_format_info_block_height()
+ */
+ u8 block_h[3];
+
+ /** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
+ /** @vsub: Vertical chroma subsampling factor */
u8 vsub;
+
+ /** @has_alpha: Does the format embed an alpha component? */
bool has_alpha;
+
+ /** @is_yuv: Is it a YUV format? */
bool is_yuv;
};
@@ -66,12 +149,20 @@ const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
+ uint32_t bpp, uint32_t depth);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane);
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane);
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
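A short sketch of the block-based pitch helper declared above; 'info' and 'width' are assumed to come from the framebuffer being validated, and the wrapper name is hypothetical.

/* Sketch only: example_min_pitch() is a hypothetical wrapper. */
#include <drm/drm_fourcc.h>

static u64 example_min_pitch(const struct drm_format_info *info,
			     unsigned int width)
{
	/*
	 * For non-block formats block_w/block_h are 1 and this reduces to the
	 * old width * cpp[0] computation; for block formats it accounts for
	 * char_per_block and the block dimensions.
	 */
	return drm_format_info_min_pitch(info, 0, width);
}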
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c50502c656e5..c94acedfb08e 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
}
/**
- * drm_framebuffer_reference - acquire a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_get() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_get(fb);
-}
-
-/**
- * drm_framebuffer_unreference - release a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_put() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_put(fb);
-}
-
-/**
* drm_framebuffer_read_refcount - read the framebuffer reference count.
* @fb: framebuffer
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 3583b98a1718..c95727425284 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -38,6 +38,121 @@
#include <drm/drm_vma_manager.h>
+struct drm_gem_object;
+
+/**
+ * struct drm_gem_object_funcs - GEM object functions
+ */
+struct drm_gem_object_funcs {
+ /**
+ * @free:
+ *
+ * Destructor for drm_gem_objects.
+ *
+ * This callback is mandatory.
+ */
+ void (*free)(struct drm_gem_object *obj);
+
+ /**
+ * @open:
+ *
+ * Called upon GEM handle creation.
+ *
+ * This callback is optional.
+ */
+ int (*open)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @close:
+ *
+ * Called upon GEM handle release.
+ *
+ * This callback is optional.
+ */
+ void (*close)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @print_info:
+ *
+ * If driver subclasses struct &drm_gem_object, it can implement this
+ * optional hook for printing additional driver specific info.
+ *
+ * drm_printf_indent() should be used in the callback passing it the
+ * indent argument.
+ *
+ * This callback is called from drm_gem_print_info().
+ *
+ * This callback is optional.
+ */
+ void (*print_info)(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+ /**
+ * @export:
+ *
+ * Export backing buffer as a &dma_buf.
+ * If this is not set drm_gem_prime_export() is used.
+ *
+ * This callback is optional.
+ */
+ struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
+
+ /**
+ * @pin:
+ *
+ * Pin backing buffer in memory.
+ *
+ * This callback is optional.
+ */
+ int (*pin)(struct drm_gem_object *obj);
+
+ /**
+ * @unpin:
+ *
+ * Unpin backing buffer.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct drm_gem_object *obj);
+
+ /**
+ * @get_sg_table:
+ *
+ * Returns a Scatter-Gather table representation of the buffer.
+ * Used when exporting a buffer.
+ *
+ * This callback is mandatory if buffer export is supported.
+ */
+ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
+
+ /**
+ * @vmap:
+ *
+ * Returns a virtual address for the buffer.
+ *
+ * This callback is optional.
+ */
+ void *(*vmap)(struct drm_gem_object *obj);
+
+ /**
+ * @vunmap:
+ *
+ * Releases the address previously returned by @vmap.
+ *
+ * This callback is optional.
+ */
+ void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+ /**
+ * @vm_ops:
+ *
+ * Virtual memory operations used with mmap.
+ *
+ * This is optional but necessary for mmap support.
+ */
+ const struct vm_operations_struct *vm_ops;
+};
+
/**
* struct drm_gem_object - GEM buffer object
*
@@ -146,6 +261,17 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * @funcs:
+ *
+ * Optional GEM object functions. If this is set, it will be used instead of the
+ * corresponding &drm_driver GEM callbacks.
+ *
+ * New drivers should use this.
+ *
+ */
+ const struct drm_gem_object_funcs *funcs;
};
/**
@@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj)
void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);
-/**
- * drm_gem_object_reference - acquire a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_get() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_get(obj);
-}
-
-/**
- * __drm_gem_object_unreference - raw function to release a GEM buffer object
- * reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for __drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- __drm_gem_object_put(obj);
-}
-
-/**
- * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put_unlocked() and should
- * not be used by new code.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- drm_gem_object_put_unlocked(obj);
-}
-
-/**
- * drm_gem_object_unreference - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- drm_gem_object_put(obj);
-}
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
+int drm_gem_pin(struct drm_gem_object *obj);
+void drm_gem_unpin(struct drm_gem_object *obj);
+void *drm_gem_vmap(struct drm_gem_object *obj);
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif /* __DRM_GEM_H__ */
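A minimal sketch of a driver wiring up per-object callbacks through &drm_gem_object_funcs; example_gem_free(), example_gem_init() and the assumed kzalloc()-based allocation are placeholders, not part of any real driver.

/* Sketch only: assumes the object was allocated with kzalloc() by the driver. */
#include <linux/slab.h>
#include <drm/drm_gem.h>

static void example_gem_free(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);
	kfree(obj);		/* matches the assumed kzalloc() in the driver */
}

static const struct drm_gem_object_funcs example_gem_funcs = {
	.free = example_gem_free,	/* @free is the only mandatory hook */
};

static void example_gem_init(struct drm_gem_object *obj)
{
	/* With @funcs set, core helpers such as drm_gem_vmap() use these
	 * callbacks instead of the corresponding &drm_driver hooks. */
	obj->funcs = &example_gem_funcs;
}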
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 19777145cf8e..07c504940ba1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+ .gem_create_object = drm_cma_gem_create_object_default_funcs, \
+ .dumb_create = drm_gem_cma_dumb_create, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
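A sketch of DRM_GEM_CMA_VMAP_DRIVER_OPS dropped into a hypothetical &drm_driver; the name, date and the omitted file_operations are assumptions for illustration.

/* Sketch only: fops and the remaining driver glue are intentionally omitted. */
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

static struct drm_driver example_cma_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_PRIME,
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name  = "example-cma",
	.desc  = "hypothetical CMA driver",
	.date  = "20181201",
	.major = 1,
	.minor = 0,
};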
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a830602a2e4..000000000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
- DRM_GLOBAL_TTM_MEM = 0,
- DRM_GLOBAL_TTM_BO,
- DRM_GLOBAL_TTM_OBJECT,
- DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
- enum drm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct drm_global_reference *);
- void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d870139..a6de09c5e47f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
#define DRM_HDCP_DDC_BSTATUS 0x41
#define DRM_HDCP_DDC_KSV_FIFO 0x43
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0 0x00
+#define HDCP_STREAM_TYPE1 0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG 1
+#define HDCP_2_2_AKE_INIT 2
+#define HDCP_2_2_AKE_SEND_CERT 3
+#define HDCP_2_2_AKE_NO_STORED_KM 4
+#define HDCP_2_2_AKE_STORED_KM 5
+#define HDCP_2_2_AKE_SEND_HPRIME 7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8
+#define HDCP_2_2_LC_INIT 9
+#define HDCP_2_2_LC_SEND_LPRIME 10
+#define HDCP_2_2_SKE_SEND_EKS 11
+#define HDCP_2_2_REP_SEND_RECVID_LIST 12
+#define HDCP_2_2_REP_SEND_ACK 15
+#define HDCP_2_2_REP_STREAM_MANAGE 16
+#define HDCP_2_2_REP_STREAM_READY 17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+
+#define HDCP_2_2_RTX_LEN 8
+#define HDCP_2_2_RRX_LEN 8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3
+#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+ HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN 384
+
+#define HDCP_2_2_E_KPUB_KM_LEN 128
+#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN 32
+#define HDCP_2_2_E_KH_KM_LEN 16
+#define HDCP_2_2_RN_LEN 8
+#define HDCP_2_2_L_PRIME_LEN 32
+#define HDCP_2_2_E_DKEY_KS_LEN 16
+#define HDCP_2_2_RIV_LEN 8
+#define HDCP_2_2_SEQ_NUM_LEN 3
+#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT 31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \
+ HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN 32
+
+/* The following macros take a byte at a time for bit(s) masking */
+/*
+ * TODO: This has to be changed for DP MST, as multiple streams on the
+ * same port are possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_TXCAP_MASK_LEN 2
+#define HDCP_2_2_RXCAPS_LEN 3
+#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN 2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+ u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+ u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+ u8 reserved[2];
+ u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+ u8 stream_id;
+ u8 stream_type;
+} __packed;
+
+/*
+ * The TxCaps field specified in the HDCP HDMI, DP specs
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+ /* Transmitter must set this to 0x2 */
+ u8 version;
+
+ /* Reserved for HDCP and DP Spec. Read as Zero */
+ u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+ u8 msg_id;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+ u8 msg_id;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+ u8 msg_id;
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+ u8 msg_id;
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+ u8 msg_id;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+ u8 msg_id;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+ u8 msg_id;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+ u8 msg_id;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+ u8 msg_id;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+ u8 msg_id;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+ u8 msg_id;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+ u8 msg_id;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+ u8 msg_id;
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+/* HDCP2.2 TIMEOUTs in mSec */
+#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02
+#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF
+#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200
+
+/* The macros below take a byte at a time and mask the bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN 2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+
#endif
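A hedged sketch of combining the RxInfo masking macros above into a full downstream device count while parsing a receiver ID list message; the message pointer and the helper name are assumptions.

/* Sketch only: msg is assumed to have been read back from the receiver. */
#include <linux/types.h>
#include <drm/drm_hdcp.h>

static u32 example_rxinfo_device_count(const struct hdcp2_rep_send_receiverid_list *msg)
{
	/* DEVICE_COUNT[4] sits in the first RxInfo byte, DEVICE_COUNT[3:0]
	 * in the second, per the bit masks defined above. */
	return (HDCP_2_2_DEV_COUNT_HI(msg->rx_info[0]) << 4) |
	       HDCP_2_2_DEV_COUNT_LO(msg->rx_info[1]);
}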
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef19064b0f..491528f48cfb 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,12 @@ struct mipi_dsi_device_info {
* @format: pixel format for video mode
* @lanes: number of active data lanes
* @mode_flags: DSI operation mode related flags
+ * @hs_rate: maximum lane frequency for high speed mode in hertz; this should
+ * be set to the real limits of the hardware, and zero is only accepted for
+ * legacy drivers
+ * @lp_rate: maximum lane frequency for low power mode in hertz; this should
+ * be set to the real limits of the hardware, and zero is only accepted for
+ * legacy drivers
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -178,6 +184,8 @@ struct mipi_dsi_device {
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ unsigned long hs_rate;
+ unsigned long lp_rate;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index a0b202e1d69a..572274ccbec7 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -52,6 +52,12 @@ struct drm_mode_config_funcs {
* requested metadata, but most of that is left to the driver. See
* &struct drm_mode_fb_cmd2 for details.
*
+ * To validate the pixel format and modifier drivers can use
+ * drm_any_plane_has_format() to make sure at least one plane supports
+ * the requested values. Note that the driver must first determine the
+ * actual modifier used if the request doesn't have it specified,
+ * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -628,6 +634,15 @@ struct drm_mode_config {
*/
struct drm_property *prop_crtc_id;
/**
+ * @prop_fb_damage_clips: Optional plane property to mark damaged
+ * regions on the plane in framebuffer coordinates of the framebuffer
+ * attached to the plane.
+ *
+ * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+ * plane src coordinates, damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property *prop_fb_damage_clips;
+ /**
* @prop_active: Default atomic CRTC property to control the active
* state, which is the simplified implementation for DPMS in atomic
* drivers.
@@ -639,6 +654,11 @@ struct drm_mode_config {
* connectors must be of and active must be set to disabled, too.
*/
struct drm_property *prop_mode_id;
+ /**
+ * @prop_vrr_enabled: Default atomic CRTC property to indicate
+ * whether variable refresh rate should be enabled on the CRTC.
+ */
+ struct drm_property *prop_vrr_enabled;
/**
* @dvi_i_subconnector_property: Optional DVI-I property to
@@ -811,6 +831,28 @@ struct drm_mode_config {
uint32_t preferred_depth, prefer_shadow;
/**
+ * @quirk_addfb_prefer_xbgr_30bpp:
+ *
+ * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
+ * only ever be set by the nouveau kernel driver.
+ */
+ bool quirk_addfb_prefer_xbgr_30bpp;
+
+ /**
+ * @quirk_addfb_prefer_host_byte_order:
+ *
+ * When set to true drm_mode_addfb() will pick host byte order
+ * pixel_format when calling drm_mode_addfb2(). This is how
+ * drm_mode_addfb() should have worked from day one. It
+ * didn't though, so we ended up with quirks in both kernel
+ * and userspace drivers to deal with the broken behavior.
+ * Simply fixing drm_mode_addfb() unconditionally would break
+ * these drivers, so add a quirk bit here to let drivers
+ * opt in.
+ */
+ bool quirk_addfb_prefer_host_byte_order;
+
+ /**
* @async_page_flip: Does this device support async flips on the primary
* plane?
*/
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index a685d1bb21f2..a308f2d6496f 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This has the advantage of reducing boilerplate, but also properly
+ * checking return values where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary.
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ drm_modeset_acquire_init(&ctx, flags); \
+modeset_lock_retry: \
+ ret = drm_modeset_lock_all_ctx(dev, &ctx); \
+ if (ret) \
+ goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+modeset_lock_fail: \
+ if (ret == -EDEADLK) { \
+ ret = drm_modeset_backoff(&ctx); \
+ if (!ret) \
+ goto modeset_lock_retry; \
+ } \
+ drm_modeset_drop_locks(&ctx); \
+ drm_modeset_acquire_fini(&ctx);
+
#endif /* DRM_MODESET_LOCK_H_ */
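A minimal sketch pairing the two macros as documented above; the function name is hypothetical and the flags value 0 is just one valid choice.

/* Sketch only: everything between BEGIN and END runs with all locks held. */
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

static int example_with_all_modeset_locks(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	/* Work requiring the locks goes here; any further drm_modeset_lock()
	 * failure should be stored in ret before falling through to END. */

	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;
}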
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 582a0ec0aa70..8c738c0e6e9f 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -82,6 +82,7 @@ struct drm_panel_funcs {
* @drm: DRM device owning the panel
* @connector: DRM connector that the panel is attached to
* @dev: parent device of the panel
+ * @link: link from panel device (supplier) to DRM device (consumer)
* @funcs: operations that can be performed on the panel
* @list: panel entry in registry
*/
@@ -89,7 +90,6 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
- struct device_link *link;
const struct drm_panel_funcs *funcs;
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 683742826511..b7e899ce44f0 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#define radeon_PCI_IDS \
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8a152dc16ea5..6078c700d9ba 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -27,6 +27,9 @@
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_modeset_lock.h>
+#include <drm/drm_util.h>
struct drm_crtc;
struct drm_printer;
@@ -119,6 +122,14 @@ struct drm_plane_state {
u16 alpha;
/**
+ * @pixel_blend_mode:
+ * The alpha blending equation selection, describing how the pixels from
+ * the current plane are composited with the background. Value can be
+ * one of DRM_MODE_BLEND_*
+ */
+ uint16_t pixel_blend_mode;
+
+ /**
* @rotation:
* Rotation of the plane. See drm_plane_create_rotation_property() for
* more details.
@@ -162,6 +173,16 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
+ /**
+ * @fb_damage_clips:
+ *
+ * Blob representing damage (area in plane framebuffer that changed
+ * since last plane update) as an array of &drm_mode_rect in framebuffer
+ * coordinates of the attached framebuffer. Note that unlike plane src,
+ * damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property_blob *fb_damage_clips;
+
/** @src: clipped source coordinates of the plane (in 16.16) */
/** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
@@ -659,6 +680,14 @@ struct drm_plane {
* drm_plane_create_rotation_property().
*/
struct drm_property *rotation_property;
+ /**
+ * @blend_mode_property:
+ * Optional "pixel blend mode" enum property for this plane.
+ * Blend mode property represents the alpha blending equation selection,
+ * describing how the pixels from the current plane are composited with
+ * the background.
+ */
+ struct drm_property *blend_mode_property;
/**
* @color_encoding_property:
@@ -779,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier);
+/**
+ * drm_plane_get_damage_clips_count - Returns damage clips count.
+ * @state: Plane state.
+ *
+ * Simple helper to get the number of &drm_mode_rect clips set by user-space
+ * during plane update.
+ *
+ * Return: Number of clips in plane fb_damage_clips blob property.
+ */
+static inline unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
+{
+ return (state && state->fb_damage_clips) ?
+ state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
+}
+
+/**
+ * drm_plane_get_damage_clips - Returns damage clips.
+ * @state: Plane state.
+ *
+ * Note that this function returns uapi type &drm_mode_rect. Drivers might
+ * instead be interested in internal &drm_rect which can be obtained by calling
+ * drm_helper_get_plane_damage_clips().
+ *
+ * Return: Damage clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
+ state->fb_damage_clips->data : NULL);
+}
#endif
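A short sketch iterating the uapi damage clips from a plane state with the helpers above; example_upload_rect() is a placeholder for whatever per-rectangle work a driver does.

/* Sketch only: example_upload_rect() is a placeholder callback. */
#include <drm/drm_plane.h>

static void example_flush_damage(const struct drm_plane_state *state,
				 void (*example_upload_rect)(const struct drm_mode_rect *r))
{
	struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
	unsigned int i, num_clips = drm_plane_get_damage_clips_count(state);

	/* Clips are in framebuffer coordinates, not 16.16 fixed point. */
	for (i = 0; i < num_clips; i++)
		example_upload_rect(&clips[i]);
}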
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 26cee2934781..331ebd60b3a3 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,42 +38,7 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
-/* For use by drm_crtc_helper.c */
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d716d653b096..b03731a3f079 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index f3e6eed3e79c..afbc3beef089 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -381,7 +381,7 @@ void drm_err(const char *format, ...);
#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_DP(dev, fmt, ...) \
+#define DRM_DEBUG_DP(fmt, ...) \
drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index c030f6ccab99..4a0a80d658c7 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -27,6 +27,8 @@
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
+#include <uapi/drm/drm_mode.h>
+
/**
* struct drm_property_enum - symbolic values for enumerations
* @value: numeric property value for this enum entry
@@ -151,7 +153,8 @@ struct drm_property {
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
* userspace, e.g. the EDID, or the connector path property on DP
- * MST sinks.
+ * MST sinks. The kernel can update the value of an immutable property
+ * by calling drm_object_property_set_value().
*/
uint32_t flags;
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 3980602472c0..b1fe921f8e8f 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,15 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
-void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
- struct drm_syncobj_cb *cb,
- drm_syncobj_func_t func);
-void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
- struct drm_syncobj_cb *cb);
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
new file mode 100644
index 000000000000..88abdca89baa
--- /dev/null
+++ b/include/drm/drm_util.h
@@ -0,0 +1,32 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_UTIL_H_
+#define _DRM_UTIL_H_
+
+/* helper for handling conditionals in various for_each macros */
+#define for_each_if(condition) if (!(condition)) {} else
+
+#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a9603ab57..6ad9630d4f48 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -95,7 +95,7 @@ struct drm_vblank_crtc {
/**
* @queue: Wait queue for vblank waiters.
*/
- wait_queue_head_t queue; /**< VBLANK wait queue */
+ wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
@@ -107,7 +107,7 @@ struct drm_vblank_crtc {
/**
* @seqlock: Protect vblank count and time.
*/
- seqlock_t seqlock; /* protects vblank count and time */
+ seqlock_t seqlock;
/**
* @count: Current software vblank counter.
@@ -123,7 +123,7 @@ struct drm_vblank_crtc {
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
+ atomic_t refcount;
/**
* @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
*/
@@ -136,7 +136,7 @@ struct drm_vblank_crtc {
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
- unsigned int inmodeset; /* Display driver is setting mode */
+ unsigned int inmodeset;
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 21c648b0b2a1..47e19796c450 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@ enum drm_sched_priority {
*
* @list: used to append this struct to the list of entities in the
* runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can
+ * be scheduled
+ * @num_rq_list: number of run queues in the rq_list
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -67,6 +70,7 @@ enum drm_sched_priority {
* @fini_status: contains the exit status in case the process was signalled.
* @last_scheduled: points to the finished fence of the last scheduled job.
* @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from rq and destined for termination.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
@@ -75,6 +79,8 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
+ struct drm_sched_rq **rq_list;
+ unsigned int num_rq_list;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -87,6 +93,7 @@ struct drm_sched_entity {
atomic_t *guilty;
struct dma_fence *last_scheduled;
struct task_struct *last_user;
+ bool stopped;
};
/**
@@ -168,8 +175,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* finished to remove the job from the
* @drm_gpu_scheduler.ring_mirror_list.
* @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
- * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
- * interval is over.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -188,7 +193,6 @@ struct drm_sched_job {
struct dma_fence_cb finish_cb;
struct work_struct finish_work;
struct list_head node;
- struct delayed_work work_tdr;
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
@@ -252,11 +256,15 @@ struct drm_sched_backend_ops {
* finished.
* @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ * timeout interval is over.
* @thread: the kthread on which the scheduler which run.
* @ring_mirror_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
+ * @num_jobs: the number of jobs currently queued in the scheduler
+ * @ready: marks if the underlying HW is ready to work
*
* One scheduler is implemented for each hardware ring.
*/
@@ -270,17 +278,38 @@ struct drm_gpu_scheduler {
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct delayed_work work_tdr;
struct task_struct *thread;
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
+ atomic_t num_jobs;
+ bool ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
+
void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+ struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
struct drm_sched_rq **rq_list,
@@ -289,23 +318,21 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
-void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
- struct drm_sched_rq *rq);
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+ enum drm_sched_priority priority);
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_create(
struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
-int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_sched_entity *entity,
- void *owner);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
- struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
#endif
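
For orientation, a minimal driver-side sketch of how the scheduler entry points declared above fit together; it only uses signatures visible in this hunk, and my_sched_ops, my_ring_init() and my_submit() are illustrative placeholders rather than anything from this patch (the declarations are assumed to come from <drm/gpu_scheduler.h>):

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

/* Driver callbacks (run_job, timedout_job, free_job, ...). */
static const struct drm_sched_backend_ops my_sched_ops;

/* One scheduler per hardware ring: up to 4 in-flight jobs, a job is marked
 * guilty after 2 hangs, and the (now per-scheduler) work_tdr timeout fires
 * after 500 ms. */
static int my_ring_init(struct drm_gpu_scheduler *sched)
{
	return drm_sched_init(sched, &my_sched_ops, 4, 2,
			      msecs_to_jiffies(500), "my-ring");
}

/* Submission path: bind the job to an entity, then push it to the queue. */
static int my_submit(struct drm_sched_job *job,
		     struct drm_sched_entity *entity, void *owner)
{
	int ret = drm_sched_job_init(job, entity, owner);

	if (ret)
		return ret;
	drm_sched_entity_push_job(job, entity);
	return 0;
}
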
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b352..192667144693 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x87CA, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_GT2_IDS(info)
+ INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -386,6 +390,7 @@
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
@@ -406,17 +411,17 @@
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info)
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info)
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
+ INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
@@ -426,7 +431,8 @@
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info)
+ INTEL_WHL_U_GT3_IDS(info), \
+ INTEL_AML_CFL_GT2_IDS(info)
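
These macros expand into struct pci_device_id initializers, so the regrouped AML entries flow into a driver's PCI ID table automatically; a hedged sketch, where my_pciidlist, my_device_info, my_kbl_info and my_cfl_info stand in for the driver's own table and per-platform data:

#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* Placeholder per-platform driver data, referenced via .driver_data. */
struct my_device_info { int gen; };
static const struct my_device_info my_kbl_info = { .gen = 9 };
static const struct my_device_info my_cfl_info = { .gen = 9 };

static const struct pci_device_id my_pciidlist[] = {
	INTEL_KBL_IDS(&my_kbl_info), /* AML/KBL Y now via INTEL_AML_KBL_GT2_IDS */
	INTEL_CFL_IDS(&my_cfl_info), /* AML/CFL Y (0x87CA) added via INTEL_AML_CFL_GT2_IDS */
	{ }
};
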
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index fe9827d0ca8a..448aa5ea4722 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -10,10 +10,15 @@
#ifndef __LINUX_TINYDRM_H
#define __LINUX_TINYDRM_H
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <linux/mutex.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_clip_rect;
+struct drm_driver;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+
/**
* struct tinydrm_device - tinydrm device
*/
@@ -54,27 +59,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
}
/**
- * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
- *
- * This macro provides a shortcut for setting the tinydrm GEM operations in
- * the &drm_driver structure.
- */
-#define TINYDRM_GEM_DRIVER_OPS \
- .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
- .gem_print_info = drm_gem_cma_print_info, \
- .gem_vm_ops = &drm_gem_cma_vm_ops, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import = drm_gem_prime_import, \
- .gem_prime_export = drm_gem_prime_export, \
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \
- .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
- .gem_prime_vmap = drm_gem_cma_prime_vmap, \
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap, \
- .dumb_create = drm_gem_cma_dumb_create
-
-/**
* TINYDRM_MODE - tinydrm display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
const struct drm_framebuffer_funcs *fb_funcs,
struct drm_driver *driver);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index a01ba2032f0e..3fc4854dce49 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -51,6 +51,8 @@ struct ttm_placement;
struct ttm_place;
+struct ttm_lru_bulk_move;
+
/**
* struct ttm_bus_placement
*
@@ -311,6 +313,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
}
/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure in its destructor, and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+/**
* ttm_bo_wait - wait for buffer idle.
*
* @bo: The buffer object.
@@ -405,12 +425,24 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
+ * @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
* object. This function must be called with struct ttm_bo_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+
+/**
+ * ttm_bo_bulk_move_lru_tail
+ *
+ * @bulk: bulk move structure
+ *
+ * Bulk move BOs to the LRU tail, only valid to use when the driver makes sure
+ * that the BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ */
+void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
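
A hedged usage sketch of the new bulk-move pair: positions are recorded per BO and then spliced to the LRU tail in one step. It assumes the driver keeps its BOs on a private list and that the exported ttm_bo_glob.lru_lock (see the ttm_bo_driver.h hunk below) is the lru_lock referred to above; struct my_bo and my_move_to_lru_tail() are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

struct my_bo {
	struct ttm_buffer_object tbo;
	struct list_head vm_node;	/* driver-private bookkeeping */
};

static void my_move_to_lru_tail(struct list_head *bos)
{
	struct ttm_lru_bulk_move bulk;
	struct my_bo *mbo;

	memset(&bulk, 0, sizeof(bulk));

	spin_lock(&ttm_bo_glob.lru_lock);
	/* Record the first/last position of every BO in the bulk structure... */
	list_for_each_entry(mbo, bos, vm_node)
		ttm_bo_move_to_lru_tail(&mbo->tbo, &bulk);
	/* ...then move the whole, order-preserving range in one operation. */
	ttm_bo_bulk_move_lru_tail(&bulk);
	spin_unlock(&ttm_bo_glob.lru_lock);
}
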
/**
* ttm_bo_lock_delayed_workqueue
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 3234cc322e70..1021106438b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
#define _TTM_BO_DRIVER_H_
#include <drm/drm_mm.h>
-#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
};
/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
- struct drm_global_reference ref;
- struct ttm_mem_global *mem_glob;
-};
-
-/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
* @swap_lru: Lru list of buffer objects used for swapping.
*/
-struct ttm_bo_global {
+extern struct ttm_bo_global {
/**
* Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
- struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
- * Protected by device_list_mutex.
+ * Protected by ttm_global_mutex.
*/
+ unsigned int use_count;
struct list_head device_list;
/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
* Internal protection.
*/
atomic_t bo_count;
-};
+} ttm_bo_glob;
#define TTM_NUM_MEM_TYPES 8
@@ -491,6 +481,34 @@ struct ttm_bo_device {
};
/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first BO in the bulk move range
+ * @last: last BO in the bulk move range
+ *
+ * Positions for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_buffer_object *first;
+ struct ttm_buffer_object *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ *
+ * @tt: first/last lru entry for BOs in the TT domain
+ * @vram: first/last lru entry for BOs in the VRAM domain
+ * @swap: first/last lru entry for BOs on the swap list
+ *
+ * Helper structure for bulk moves on the LRU list.
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
+ struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
+ struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
+};
+
+/**
* ttm_flag_masked
*
* @old: Pointer to the result and original value.
@@ -550,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -570,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
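
A hedged sketch of device bring-up with the trimmed ttm_bo_device_init() signature: the drm_global/ttm_bo_global_ref plumbing is gone and the global state is the exported ttm_bo_glob singleton, so the driver only supplies its own pieces. struct my_device, my_bo_driver and MY_FILE_PAGE_OFFSET are placeholders:

#include <linux/fs.h>
#include <drm/ttm/ttm_bo_driver.h>

#define MY_FILE_PAGE_OFFSET 0x10000000UL	/* placeholder mmap offset */

static struct ttm_bo_driver my_bo_driver;	/* driver callbacks */

struct my_device {
	struct ttm_bo_device bdev;
	bool need_dma32;
};

static int my_ttm_init(struct my_device *mydev, struct address_space *mapping)
{
	/* No struct ttm_bo_global argument any more. */
	return ttm_bo_device_init(&mydev->bdev, &my_bo_driver, mapping,
				  MY_FILE_PAGE_OFFSET, mydev->need_dma32);
}
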
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b0fdd1980034..621615fa7728 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -40,13 +40,13 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @shared: should the fence be added shared?
+ * @num_shared: How many shared fences we want to add.
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool shared;
+ unsigned int num_shared;
};
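
A hedged sketch of filling the reworked validation entry, where a reservation now pre-declares how many shared fence slots it needs instead of a bool; my_add_validate_entry() is illustrative:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

static void my_add_validate_entry(struct ttm_validate_buffer *tv,
				  struct ttm_buffer_object *bo,
				  struct list_head *head)
{
	tv->bo = bo;
	tv->num_shared = 1;	/* room for one shared fence on this BO */
	list_add_tail(&tv->head, head);
}
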
/**
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
deleted file mode 100644
index 0c3af9836863..000000000000
--- a/include/drm/ttm/ttm_lock.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-/** @file ttm_lock.h
- * This file implements a simple replacement for the buffer manager use
- * of the DRM heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode and write mode
- * is relatively fast, and intended for in-kernel use only.
- *
- * The vt mode is used only when there is a need to block all
- * user-space processes from validating buffers.
- * It's allowed to leave kernel space with the vt lock held.
- * If a user-space process dies while having the vt-lock,
- * it will be released during the file descriptor release. The vt lock
- * excludes write lock and read lock.
- *
- * The suspend mode is used to lock out all TTM users when preparing for
- * and executing suspend operations.
- *
- */
-
-#ifndef _TTM_LOCK_H_
-#define _TTM_LOCK_H_
-
-#include <linux/wait.h>
-#include <linux/atomic.h>
-
-#include "ttm_object.h"
-
-/**
- * struct ttm_lock
- *
- * @base: ttm base object used solely to release the lock if the client
- * holding the lock dies.
- * @queue: Queue for processes waiting for lock change-of-status.
- * @lock: Spinlock protecting some lock members.
- * @rw: Read-write lock counter. Protected by @lock.
- * @flags: Lock state. Protected by @lock.
- * @kill_takers: Boolean whether to kill takers of the lock.
- * @signal: Signal to send when kill_takers is true.
- */
-
-struct ttm_lock {
- struct ttm_base_object base;
- wait_queue_head_t queue;
- spinlock_t lock;
- int32_t rw;
- uint32_t flags;
- bool kill_takers;
- int signal;
- struct ttm_object_file *vt_holder;
-};
-
-
-/**
- * ttm_lock_init
- *
- * @lock: Pointer to a struct ttm_lock
- * Initializes the lock.
- */
-extern void ttm_lock_init(struct ttm_lock *lock);
-
-/**
- * ttm_read_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a read lock.
- */
-extern void ttm_read_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_read_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in read mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_read_trylock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Tries to take the lock in read mode. If the lock is already held
- * in write mode, the function will return -EBUSY. If the lock is held
- * in vt or suspend mode, the function will sleep until these modes
- * are unlocked.
- *
- * Returns:
- * -EBUSY The lock was already held in write mode.
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_lock_downgrade
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Downgrades a write lock to a read lock.
- */
-extern void ttm_lock_downgrade(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_lock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Takes the lock in suspend mode. Excludes read and write mode.
- */
-extern void ttm_suspend_lock(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a suspend lock
- */
-extern void ttm_suspend_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_vt_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- * @tfile: Pointer to a struct ttm_object_file to register the lock with.
- *
- * Takes the lock in vt mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- * -ENOMEM: Out of memory when locking.
- */
-extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
- struct ttm_object_file *tfile);
-
-/**
- * ttm_vt_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a vt lock.
- * Returns:
- * -EINVAL If the lock was not held.
- */
-extern int ttm_vt_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_lock_set_kill
- *
- * @lock: Pointer to a struct ttm_lock
- * @val: Boolean whether to kill processes taking the lock.
- * @signal: Signal to send to the process taking the lock.
- *
- * The kill-when-taking-lock functionality is used to kill processes that keep
- * on using the TTM functionality when its resources has been taken down, for
- * example when the X server exits. A typical sequence would look like this:
- * - X server takes lock in write mode.
- * - ttm_lock_set_kill() is called with @val set to true.
- * - As part of X server exit, TTM resources are taken down.
- * - X server releases the lock on file release.
- * - Another dri client wants to render, takes the lock and is killed.
- *
- */
-static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
- int signal)
-{
- lock->kill_takers = val;
- if (val)
- lock->signal = signal;
-}
-
-#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fed8003..3ff48a0a2d7b 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
struct kobject kobj;
struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
#else
struct ttm_mem_zone *zone_dma32;
#endif
-};
+} ttm_mem_glob;
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
deleted file mode 100644
index a98bfeb4239e..000000000000
--- a/include/drm/ttm/ttm_object.h
+++ /dev/null
@@ -1,354 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-/** @file ttm_object.h
- *
- * Base- and reference object implementation for the various
- * ttm objects. Implements reference counting, minimal security checks
- * and release on file close.
- */
-
-#ifndef _TTM_OBJECT_H_
-#define _TTM_OBJECT_H_
-
-#include <linux/list.h>
-#include <drm/drm_hashtab.h>
-#include <linux/kref.h>
-#include <linux/rcupdate.h>
-#include <linux/dma-buf.h>
-
-#include "ttm_memory.h"
-
-/**
- * enum ttm_ref_type
- *
- * Describes what type of reference a ref object holds.
- *
- * TTM_REF_USAGE is a simple refcount on a base object.
- *
- * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
- * buffer object.
- *
- * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
- * buffer object.
- *
- */
-
-enum ttm_ref_type {
- TTM_REF_USAGE,
- TTM_REF_SYNCCPU_READ,
- TTM_REF_SYNCCPU_WRITE,
- TTM_REF_NUM
-};
-
-/**
- * enum ttm_object_type
- *
- * One entry per ttm object type.
- * Device-specific types should use the
- * ttm_driver_typex types.
- */
-
-enum ttm_object_type {
- ttm_fence_type,
- ttm_buffer_type,
- ttm_lock_type,
- ttm_prime_type,
- ttm_driver_type0 = 256,
- ttm_driver_type1,
- ttm_driver_type2,
- ttm_driver_type3,
- ttm_driver_type4,
- ttm_driver_type5
-};
-
-struct ttm_object_file;
-struct ttm_object_device;
-
-/**
- * struct ttm_base_object
- *
- * @hash: hash entry for the per-device object hash.
- * @type: derived type this object is base class for.
- * @shareable: Other ttm_object_files can access this object.
- *
- * @tfile: Pointer to ttm_object_file of the creator.
- * NULL if the object was not created by a user request.
- * (kernel object).
- *
- * @refcount: Number of references to this object, not
- * including the hash entry. A reference to a base object can
- * only be held by a ref object.
- *
- * @refcount_release: A function to be called when there are
- * no more references to this object. This function should
- * destroy the object (or make sure destruction eventually happens),
- * and when it is called, the object has
- * already been taken out of the per-device hash. The parameter
- * "base" should be set to NULL by the function.
- *
- * @ref_obj_release: A function to be called when a reference object
- * with another ttm_ref_type than TTM_REF_USAGE is deleted.
- * This function may, for example, release a lock held by a user-space
- * process.
- *
- * This struct is intended to be used as a base struct for objects that
- * are visible to user-space. It provides a global name, race-safe
- * access and refcounting, minimal access contol and hooks for unref actions.
- */
-
-struct ttm_base_object {
- struct rcu_head rhead;
- struct drm_hash_item hash;
- enum ttm_object_type object_type;
- bool shareable;
- struct ttm_object_file *tfile;
- struct kref refcount;
- void (*refcount_release) (struct ttm_base_object **base);
- void (*ref_obj_release) (struct ttm_base_object *base,
- enum ttm_ref_type ref_type);
-};
-
-
-/**
- * struct ttm_prime_object - Modified base object that is prime-aware
- *
- * @base: struct ttm_base_object that we derive from
- * @mutex: Mutex protecting the @dma_buf member.
- * @size: Size of the dma_buf associated with this object
- * @real_type: Type of the underlying object. Needed since we're setting
- * the value of @base::object_type to ttm_prime_type
- * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this
- * object.
- * @refcount_release: The underlying object's release method. Needed since
- * we set @base::refcount_release to our own release method.
- */
-
-struct ttm_prime_object {
- struct ttm_base_object base;
- struct mutex mutex;
- size_t size;
- enum ttm_object_type real_type;
- struct dma_buf *dma_buf;
- void (*refcount_release) (struct ttm_base_object **);
-};
-
-/**
- * ttm_base_object_init
- *
- * @tfile: Pointer to a struct ttm_object_file.
- * @base: The struct ttm_base_object to initialize.
- * @shareable: This object is shareable with other applcations.
- * (different @tfile pointers.)
- * @type: The object type.
- * @refcount_release: See the struct ttm_base_object description.
- * @ref_obj_release: See the struct ttm_base_object description.
- *
- * Initializes a struct ttm_base_object.
- */
-
-extern int ttm_base_object_init(struct ttm_object_file *tfile,
- struct ttm_base_object *base,
- bool shareable,
- enum ttm_object_type type,
- void (*refcount_release) (struct ttm_base_object
- **),
- void (*ref_obj_release) (struct ttm_base_object
- *,
- enum ttm_ref_type
- ref_type));
-
-/**
- * ttm_base_object_lookup
- *
- * @tfile: Pointer to a struct ttm_object_file.
- * @key: Hash key
- *
- * Looks up a struct ttm_base_object with the key @key.
- */
-
-extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
- *tfile, uint32_t key);
-
-/**
- * ttm_base_object_lookup_for_ref
- *
- * @tdev: Pointer to a struct ttm_object_device.
- * @key: Hash key
- *
- * Looks up a struct ttm_base_object with the key @key.
- * This function should only be used when the struct tfile associated with the
- * caller doesn't yet have a reference to the base object.
- */
-
-extern struct ttm_base_object *
-ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
-
-/**
- * ttm_base_object_unref
- *
- * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
- *
- * Decrements the base object refcount and clears the pointer pointed to by
- * p_base.
- */
-
-extern void ttm_base_object_unref(struct ttm_base_object **p_base);
-
-/**
- * ttm_ref_object_add.
- *
- * @tfile: A struct ttm_object_file representing the application owning the
- * ref_object.
- * @base: The base object to reference.
- * @ref_type: The type of reference.
- * @existed: Upon completion, indicates that an identical reference object
- * already existed, and the refcount was upped on that object instead.
- * @require_existed: Fail with -EPERM if an identical ref object didn't
- * already exist.
- *
- * Checks that the base object is shareable and adds a ref object to it.
- *
- * Adding a ref object to a base object is basically like referencing the
- * base object, but a user-space application holds the reference. When the
- * file corresponding to @tfile is closed, all its reference objects are
- * deleted. A reference object can have different types depending on what
- * it's intended for. It can be refcounting to prevent object destruction,
- * When user-space takes a lock, it can add a ref object to that lock to
- * make sure the lock is released if the application dies. A ref object
- * will hold a single reference on a base object.
- */
-extern int ttm_ref_object_add(struct ttm_object_file *tfile,
- struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed,
- bool require_existed);
-
-extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
- struct ttm_base_object *base);
-
-/**
- * ttm_ref_object_base_unref
- *
- * @key: Key representing the base object.
- * @ref_type: Ref type of the ref object to be dereferenced.
- *
- * Unreference a ref object with type @ref_type
- * on the base object identified by @key. If there are no duplicate
- * references, the ref object will be destroyed and the base object
- * will be unreferenced.
- */
-extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
- unsigned long key,
- enum ttm_ref_type ref_type);
-
-/**
- * ttm_object_file_init - initialize a struct ttm_object file
- *
- * @tdev: A struct ttm_object device this file is initialized on.
- * @hash_order: Order of the hash table used to hold the reference objects.
- *
- * This is typically called by the file_ops::open function.
- */
-
-extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
- *tdev,
- unsigned int hash_order);
-
-/**
- * ttm_object_file_release - release data held by a ttm_object_file
- *
- * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
- * *p_tfile will be set to NULL by this function.
- *
- * Releases all data associated by a ttm_object_file.
- * Typically called from file_ops::release. The caller must
- * ensure that there are no concurrent users of tfile.
- */
-
-extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
-
-/**
- * ttm_object device init - initialize a struct ttm_object_device
- *
- * @mem_glob: struct ttm_mem_global for memory accounting.
- * @hash_order: Order of hash table used to hash the base objects.
- * @ops: DMA buf ops for prime objects of this device.
- *
- * This function is typically called on device initialization to prepare
- * data structures needed for ttm base and ref objects.
- */
-
-extern struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
- unsigned int hash_order,
- const struct dma_buf_ops *ops);
-
-/**
- * ttm_object_device_release - release data held by a ttm_object_device
- *
- * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
- * *p_tdev will be set to NULL by this function.
- *
- * Releases all data associated by a ttm_object_device.
- * Typically called from driver::unload before the destruction of the
- * device private data structure.
- */
-
-extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
-
-#define ttm_base_object_kfree(__object, __base)\
- kfree_rcu(__object, __base.rhead)
-
-extern int ttm_prime_object_init(struct ttm_object_file *tfile,
- size_t size,
- struct ttm_prime_object *prime,
- bool shareable,
- enum ttm_object_type type,
- void (*refcount_release)
- (struct ttm_base_object **),
- void (*ref_obj_release)
- (struct ttm_base_object *,
- enum ttm_ref_type ref_type));
-
-static inline enum ttm_object_type
-ttm_base_object_type(struct ttm_base_object *base)
-{
- return (base->object_type == ttm_prime_type) ?
- container_of(base, struct ttm_prime_object, base)->real_type :
- base->object_type;
-}
-extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
- int fd, u32 *handle);
-extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
- uint32_t handle, uint32_t flags,
- int *prime_fd);
-
-#define ttm_prime_object_kfree(__obj, __prime) \
- kfree_rcu(__obj, __prime.base.rhead)
-#endif
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
index 2c005376ac0e..7138384e2ef9 100644
--- a/include/dt-bindings/bus/ti-sysc.h
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -15,6 +15,8 @@
/* SmartReflex sysc found on 36xx and later */
#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26)
+#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4)
+
/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE 0
#define SYSC_IDLE_NO 1
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
index b396f00e481d..86a8806e2140 100644
--- a/include/dt-bindings/clock/am3.h
+++ b/include/dt-bindings/clock/am3.h
@@ -16,6 +16,8 @@
#define AM3_CLKCTRL_OFFSET 0x0
#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* l4_per clocks */
#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
@@ -105,4 +107,121 @@
#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
+/* XXX: Compatibility part end */
+
+/* l4ls clocks */
+#define AM3_L4LS_CLKCTRL_OFFSET 0x38
+#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
+#define AM3_L4LS_UART6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x38)
+#define AM3_L4LS_MMC1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x3c)
+#define AM3_L4LS_ELM_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x40)
+#define AM3_L4LS_I2C3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x44)
+#define AM3_L4LS_I2C2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x48)
+#define AM3_L4LS_SPI0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x4c)
+#define AM3_L4LS_SPI1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x50)
+#define AM3_L4LS_L4_LS_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x60)
+#define AM3_L4LS_UART2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x6c)
+#define AM3_L4LS_UART3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x70)
+#define AM3_L4LS_UART4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x74)
+#define AM3_L4LS_UART5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x78)
+#define AM3_L4LS_TIMER7_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x7c)
+#define AM3_L4LS_TIMER2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x80)
+#define AM3_L4LS_TIMER3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x84)
+#define AM3_L4LS_TIMER4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x88)
+#define AM3_L4LS_RNG_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x90)
+#define AM3_L4LS_GPIO2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xac)
+#define AM3_L4LS_GPIO3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb0)
+#define AM3_L4LS_GPIO4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb4)
+#define AM3_L4LS_D_CAN0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc0)
+#define AM3_L4LS_D_CAN1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc4)
+#define AM3_L4LS_EPWMSS1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xcc)
+#define AM3_L4LS_EPWMSS0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd4)
+#define AM3_L4LS_EPWMSS2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd8)
+#define AM3_L4LS_TIMER5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xec)
+#define AM3_L4LS_TIMER6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf0)
+#define AM3_L4LS_MMC2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf4)
+#define AM3_L4LS_SPINLOCK_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x10c)
+#define AM3_L4LS_MAILBOX_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x110)
+#define AM3_L4LS_OCPWP_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x130)
+
+/* l3s clocks */
+#define AM3_L3S_CLKCTRL_OFFSET 0x1c
+#define AM3_L3S_CLKCTRL_INDEX(offset) ((offset) - AM3_L3S_CLKCTRL_OFFSET)
+#define AM3_L3S_USB_OTG_HS_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x1c)
+#define AM3_L3S_GPMC_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x30)
+#define AM3_L3S_MCASP0_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x34)
+#define AM3_L3S_MCASP1_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x68)
+#define AM3_L3S_MMC3_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0xf8)
+
+/* l3 clocks */
+#define AM3_L3_CLKCTRL_OFFSET 0x24
+#define AM3_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_CLKCTRL_OFFSET)
+#define AM3_L3_TPTC0_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x24)
+#define AM3_L3_EMIF_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x28)
+#define AM3_L3_OCMCRAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x2c)
+#define AM3_L3_AES_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x94)
+#define AM3_L3_SHAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xa0)
+#define AM3_L3_TPCC_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xbc)
+#define AM3_L3_L3_INSTR_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xdc)
+#define AM3_L3_L3_MAIN_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xe0)
+#define AM3_L3_TPTC1_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xfc)
+#define AM3_L3_TPTC2_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x100)
+
+/* l4hs clocks */
+#define AM3_L4HS_CLKCTRL_OFFSET 0x120
+#define AM3_L4HS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4HS_CLKCTRL_OFFSET)
+#define AM3_L4HS_L4_HS_CLKCTRL AM3_L4HS_CLKCTRL_INDEX(0x120)
+
+/* pruss_ocp clocks */
+#define AM3_PRUSS_OCP_CLKCTRL_OFFSET 0xe8
+#define AM3_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM3_PRUSS_OCP_CLKCTRL_OFFSET)
+#define AM3_PRUSS_OCP_PRUSS_CLKCTRL AM3_PRUSS_OCP_CLKCTRL_INDEX(0xe8)
+
+/* cpsw_125mhz clocks */
+#define AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL AM3_CLKCTRL_INDEX(0x14)
+
+/* lcdc clocks */
+#define AM3_LCDC_CLKCTRL_OFFSET 0x18
+#define AM3_LCDC_CLKCTRL_INDEX(offset) ((offset) - AM3_LCDC_CLKCTRL_OFFSET)
+#define AM3_LCDC_LCDC_CLKCTRL AM3_LCDC_CLKCTRL_INDEX(0x18)
+
+/* clk_24mhz clocks */
+#define AM3_CLK_24MHZ_CLKCTRL_OFFSET 0x14c
+#define AM3_CLK_24MHZ_CLKCTRL_INDEX(offset) ((offset) - AM3_CLK_24MHZ_CLKCTRL_OFFSET)
+#define AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL AM3_CLK_24MHZ_CLKCTRL_INDEX(0x14c)
+
+/* l4_wkup clocks */
+#define AM3_L4_WKUP_CONTROL_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+#define AM3_L4_WKUP_GPIO1_CLKCTRL AM3_CLKCTRL_INDEX(0x8)
+#define AM3_L4_WKUP_L4_WKUP_CLKCTRL AM3_CLKCTRL_INDEX(0xc)
+#define AM3_L4_WKUP_UART1_CLKCTRL AM3_CLKCTRL_INDEX(0xb4)
+#define AM3_L4_WKUP_I2C1_CLKCTRL AM3_CLKCTRL_INDEX(0xb8)
+#define AM3_L4_WKUP_ADC_TSC_CLKCTRL AM3_CLKCTRL_INDEX(0xbc)
+#define AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL AM3_CLKCTRL_INDEX(0xc0)
+#define AM3_L4_WKUP_TIMER1_CLKCTRL AM3_CLKCTRL_INDEX(0xc4)
+#define AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL AM3_CLKCTRL_INDEX(0xc8)
+#define AM3_L4_WKUP_WD_TIMER2_CLKCTRL AM3_CLKCTRL_INDEX(0xd4)
+
+/* l3_aon clocks */
+#define AM3_L3_AON_CLKCTRL_OFFSET 0x14
+#define AM3_L3_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_AON_CLKCTRL_OFFSET)
+#define AM3_L3_AON_DEBUGSS_CLKCTRL AM3_L3_AON_CLKCTRL_INDEX(0x14)
+
+/* l4_wkup_aon clocks */
+#define AM3_L4_WKUP_AON_CLKCTRL_OFFSET 0xb0
+#define AM3_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_AON_CLKCTRL_OFFSET)
+#define AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL AM3_L4_WKUP_AON_CLKCTRL_INDEX(0xb0)
+
+/* mpu clocks */
+#define AM3_MPU_MPU_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+
+/* l4_rtc clocks */
+#define AM3_L4_RTC_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
+
+/* gfx_l3 clocks */
+#define AM3_GFX_L3_GFX_CLKCTRL AM3_CLKCTRL_INDEX(0x4)
+
+/* l4_cefuse clocks */
+#define AM3_L4_CEFUSE_CEFUSE_CLKCTRL AM3_CLKCTRL_INDEX(0x20)
+
#endif
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d21df00b3270..0f545b5afd60 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -16,6 +16,8 @@
#define AM4_CLKCTRL_OFFSET 0x20
#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* l4_wkup clocks */
#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
@@ -110,4 +112,134 @@
#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
+/* XXX: Compatibility part end. */
+
+/* l3s_tsc clocks */
+#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120
+#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
+#define AM4_L3S_TSC_ADC_TSC_CLKCTRL AM4_L3S_TSC_CLKCTRL_INDEX(0x120)
+
+/* l4_wkup_aon clocks */
+#define AM4_L4_WKUP_AON_CLKCTRL_OFFSET 0x228
+#define AM4_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_AON_CLKCTRL_OFFSET)
+#define AM4_L4_WKUP_AON_WKUP_M3_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x228)
+#define AM4_L4_WKUP_AON_COUNTER_32K_CLKCTRL AM4_L4_WKUP_AON_CLKCTRL_INDEX(0x230)
+
+/* l4_wkup clocks */
+#define AM4_L4_WKUP_CLKCTRL_OFFSET 0x220
+#define AM4_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM4_L4_WKUP_CLKCTRL_OFFSET)
+#define AM4_L4_WKUP_L4_WKUP_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x220)
+#define AM4_L4_WKUP_TIMER1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x328)
+#define AM4_L4_WKUP_WD_TIMER2_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x338)
+#define AM4_L4_WKUP_I2C1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x340)
+#define AM4_L4_WKUP_UART1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x348)
+#define AM4_L4_WKUP_SMARTREFLEX0_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x350)
+#define AM4_L4_WKUP_SMARTREFLEX1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x358)
+#define AM4_L4_WKUP_CONTROL_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x360)
+#define AM4_L4_WKUP_GPIO1_CLKCTRL AM4_L4_WKUP_CLKCTRL_INDEX(0x368)
+
+/* mpu clocks */
+#define AM4_MPU_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* gfx_l3 clocks */
+#define AM4_GFX_L3_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l4_rtc clocks */
+#define AM4_L4_RTC_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+
+/* l3 clocks */
+#define AM4_L3_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
+#define AM4_L3_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
+#define AM4_L3_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
+#define AM4_L3_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
+#define AM4_L3_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
+#define AM4_L3_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
+#define AM4_L3_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
+#define AM4_L3_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
+#define AM4_L3_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
+#define AM4_L3_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
+#define AM4_L3_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
+
+/* l3s clocks */
+#define AM4_L3S_CLKCTRL_OFFSET 0x68
+#define AM4_L3S_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_CLKCTRL_OFFSET)
+#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
+#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
+#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
+#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
+#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
+#define AM4_L3S_QSPI_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x258)
+#define AM4_L3S_USB_OTG_SS0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x260)
+#define AM4_L3S_USB_OTG_SS1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x268)
+
+/* pruss_ocp clocks */
+#define AM4_PRUSS_OCP_CLKCTRL_OFFSET 0x320
+#define AM4_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM4_PRUSS_OCP_CLKCTRL_OFFSET)
+#define AM4_PRUSS_OCP_PRUSS_CLKCTRL AM4_PRUSS_OCP_CLKCTRL_INDEX(0x320)
+
+/* l4ls clocks */
+#define AM4_L4LS_CLKCTRL_OFFSET 0x420
+#define AM4_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM4_L4LS_CLKCTRL_OFFSET)
+#define AM4_L4LS_L4_LS_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x420)
+#define AM4_L4LS_D_CAN0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x428)
+#define AM4_L4LS_D_CAN1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x430)
+#define AM4_L4LS_EPWMSS0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x438)
+#define AM4_L4LS_EPWMSS1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x440)
+#define AM4_L4LS_EPWMSS2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x448)
+#define AM4_L4LS_EPWMSS3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x450)
+#define AM4_L4LS_EPWMSS4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x458)
+#define AM4_L4LS_EPWMSS5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x460)
+#define AM4_L4LS_ELM_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x468)
+#define AM4_L4LS_GPIO2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x478)
+#define AM4_L4LS_GPIO3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x480)
+#define AM4_L4LS_GPIO4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x488)
+#define AM4_L4LS_GPIO5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x490)
+#define AM4_L4LS_GPIO6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x498)
+#define AM4_L4LS_HDQ1W_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a0)
+#define AM4_L4LS_I2C2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4a8)
+#define AM4_L4LS_I2C3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b0)
+#define AM4_L4LS_MAILBOX_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4b8)
+#define AM4_L4LS_MMC1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c0)
+#define AM4_L4LS_MMC2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4c8)
+#define AM4_L4LS_RNG_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x4e0)
+#define AM4_L4LS_SPI0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x500)
+#define AM4_L4LS_SPI1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x508)
+#define AM4_L4LS_SPI2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x510)
+#define AM4_L4LS_SPI3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x518)
+#define AM4_L4LS_SPI4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x520)
+#define AM4_L4LS_SPINLOCK_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x528)
+#define AM4_L4LS_TIMER2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x530)
+#define AM4_L4LS_TIMER3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x538)
+#define AM4_L4LS_TIMER4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x540)
+#define AM4_L4LS_TIMER5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x548)
+#define AM4_L4LS_TIMER6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x550)
+#define AM4_L4LS_TIMER7_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x558)
+#define AM4_L4LS_TIMER8_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x560)
+#define AM4_L4LS_TIMER9_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x568)
+#define AM4_L4LS_TIMER10_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x570)
+#define AM4_L4LS_TIMER11_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x578)
+#define AM4_L4LS_UART2_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x580)
+#define AM4_L4LS_UART3_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x588)
+#define AM4_L4LS_UART4_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x590)
+#define AM4_L4LS_UART5_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x598)
+#define AM4_L4LS_UART6_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5a0)
+#define AM4_L4LS_OCP2SCP0_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5b8)
+#define AM4_L4LS_OCP2SCP1_CLKCTRL AM4_L4LS_CLKCTRL_INDEX(0x5c0)
+
+/* emif clocks */
+#define AM4_EMIF_CLKCTRL_OFFSET 0x720
+#define AM4_EMIF_CLKCTRL_INDEX(offset) ((offset) - AM4_EMIF_CLKCTRL_OFFSET)
+#define AM4_EMIF_EMIF_CLKCTRL AM4_EMIF_CLKCTRL_INDEX(0x720)
+
+/* dss clocks */
+#define AM4_DSS_CLKCTRL_OFFSET 0xa20
+#define AM4_DSS_CLKCTRL_INDEX(offset) ((offset) - AM4_DSS_CLKCTRL_OFFSET)
+#define AM4_DSS_DSS_CORE_CLKCTRL AM4_DSS_CLKCTRL_INDEX(0xa20)
+
+/* cpsw_125mhz clocks */
+#define AM4_CPSW_125MHZ_CLKCTRL_OFFSET 0xb20
+#define AM4_CPSW_125MHZ_CLKCTRL_INDEX(offset) ((offset) - AM4_CPSW_125MHZ_CLKCTRL_OFFSET)
+#define AM4_CPSW_125MHZ_CPGMAC0_CLKCTRL AM4_CPSW_125MHZ_CLKCTRL_INDEX(0xb20)
+
#endif
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index ab3ee241d10c..ed30da28d820 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -9,6 +9,20 @@
#ifndef _DT_BINDINGS_CLK_AT91_H
#define _DT_BINDINGS_CLK_AT91_H
+#define PMC_TYPE_CORE 0
+#define PMC_TYPE_SYSTEM 1
+#define PMC_TYPE_PERIPHERAL 2
+#define PMC_TYPE_GCK 3
+
+#define PMC_SLOW 0
+#define PMC_MCK 1
+#define PMC_UTMI 2
+#define PMC_MAIN 3
+#define PMC_MCK2 4
+#define PMC_I2S0_MUX 5
+#define PMC_I2S1_MUX 6
+
+#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
#define AT91_PMC_LOCKB 2 /* PLLB Lock */
@@ -19,5 +33,6 @@
#define AT91_PMC_MOSCRCS 17 /* Main On-Chip RC */
#define AT91_PMC_CFDEV 18 /* Clock Failure Detector Event */
#define AT91_PMC_GCKRDY 24 /* Generated Clocks */
+#endif
#endif
diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h
index d91156e2658d..bb79de383a3b 100644
--- a/include/dt-bindings/clock/bcm2835-aux.h
+++ b/include/dt-bindings/clock/bcm2835-aux.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_AUX_CLOCK_UART 0
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
index a0c812b0fa39..2cec01f96897 100644
--- a/include/dt-bindings/clock/bcm2835.h
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_PLLA 0
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 5e1061b15aed..ec969b5aeb25 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -16,19 +16,21 @@
#define DRA7_CLKCTRL_OFFSET 0x20
#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
+/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
+
/* mpu clocks */
#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
/* ipu clocks */
-#define DRA7_IPU_CLKCTRL_OFFSET 0x40
-#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
+#define _DRA7_IPU_CLKCTRL_OFFSET 0x40
+#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
+#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50)
+#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58)
+#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60)
+#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68)
+#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70)
+#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78)
+#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80)
/* rtc clocks */
#define DRA7_RTC_CLKCTRL_OFFSET 0x40
@@ -99,65 +101,65 @@
#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
/* l4per clocks */
-#define DRA7_L4PER_CLKCTRL_OFFSET 0x0
-#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208)
+#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0
+#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
+#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc)
+#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14)
+#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28)
+#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30)
+#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38)
+#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40)
+#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48)
+#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50)
+#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58)
+#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60)
+#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68)
+#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70)
+#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78)
+#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80)
+#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88)
+#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90)
+#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98)
+#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
+#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
+#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
+#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
+#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
+#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
+#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
+#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
+#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
+#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
+#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
+#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100)
+#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108)
+#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110)
+#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118)
+#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120)
+#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128)
+#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130)
+#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138)
+#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140)
+#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148)
+#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150)
+#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158)
+#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160)
+#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168)
+#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170)
+#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178)
+#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190)
+#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198)
+#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
+#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
+#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
+#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
+#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
+#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
+#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
+#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
+#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
+#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204)
+#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208)
/* wkupaon clocks */
#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
@@ -168,5 +170,194 @@
#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+
+/* XXX: Compatibility part end. */
+
+/* mpu clocks */
+#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* dsp1 clocks */
+#define DRA7_DSP1_MMU0_DSP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* ipu1 clocks */
+#define DRA7_IPU1_MMU_IPU1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* ipu clocks */
+#define DRA7_IPU_CLKCTRL_OFFSET 0x50
+#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET)
+#define DRA7_IPU_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50)
+#define DRA7_IPU_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58)
+#define DRA7_IPU_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60)
+#define DRA7_IPU_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68)
+#define DRA7_IPU_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70)
+#define DRA7_IPU_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78)
+#define DRA7_IPU_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80)
+
+/* dsp2 clocks */
+#define DRA7_DSP2_MMU0_DSP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* rtc clocks */
+#define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44)
+
+/* coreaon clocks */
+#define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+
+/* l3main1 clocks */
+#define DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L3MAIN1_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L3MAIN1_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_L3MAIN1_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_L3MAIN1_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_L3MAIN1_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L3MAIN1_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+
+/* ipu2 clocks */
+#define DRA7_IPU2_MMU_IPU2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* dma clocks */
+#define DRA7_DMA_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* emif clocks */
+#define DRA7_EMIF_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
+/* atl clocks */
+#define DRA7_ATL_CLKCTRL_OFFSET 0x0
+#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
+#define DRA7_ATL_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
+
+/* l4cfg clocks */
+#define DRA7_L4CFG_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L4CFG_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L4CFG_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_L4CFG_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_L4CFG_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_L4CFG_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
+#define DRA7_L4CFG_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
+#define DRA7_L4CFG_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
+#define DRA7_L4CFG_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
+#define DRA7_L4CFG_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
+#define DRA7_L4CFG_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_L4CFG_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L4CFG_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
+#define DRA7_L4CFG_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
+#define DRA7_L4CFG_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
+
+/* l3instr clocks */
+#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
+/* dss clocks */
+#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+
+/* l3init clocks */
+#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_L3INIT_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_L3INIT_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_L3INIT_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_L3INIT_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_L3INIT_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
+#define DRA7_L3INIT_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
+#define DRA7_L3INIT_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
+
+/* pcie clocks */
+#define DRA7_PCIE_CLKCTRL_OFFSET 0xb0
+#define DRA7_PCIE_CLKCTRL_INDEX(offset) ((offset) - DRA7_PCIE_CLKCTRL_OFFSET)
+#define DRA7_PCIE_PCIE1_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb0)
+#define DRA7_PCIE_PCIE2_CLKCTRL DRA7_PCIE_CLKCTRL_INDEX(0xb8)
+
+/* gmac clocks */
+#define DRA7_GMAC_CLKCTRL_OFFSET 0xd0
+#define DRA7_GMAC_CLKCTRL_INDEX(offset) ((offset) - DRA7_GMAC_CLKCTRL_OFFSET)
+#define DRA7_GMAC_GMAC_CLKCTRL DRA7_GMAC_CLKCTRL_INDEX(0xd0)
+
+/* l4per clocks */
+#define DRA7_L4PER_CLKCTRL_OFFSET 0x28
+#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET)
+#define DRA7_L4PER_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28)
+#define DRA7_L4PER_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30)
+#define DRA7_L4PER_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38)
+#define DRA7_L4PER_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40)
+#define DRA7_L4PER_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48)
+#define DRA7_L4PER_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50)
+#define DRA7_L4PER_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58)
+#define DRA7_L4PER_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60)
+#define DRA7_L4PER_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68)
+#define DRA7_L4PER_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70)
+#define DRA7_L4PER_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78)
+#define DRA7_L4PER_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80)
+#define DRA7_L4PER_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88)
+#define DRA7_L4PER_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0)
+#define DRA7_L4PER_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8)
+#define DRA7_L4PER_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0)
+#define DRA7_L4PER_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8)
+#define DRA7_L4PER_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0)
+#define DRA7_L4PER_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0)
+#define DRA7_L4PER_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8)
+#define DRA7_L4PER_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100)
+#define DRA7_L4PER_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108)
+#define DRA7_L4PER_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110)
+#define DRA7_L4PER_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118)
+#define DRA7_L4PER_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120)
+#define DRA7_L4PER_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128)
+#define DRA7_L4PER_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140)
+#define DRA7_L4PER_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148)
+#define DRA7_L4PER_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150)
+#define DRA7_L4PER_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158)
+#define DRA7_L4PER_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170)
+
+/* l4sec clocks */
+#define DRA7_L4SEC_CLKCTRL_OFFSET 0x1a0
+#define DRA7_L4SEC_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4SEC_CLKCTRL_OFFSET)
+#define DRA7_L4SEC_AES1_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a0)
+#define DRA7_L4SEC_AES2_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1a8)
+#define DRA7_L4SEC_DES_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1b0)
+#define DRA7_L4SEC_RNG_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c0)
+#define DRA7_L4SEC_SHAM_CLKCTRL DRA7_L4SEC_CLKCTRL_INDEX(0x1c8)
+
+/* l4per2 clocks */
+#define DRA7_L4PER2_CLKCTRL_OFFSET 0xc
+#define DRA7_L4PER2_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER2_CLKCTRL_OFFSET)
+#define DRA7_L4PER2_L4_PER2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc)
+#define DRA7_L4PER2_PRUSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x18)
+#define DRA7_L4PER2_PRUSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x20)
+#define DRA7_L4PER2_EPWMSS1_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x90)
+#define DRA7_L4PER2_EPWMSS2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x98)
+#define DRA7_L4PER2_EPWMSS0_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0xc4)
+#define DRA7_L4PER2_QSPI_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x138)
+#define DRA7_L4PER2_MCASP2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x160)
+#define DRA7_L4PER2_MCASP3_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x168)
+#define DRA7_L4PER2_MCASP5_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x178)
+#define DRA7_L4PER2_MCASP8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x190)
+#define DRA7_L4PER2_MCASP4_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x198)
+#define DRA7_L4PER2_UART7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1d0)
+#define DRA7_L4PER2_UART8_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e0)
+#define DRA7_L4PER2_UART9_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1e8)
+#define DRA7_L4PER2_DCAN2_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x1f0)
+#define DRA7_L4PER2_MCASP6_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x204)
+#define DRA7_L4PER2_MCASP7_CLKCTRL DRA7_L4PER2_CLKCTRL_INDEX(0x208)
+
+/* l4per3 clocks */
+#define DRA7_L4PER3_CLKCTRL_OFFSET 0x14
+#define DRA7_L4PER3_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER3_CLKCTRL_OFFSET)
+#define DRA7_L4PER3_L4_PER3_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x14)
+#define DRA7_L4PER3_TIMER13_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xc8)
+#define DRA7_L4PER3_TIMER14_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd0)
+#define DRA7_L4PER3_TIMER15_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0xd8)
+#define DRA7_L4PER3_TIMER16_CLKCTRL DRA7_L4PER3_CLKCTRL_INDEX(0x130)
+
+/* wkupaon clocks */
+#define DRA7_WKUPAON_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_WKUPAON_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+#define DRA7_WKUPAON_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
+#define DRA7_WKUPAON_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
+#define DRA7_WKUPAON_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
+#define DRA7_WKUPAON_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
+#define DRA7_WKUPAON_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
+#define DRA7_WKUPAON_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_WKUPAON_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
#endif
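
Note on the DRA7 changes above: the per-domain *_CLKCTRL_INDEX() helpers are plain offset arithmetic. Each domain records the register offset of its first CM_*_CLKCTRL register, and the per-clock macro subtracts it, so the binding constant ends up being the clock's register offset relative to the start of that domain's clkctrl region. A minimal compile-time sketch, reusing only values visible in the l4sec hunk above (everything else here is illustrative, not part of the patch):

    /* Sketch only: values copied from the l4sec block in the hunk above. */
    #define L4SEC_CLKCTRL_OFFSET        0x1a0
    #define L4SEC_CLKCTRL_INDEX(offset) ((offset) - L4SEC_CLKCTRL_OFFSET)
    #define L4SEC_RNG_CLKCTRL           L4SEC_CLKCTRL_INDEX(0x1c0)

    /* 0x1c0 - 0x1a0 = 0x20: the RNG clock sits 0x20 bytes into the l4sec clkctrl range. */
    _Static_assert(L4SEC_RNG_CLKCTRL == 0x20, "offset arithmetic");
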
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index c796ff02ceeb..fe8214017b46 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Tomasz Figa <t.figa@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Samsung Exynos3250 clock controllers.
*/
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index e9f9d400c322..a0439ce8e8d3 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos4 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H
#define _DT_BINDINGS_CLOCK_EXYNOS_4_H
@@ -190,32 +187,6 @@
#define CLK_MIPI_HSI 349 /* Exynos4210 only */
#define CLK_PIXELASYNCM0 351
#define CLK_PIXELASYNCM1 352
-#define CLK_FIMC_LITE0 353 /* Exynos4x12 only */
-#define CLK_FIMC_LITE1 354 /* Exynos4x12 only */
-#define CLK_PPMUISPX 355 /* Exynos4x12 only */
-#define CLK_PPMUISPMX 356 /* Exynos4x12 only */
-#define CLK_FIMC_ISP 357 /* Exynos4x12 only */
-#define CLK_FIMC_DRC 358 /* Exynos4x12 only */
-#define CLK_FIMC_FD 359 /* Exynos4x12 only */
-#define CLK_MCUISP 360 /* Exynos4x12 only */
-#define CLK_GICISP 361 /* Exynos4x12 only */
-#define CLK_SMMU_ISP 362 /* Exynos4x12 only */
-#define CLK_SMMU_DRC 363 /* Exynos4x12 only */
-#define CLK_SMMU_FD 364 /* Exynos4x12 only */
-#define CLK_SMMU_LITE0 365 /* Exynos4x12 only */
-#define CLK_SMMU_LITE1 366 /* Exynos4x12 only */
-#define CLK_MCUCTL_ISP 367 /* Exynos4x12 only */
-#define CLK_MPWM_ISP 368 /* Exynos4x12 only */
-#define CLK_I2C0_ISP 369 /* Exynos4x12 only */
-#define CLK_I2C1_ISP 370 /* Exynos4x12 only */
-#define CLK_MTCADC_ISP 371 /* Exynos4x12 only */
-#define CLK_PWM_ISP 372 /* Exynos4x12 only */
-#define CLK_WDT_ISP 373 /* Exynos4x12 only */
-#define CLK_UART_ISP 374 /* Exynos4x12 only */
-#define CLK_ASYNCAXIM 375 /* Exynos4x12 only */
-#define CLK_SMMU_ISPCX 376 /* Exynos4x12 only */
-#define CLK_SPI0_ISP 377 /* Exynos4x12 only */
-#define CLK_SPI1_ISP 378 /* Exynos4x12 only */
#define CLK_PWM_ISP_SCLK 379 /* Exynos4x12 only */
#define CLK_SPI0_ISP_SCLK 380 /* Exynos4x12 only */
#define CLK_SPI1_ISP_SCLK 381 /* Exynos4x12 only */
@@ -257,10 +228,6 @@
#define CLK_PPMUACP 415
/* div clocks */
-#define CLK_DIV_ISP0 450 /* Exynos4x12 only */
-#define CLK_DIV_ISP1 451 /* Exynos4x12 only */
-#define CLK_DIV_MCUISP0 452 /* Exynos4x12 only */
-#define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */
#define CLK_DIV_ACLK200 454 /* Exynos4x12 only */
#define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
#define CLK_DIV_ACP 456
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 15508adcdfde..bc8a3c53a54b 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5250 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5250_H
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index a4bac9a1764f..98a58cbd81b2 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Rahul Sharma <rahul.sharma@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Provides constants for Exynos5260 clocks.
-*/
+ */
#ifndef _DT_BINDINGS_CLK_EXYNOS5260_H
#define _DT_BINDINGS_CLK_EXYNOS5260_H
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 6cb4e90f81fc..f179eabbcdb7 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Copyright (c) 2016 Krzysztof Kozlowski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5410 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5410_H
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 2740ae0424a9..355f469943f1 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5420 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5420_H
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index be39d23e6a32..98bd85ce1e45 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H
diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h
deleted file mode 100644
index 842cdc0adff1..000000000000
--- a/include/dt-bindings/clock/exynos5440.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Device Tree binding constants for Exynos5440 clock controller.
-*/
-
-#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5440_H
-#define _DT_BINDINGS_CLOCK_EXYNOS_5440_H
-
-#define CLK_XTAL 1
-#define CLK_ARM_CLK 2
-#define CLK_CPLLA 3
-#define CLK_CPLLB 4
-#define CLK_SPI_BAUD 16
-#define CLK_PB0_250 17
-#define CLK_PR0_250 18
-#define CLK_PR1_250 19
-#define CLK_B_250 20
-#define CLK_B_125 21
-#define CLK_B_200 22
-#define CLK_SATA 23
-#define CLK_USB 24
-#define CLK_GMAC0 25
-#define CLK_CS250 26
-#define CLK_PB0_250_O 27
-#define CLK_PR0_250_O 28
-#define CLK_PR1_250_O 29
-#define CLK_B_250_O 30
-#define CLK_B_125_O 31
-#define CLK_B_200_O 32
-#define CLK_SATA_O 33
-#define CLK_USB_O 34
-#define CLK_GMAC0_O 35
-#define CLK_CS250_O 36
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 37
-
-#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5440_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index 10c558611085..fce33c7050c8 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
#define _DT_BINDINGS_CLOCK_EXYNOS7_H
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 3979d48c025f..db0763e96173 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -128,5 +128,23 @@
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
#define CLKID_GEN_CLK 159
+#define CLKID_VID_PLL 166
+#define CLKID_VCLK 175
+#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV1 185
+#define CLKID_VCLK_DIV2 186
+#define CLKID_VCLK_DIV4 187
+#define CLKID_VCLK_DIV6 188
+#define CLKID_VCLK_DIV12 189
+#define CLKID_VCLK2_DIV1 190
+#define CLKID_VCLK2_DIV2 191
+#define CLKID_VCLK2_DIV4 192
+#define CLKID_VCLK2_DIV6 193
+#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI 199
+#define CLKID_CTS_ENCP 200
+#define CLKID_CTS_VDAC 201
+#define CLKID_HDMI_TX 202
+#define CLKID_HDMI 205
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/hi3670-clock.h b/include/dt-bindings/clock/hi3670-clock.h
new file mode 100644
index 000000000000..fa48583f87d6
--- /dev/null
+++ b/include/dt-bindings/clock/hi3670-clock.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device Tree binding constants for HiSilicon Hi3670 SoC
+ *
+ * Copyright (c) 2001-2021, Huawei Tech. Co., Ltd.
+ * Copyright (c) 2018 Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_HI3670_H
+#define __DT_BINDINGS_CLOCK_HI3670_H
+
+/* clk in stub clock */
+#define HI3670_CLK_STUB_CLUSTER0 0
+#define HI3670_CLK_STUB_CLUSTER1 1
+#define HI3670_CLK_STUB_GPU 2
+#define HI3670_CLK_STUB_DDR 3
+#define HI3670_CLK_STUB_DDR_VOTE 4
+#define HI3670_CLK_STUB_DDR_LIMIT 5
+#define HI3670_CLK_STUB_NUM 6
+
+/* clk in crg clock */
+#define HI3670_CLKIN_SYS 0
+#define HI3670_CLKIN_REF 1
+#define HI3670_CLK_FLL_SRC 2
+#define HI3670_CLK_PPLL0 3
+#define HI3670_CLK_PPLL1 4
+#define HI3670_CLK_PPLL2 5
+#define HI3670_CLK_PPLL3 6
+#define HI3670_CLK_PPLL4 7
+#define HI3670_CLK_PPLL6 8
+#define HI3670_CLK_PPLL7 9
+#define HI3670_CLK_PPLL_PCIE 10
+#define HI3670_CLK_PCIEPLL_REV 11
+#define HI3670_CLK_SCPLL 12
+#define HI3670_PCLK 13
+#define HI3670_CLK_UART0_DBG 14
+#define HI3670_CLK_UART6 15
+#define HI3670_OSC32K 16
+#define HI3670_OSC19M 17
+#define HI3670_CLK_480M 18
+#define HI3670_CLK_INVALID 19
+#define HI3670_CLK_DIV_SYSBUS 20
+#define HI3670_CLK_FACTOR_MMC 21
+#define HI3670_CLK_SD_SYS 22
+#define HI3670_CLK_SDIO_SYS 23
+#define HI3670_CLK_DIV_A53HPM 24
+#define HI3670_CLK_DIV_320M 25
+#define HI3670_PCLK_GATE_UART0 26
+#define HI3670_CLK_FACTOR_UART0 27
+#define HI3670_CLK_FACTOR_USB3PHY_PLL 28
+#define HI3670_CLK_GATE_ABB_USB 29
+#define HI3670_CLK_GATE_UFSPHY_REF 30
+#define HI3670_ICS_VOLT_HIGH 31
+#define HI3670_ICS_VOLT_MIDDLE 32
+#define HI3670_VENC_VOLT_HOLD 33
+#define HI3670_VDEC_VOLT_HOLD 34
+#define HI3670_EDC_VOLT_HOLD 35
+#define HI3670_CLK_ISP_SNCLK_FAC 36
+#define HI3670_CLK_FACTOR_RXDPHY 37
+#define HI3670_AUTODIV_SYSBUS 38
+#define HI3670_AUTODIV_EMMC0BUS 39
+#define HI3670_PCLK_ANDGT_MMC1_PCIE 40
+#define HI3670_CLK_GATE_VCODECBUS_GT 41
+#define HI3670_CLK_ANDGT_SD 42
+#define HI3670_CLK_SD_SYS_GT 43
+#define HI3670_CLK_ANDGT_SDIO 44
+#define HI3670_CLK_SDIO_SYS_GT 45
+#define HI3670_CLK_A53HPM_ANDGT 46
+#define HI3670_CLK_320M_PLL_GT 47
+#define HI3670_CLK_ANDGT_UARTH 48
+#define HI3670_CLK_ANDGT_UARTL 49
+#define HI3670_CLK_ANDGT_UART0 50
+#define HI3670_CLK_ANDGT_SPI 51
+#define HI3670_CLK_ANDGT_PCIEAXI 52
+#define HI3670_CLK_DIV_AO_ASP_GT 53
+#define HI3670_CLK_GATE_CSI_TRANS 54
+#define HI3670_CLK_GATE_DSI_TRANS 55
+#define HI3670_CLK_ANDGT_PTP 56
+#define HI3670_CLK_ANDGT_OUT0 57
+#define HI3670_CLK_ANDGT_OUT1 58
+#define HI3670_CLKGT_DP_AUDIO_PLL_AO 59
+#define HI3670_CLK_ANDGT_VDEC 60
+#define HI3670_CLK_ANDGT_VENC 61
+#define HI3670_CLK_ISP_SNCLK_ANGT 62
+#define HI3670_CLK_ANDGT_RXDPHY 63
+#define HI3670_CLK_ANDGT_ICS 64
+#define HI3670_AUTODIV_DMABUS 65
+#define HI3670_CLK_MUX_SYSBUS 66
+#define HI3670_CLK_MUX_VCODECBUS 67
+#define HI3670_CLK_MUX_SD_SYS 68
+#define HI3670_CLK_MUX_SD_PLL 69
+#define HI3670_CLK_MUX_SDIO_SYS 70
+#define HI3670_CLK_MUX_SDIO_PLL 71
+#define HI3670_CLK_MUX_A53HPM 72
+#define HI3670_CLK_MUX_320M 73
+#define HI3670_CLK_MUX_UARTH 74
+#define HI3670_CLK_MUX_UARTL 75
+#define HI3670_CLK_MUX_UART0 76
+#define HI3670_CLK_MUX_I2C 77
+#define HI3670_CLK_MUX_SPI 78
+#define HI3670_CLK_MUX_PCIEAXI 79
+#define HI3670_CLK_MUX_AO_ASP 80
+#define HI3670_CLK_MUX_VDEC 81
+#define HI3670_CLK_MUX_VENC 82
+#define HI3670_CLK_ISP_SNCLK_MUX0 83
+#define HI3670_CLK_ISP_SNCLK_MUX1 84
+#define HI3670_CLK_ISP_SNCLK_MUX2 85
+#define HI3670_CLK_MUX_RXDPHY_CFG 86
+#define HI3670_CLK_MUX_ICS 87
+#define HI3670_CLK_DIV_CFGBUS 88
+#define HI3670_CLK_DIV_MMC0BUS 89
+#define HI3670_CLK_DIV_MMC1BUS 90
+#define HI3670_PCLK_DIV_MMC1_PCIE 91
+#define HI3670_CLK_DIV_VCODECBUS 92
+#define HI3670_CLK_DIV_SD 93
+#define HI3670_CLK_DIV_SDIO 94
+#define HI3670_CLK_DIV_UARTH 95
+#define HI3670_CLK_DIV_UARTL 96
+#define HI3670_CLK_DIV_UART0 97
+#define HI3670_CLK_DIV_I2C 98
+#define HI3670_CLK_DIV_SPI 99
+#define HI3670_CLK_DIV_PCIEAXI 100
+#define HI3670_CLK_DIV_AO_ASP 101
+#define HI3670_CLK_DIV_CSI_TRANS 102
+#define HI3670_CLK_DIV_DSI_TRANS 103
+#define HI3670_CLK_DIV_PTP 104
+#define HI3670_CLK_DIV_CLKOUT0_PLL 105
+#define HI3670_CLK_DIV_CLKOUT1_PLL 106
+#define HI3670_CLKDIV_DP_AUDIO_PLL_AO 107
+#define HI3670_CLK_DIV_VDEC 108
+#define HI3670_CLK_DIV_VENC 109
+#define HI3670_CLK_ISP_SNCLK_DIV0 110
+#define HI3670_CLK_ISP_SNCLK_DIV1 111
+#define HI3670_CLK_ISP_SNCLK_DIV2 112
+#define HI3670_CLK_DIV_ICS 113
+#define HI3670_PPLL1_EN_ACPU 114
+#define HI3670_PPLL2_EN_ACPU 115
+#define HI3670_PPLL3_EN_ACPU 116
+#define HI3670_PPLL1_GT_CPU 117
+#define HI3670_PPLL2_GT_CPU 118
+#define HI3670_PPLL3_GT_CPU 119
+#define HI3670_CLK_GATE_PPLL2_MEDIA 120
+#define HI3670_CLK_GATE_PPLL3_MEDIA 121
+#define HI3670_CLK_GATE_PPLL4_MEDIA 122
+#define HI3670_CLK_GATE_PPLL6_MEDIA 123
+#define HI3670_CLK_GATE_PPLL7_MEDIA 124
+#define HI3670_PCLK_GPIO0 125
+#define HI3670_PCLK_GPIO1 126
+#define HI3670_PCLK_GPIO2 127
+#define HI3670_PCLK_GPIO3 128
+#define HI3670_PCLK_GPIO4 129
+#define HI3670_PCLK_GPIO5 130
+#define HI3670_PCLK_GPIO6 131
+#define HI3670_PCLK_GPIO7 132
+#define HI3670_PCLK_GPIO8 133
+#define HI3670_PCLK_GPIO9 134
+#define HI3670_PCLK_GPIO10 135
+#define HI3670_PCLK_GPIO11 136
+#define HI3670_PCLK_GPIO12 137
+#define HI3670_PCLK_GPIO13 138
+#define HI3670_PCLK_GPIO14 139
+#define HI3670_PCLK_GPIO15 140
+#define HI3670_PCLK_GPIO16 141
+#define HI3670_PCLK_GPIO17 142
+#define HI3670_PCLK_GPIO20 143
+#define HI3670_PCLK_GPIO21 144
+#define HI3670_PCLK_GATE_DSI0 145
+#define HI3670_PCLK_GATE_DSI1 146
+#define HI3670_HCLK_GATE_USB3OTG 147
+#define HI3670_ACLK_GATE_USB3DVFS 148
+#define HI3670_HCLK_GATE_SDIO 149
+#define HI3670_PCLK_GATE_PCIE_SYS 150
+#define HI3670_PCLK_GATE_PCIE_PHY 151
+#define HI3670_PCLK_GATE_MMC1_PCIE 152
+#define HI3670_PCLK_GATE_MMC0_IOC 153
+#define HI3670_PCLK_GATE_MMC1_IOC 154
+#define HI3670_CLK_GATE_DMAC 155
+#define HI3670_CLK_GATE_VCODECBUS2DDR 156
+#define HI3670_CLK_CCI400_BYPASS 157
+#define HI3670_CLK_GATE_CCI400 158
+#define HI3670_CLK_GATE_SD 159
+#define HI3670_HCLK_GATE_SD 160
+#define HI3670_CLK_GATE_SDIO 161
+#define HI3670_CLK_GATE_A57HPM 162
+#define HI3670_CLK_GATE_A53HPM 163
+#define HI3670_CLK_GATE_PA_A53 164
+#define HI3670_CLK_GATE_PA_A57 165
+#define HI3670_CLK_GATE_PA_G3D 166
+#define HI3670_CLK_GATE_GPUHPM 167
+#define HI3670_CLK_GATE_PERIHPM 168
+#define HI3670_CLK_GATE_AOHPM 169
+#define HI3670_CLK_GATE_UART1 170
+#define HI3670_CLK_GATE_UART4 171
+#define HI3670_PCLK_GATE_UART1 172
+#define HI3670_PCLK_GATE_UART4 173
+#define HI3670_CLK_GATE_UART2 174
+#define HI3670_CLK_GATE_UART5 175
+#define HI3670_PCLK_GATE_UART2 176
+#define HI3670_PCLK_GATE_UART5 177
+#define HI3670_CLK_GATE_UART0 178
+#define HI3670_CLK_GATE_I2C3 179
+#define HI3670_CLK_GATE_I2C4 180
+#define HI3670_CLK_GATE_I2C7 181
+#define HI3670_PCLK_GATE_I2C3 182
+#define HI3670_PCLK_GATE_I2C4 183
+#define HI3670_PCLK_GATE_I2C7 184
+#define HI3670_CLK_GATE_SPI1 185
+#define HI3670_CLK_GATE_SPI4 186
+#define HI3670_PCLK_GATE_SPI1 187
+#define HI3670_PCLK_GATE_SPI4 188
+#define HI3670_CLK_GATE_USB3OTG_REF 189
+#define HI3670_CLK_GATE_USB2PHY_REF 190
+#define HI3670_CLK_GATE_PCIEAUX 191
+#define HI3670_ACLK_GATE_PCIE 192
+#define HI3670_CLK_GATE_MMC1_PCIEAXI 193
+#define HI3670_CLK_GATE_PCIEPHY_REF 194
+#define HI3670_CLK_GATE_PCIE_DEBOUNCE 195
+#define HI3670_CLK_GATE_PCIEIO 196
+#define HI3670_CLK_GATE_PCIE_HP 197
+#define HI3670_CLK_GATE_AO_ASP 198
+#define HI3670_PCLK_GATE_PCTRL 199
+#define HI3670_CLK_CSI_TRANS_GT 200
+#define HI3670_CLK_DSI_TRANS_GT 201
+#define HI3670_CLK_GATE_PWM 202
+#define HI3670_ABB_AUDIO_EN0 203
+#define HI3670_ABB_AUDIO_EN1 204
+#define HI3670_ABB_AUDIO_GT_EN0 205
+#define HI3670_ABB_AUDIO_GT_EN1 206
+#define HI3670_CLK_GATE_DP_AUDIO_PLL_AO 207
+#define HI3670_PERI_VOLT_HOLD 208
+#define HI3670_PERI_VOLT_MIDDLE 209
+#define HI3670_CLK_GATE_ISP_SNCLK0 210
+#define HI3670_CLK_GATE_ISP_SNCLK1 211
+#define HI3670_CLK_GATE_ISP_SNCLK2 212
+#define HI3670_CLK_GATE_RXDPHY0_CFG 213
+#define HI3670_CLK_GATE_RXDPHY1_CFG 214
+#define HI3670_CLK_GATE_RXDPHY2_CFG 215
+#define HI3670_CLK_GATE_TXDPHY0_CFG 216
+#define HI3670_CLK_GATE_TXDPHY0_REF 217
+#define HI3670_CLK_GATE_TXDPHY1_CFG 218
+#define HI3670_CLK_GATE_TXDPHY1_REF 219
+#define HI3670_CLK_GATE_MEDIA_TCXO 220
+
+/* clk in sctrl */
+#define HI3670_CLK_ANDGT_IOPERI 0
+#define HI3670_CLKANDGT_ASP_SUBSYS_PERI 1
+#define HI3670_CLK_ANGT_ASP_SUBSYS 2
+#define HI3670_CLK_MUX_UFS_SUBSYS 3
+#define HI3670_CLK_MUX_CLKOUT0 4
+#define HI3670_CLK_MUX_CLKOUT1 5
+#define HI3670_CLK_MUX_ASP_SUBSYS_PERI 6
+#define HI3670_CLK_MUX_ASP_PLL 7
+#define HI3670_CLK_DIV_AOBUS 8
+#define HI3670_CLK_DIV_UFS_SUBSYS 9
+#define HI3670_CLK_DIV_IOPERI 10
+#define HI3670_CLK_DIV_CLKOUT0_TCXO 11
+#define HI3670_CLK_DIV_CLKOUT1_TCXO 12
+#define HI3670_CLK_ASP_SUBSYS_PERI_DIV 13
+#define HI3670_CLK_DIV_ASP_SUBSYS 14
+#define HI3670_PPLL0_EN_ACPU 15
+#define HI3670_PPLL0_GT_CPU 16
+#define HI3670_CLK_GATE_PPLL0_MEDIA 17
+#define HI3670_PCLK_GPIO18 18
+#define HI3670_PCLK_GPIO19 19
+#define HI3670_CLK_GATE_SPI 20
+#define HI3670_PCLK_GATE_SPI 21
+#define HI3670_CLK_GATE_UFS_SUBSYS 22
+#define HI3670_CLK_GATE_UFSIO_REF 23
+#define HI3670_PCLK_AO_GPIO0 24
+#define HI3670_PCLK_AO_GPIO1 25
+#define HI3670_PCLK_AO_GPIO2 26
+#define HI3670_PCLK_AO_GPIO3 27
+#define HI3670_PCLK_AO_GPIO4 28
+#define HI3670_PCLK_AO_GPIO5 29
+#define HI3670_PCLK_AO_GPIO6 30
+#define HI3670_CLK_GATE_OUT0 31
+#define HI3670_CLK_GATE_OUT1 32
+#define HI3670_PCLK_GATE_SYSCNT 33
+#define HI3670_CLK_GATE_SYSCNT 34
+#define HI3670_CLK_GATE_ASP_SUBSYS_PERI 35
+#define HI3670_CLK_GATE_ASP_SUBSYS 36
+#define HI3670_CLK_GATE_ASP_TCXO 37
+#define HI3670_CLK_GATE_DP_AUDIO_PLL 38
+
+/* clk in pmuctrl */
+#define HI3670_GATE_ABB_192 0
+
+/* clk in pctrl */
+#define HI3670_GATE_UFS_TCXO_EN 0
+#define HI3670_GATE_USB_TCXO_EN 1
+
+/* clk in iomcu */
+#define HI3670_CLK_GATE_I2C0 0
+#define HI3670_CLK_GATE_I2C1 1
+#define HI3670_CLK_GATE_I2C2 2
+#define HI3670_CLK_GATE_SPI0 3
+#define HI3670_CLK_GATE_SPI2 4
+#define HI3670_CLK_GATE_UART3 5
+#define HI3670_CLK_I2C0_GATE_IOMCU 6
+#define HI3670_CLK_I2C1_GATE_IOMCU 7
+#define HI3670_CLK_I2C2_GATE_IOMCU 8
+#define HI3670_CLK_SPI0_GATE_IOMCU 9
+#define HI3670_CLK_SPI2_GATE_IOMCU 10
+#define HI3670_CLK_UART3_GATE_IOMCU 11
+#define HI3670_CLK_GATE_PERI0_IOMCU 12
+
+/* clk in media1 */
+#define HI3670_CLK_GATE_VIVOBUS_ANDGT 0
+#define HI3670_CLK_ANDGT_EDC0 1
+#define HI3670_CLK_ANDGT_LDI0 2
+#define HI3670_CLK_ANDGT_LDI1 3
+#define HI3670_CLK_MMBUF_PLL_ANDGT 4
+#define HI3670_PCLK_MMBUF_ANDGT 5
+#define HI3670_CLK_MUX_VIVOBUS 6
+#define HI3670_CLK_MUX_EDC0 7
+#define HI3670_CLK_MUX_LDI0 8
+#define HI3670_CLK_MUX_LDI1 9
+#define HI3670_CLK_SW_MMBUF 10
+#define HI3670_CLK_DIV_VIVOBUS 11
+#define HI3670_CLK_DIV_EDC0 12
+#define HI3670_CLK_DIV_LDI0 13
+#define HI3670_CLK_DIV_LDI1 14
+#define HI3670_ACLK_DIV_MMBUF 15
+#define HI3670_PCLK_DIV_MMBUF 16
+#define HI3670_ACLK_GATE_NOC_DSS 17
+#define HI3670_PCLK_GATE_NOC_DSS_CFG 18
+#define HI3670_PCLK_GATE_MMBUF_CFG 19
+#define HI3670_PCLK_GATE_DISP_NOC_SUBSYS 20
+#define HI3670_ACLK_GATE_DISP_NOC_SUBSYS 21
+#define HI3670_PCLK_GATE_DSS 22
+#define HI3670_ACLK_GATE_DSS 23
+#define HI3670_CLK_GATE_VIVOBUSFREQ 24
+#define HI3670_CLK_GATE_EDC0 25
+#define HI3670_CLK_GATE_LDI0 26
+#define HI3670_CLK_GATE_LDI1FREQ 27
+#define HI3670_CLK_GATE_BRG 28
+#define HI3670_ACLK_GATE_ASC 29
+#define HI3670_CLK_GATE_DSS_AXI_MM 30
+#define HI3670_CLK_GATE_MMBUF 31
+#define HI3670_PCLK_GATE_MMBUF 32
+#define HI3670_CLK_GATE_ATDIV_VIVO 33
+
+/* clk in media2 */
+#define HI3670_CLK_GATE_VDECFREQ 0
+#define HI3670_CLK_GATE_VENCFREQ 1
+#define HI3670_CLK_GATE_ICSFREQ 2
+
+#endif /* __DT_BINDINGS_CLOCK_HI3670_H */
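
One thing worth noticing in the new Hi3670 header: the constants are grouped per clock controller block (stub, crg, sctrl, pmuctrl, pctrl, iomcu, media1, media2) and each group restarts from 0, so a numeric index is only meaningful together with the controller it belongs to. A hedged illustration (the values are copied from the header above; the assertion and its message are mine):

    /* The same index appears in several blocks on purpose. */
    #define HI3670_CLK_STUB_CLUSTER0 0   /* stub clock block */
    #define HI3670_CLKIN_SYS         0   /* crg block */
    #define HI3670_CLK_ANDGT_IOPERI  0   /* sctrl block */

    _Static_assert(HI3670_CLK_STUB_CLUSTER0 == HI3670_CLKIN_SYS,
                   "indices collide across blocks; always pair them with the right provider");
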
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 7ad171b8f3bf..b3cef297d5df 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -273,6 +273,9 @@
#define IMX6QDL_CLK_MLB_PODF 260
#define IMX6QDL_CLK_EPIT1 261
#define IMX6QDL_CLK_EPIT2 262
-#define IMX6QDL_CLK_END 263
+#define IMX6QDL_CLK_MMDC_P0_IPG 263
+#define IMX6QDL_CLK_DCIC1 264
+#define IMX6QDL_CLK_DCIC2 265
+#define IMX6QDL_CLK_END 266
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
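
The i.MX6 hunks here and below all follow the same pattern: new IDs are appended and the *_CLK_END marker is bumped so it stays one past the highest ID, because clock providers conventionally use it to size their lookup table. A rough sketch of that convention (the table and function names are hypothetical, not taken from the patch):

    #define IMX6QDL_CLK_DCIC2 265            /* newly added ID from the hunk above */
    #define IMX6QDL_CLK_END   266            /* must remain highest ID + 1 */

    static void *clk_table[IMX6QDL_CLK_END]; /* hypothetical provider-side table */

    static void example_register(void *clk)
    {
        clk_table[IMX6QDL_CLK_DCIC2] = clk;  /* in bounds only because CLK_END was bumped */
    }
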
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index e14573e293c5..cfbfc39d1878 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -175,6 +175,8 @@
#define IMX6SL_CLK_SSI2_IPG 162
#define IMX6SL_CLK_SSI3_IPG 163
#define IMX6SL_CLK_SPDIF_GCLK 164
-#define IMX6SL_CLK_END 165
+#define IMX6SL_CLK_MMDC_P0_IPG 165
+#define IMX6SL_CLK_MMDC_P1_IPG 166
+#define IMX6SL_CLK_END 167
#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 1036475f997d..f446710fe63d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -203,7 +203,8 @@
#define IMX6SLL_CLK_GPIO4 176
#define IMX6SLL_CLK_GPIO5 177
#define IMX6SLL_CLK_GPIO6 178
+#define IMX6SLL_CLK_MMDC_P1_IPG 179
-#define IMX6SLL_CLK_END 179
+#define IMX6SLL_CLK_END 180
#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6sx-clock.h b/include/dt-bindings/clock/imx6sx-clock.h
index cd2d6c570e86..fb420c734774 100644
--- a/include/dt-bindings/clock/imx6sx-clock.h
+++ b/include/dt-bindings/clock/imx6sx-clock.h
@@ -279,6 +279,7 @@
#define IMX6SX_CLK_LVDS2_OUT 266
#define IMX6SX_CLK_LVDS2_IN 267
#define IMX6SX_CLK_ANACLK2 268
-#define IMX6SX_CLK_CLK_END 269
+#define IMX6SX_CLK_MMDC_P1_IPG 269
+#define IMX6SX_CLK_CLK_END 270
#endif /* __DT_BINDINGS_CLOCK_IMX6SX_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index f8e0476a3a0e..f718aac9b9da 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -259,7 +259,8 @@
#define IMX6UL_CLK_GPIO3 246
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
+#define IMX6UL_CLK_MMDC_P1_IPG 249
-#define IMX6UL_CLK_END 249
+#define IMX6UL_CLK_END 250
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
new file mode 100644
index 000000000000..21d872e69cb1
--- /dev/null
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7ULP_H
+#define __DT_BINDINGS_CLOCK_IMX7ULP_H
+
+/* SCG1 */
+
+#define IMX7ULP_CLK_DUMMY 0
+#define IMX7ULP_CLK_ROSC 1
+#define IMX7ULP_CLK_SOSC 2
+#define IMX7ULP_CLK_FIRC 3
+#define IMX7ULP_CLK_SPLL_PRE_SEL 4
+#define IMX7ULP_CLK_SPLL_PRE_DIV 5
+#define IMX7ULP_CLK_SPLL 6
+#define IMX7ULP_CLK_SPLL_POST_DIV1 7
+#define IMX7ULP_CLK_SPLL_POST_DIV2 8
+#define IMX7ULP_CLK_SPLL_PFD0 9
+#define IMX7ULP_CLK_SPLL_PFD1 10
+#define IMX7ULP_CLK_SPLL_PFD2 11
+#define IMX7ULP_CLK_SPLL_PFD3 12
+#define IMX7ULP_CLK_SPLL_PFD_SEL 13
+#define IMX7ULP_CLK_SPLL_SEL 14
+#define IMX7ULP_CLK_APLL_PRE_SEL 15
+#define IMX7ULP_CLK_APLL_PRE_DIV 16
+#define IMX7ULP_CLK_APLL 17
+#define IMX7ULP_CLK_APLL_POST_DIV1 18
+#define IMX7ULP_CLK_APLL_POST_DIV2 19
+#define IMX7ULP_CLK_APLL_PFD0 20
+#define IMX7ULP_CLK_APLL_PFD1 21
+#define IMX7ULP_CLK_APLL_PFD2 22
+#define IMX7ULP_CLK_APLL_PFD3 23
+#define IMX7ULP_CLK_APLL_PFD_SEL 24
+#define IMX7ULP_CLK_APLL_SEL 25
+#define IMX7ULP_CLK_UPLL 26
+#define IMX7ULP_CLK_SYS_SEL 27
+#define IMX7ULP_CLK_CORE_DIV 28
+#define IMX7ULP_CLK_BUS_DIV 29
+#define IMX7ULP_CLK_PLAT_DIV 30
+#define IMX7ULP_CLK_DDR_SEL 31
+#define IMX7ULP_CLK_DDR_DIV 32
+#define IMX7ULP_CLK_NIC_SEL 33
+#define IMX7ULP_CLK_NIC0_DIV 34
+#define IMX7ULP_CLK_GPU_DIV 35
+#define IMX7ULP_CLK_NIC1_DIV 36
+#define IMX7ULP_CLK_NIC1_BUS_DIV 37
+#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+#define IMX7ULP_CLK_MIPI_PLL 39
+#define IMX7ULP_CLK_SIRC 40
+#define IMX7ULP_CLK_SOSC_BUS_CLK 41
+#define IMX7ULP_CLK_FIRC_BUS_CLK 42
+#define IMX7ULP_CLK_SPLL_BUS_CLK 43
+#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
+#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
+
+#define IMX7ULP_CLK_SCG1_END 46
+
+/* PCC2 */
+#define IMX7ULP_CLK_DMA1 0
+#define IMX7ULP_CLK_RGPIO2P1 1
+#define IMX7ULP_CLK_FLEXBUS 2
+#define IMX7ULP_CLK_SEMA42_1 3
+#define IMX7ULP_CLK_DMA_MUX1 4
+#define IMX7ULP_CLK_SNVS 5
+#define IMX7ULP_CLK_CAAM 6
+#define IMX7ULP_CLK_LPTPM4 7
+#define IMX7ULP_CLK_LPTPM5 8
+#define IMX7ULP_CLK_LPIT1 9
+#define IMX7ULP_CLK_LPSPI2 10
+#define IMX7ULP_CLK_LPSPI3 11
+#define IMX7ULP_CLK_LPI2C4 12
+#define IMX7ULP_CLK_LPI2C5 13
+#define IMX7ULP_CLK_LPUART4 14
+#define IMX7ULP_CLK_LPUART5 15
+#define IMX7ULP_CLK_FLEXIO1 16
+#define IMX7ULP_CLK_USB0 17
+#define IMX7ULP_CLK_USB1 18
+#define IMX7ULP_CLK_USB_PHY 19
+#define IMX7ULP_CLK_USB_PL301 20
+#define IMX7ULP_CLK_USDHC0 21
+#define IMX7ULP_CLK_USDHC1 22
+#define IMX7ULP_CLK_WDG1 23
+#define IMX7ULP_CLK_WDG2 24
+
+#define IMX7ULP_CLK_PCC2_END 25
+
+/* PCC3 */
+#define IMX7ULP_CLK_LPTPM6 0
+#define IMX7ULP_CLK_LPTPM7 1
+#define IMX7ULP_CLK_LPI2C6 2
+#define IMX7ULP_CLK_LPI2C7 3
+#define IMX7ULP_CLK_LPUART6 4
+#define IMX7ULP_CLK_LPUART7 5
+#define IMX7ULP_CLK_VIU 6
+#define IMX7ULP_CLK_DSI 7
+#define IMX7ULP_CLK_LCDIF 8
+#define IMX7ULP_CLK_MMDC 9
+#define IMX7ULP_CLK_PCTLC 10
+#define IMX7ULP_CLK_PCTLD 11
+#define IMX7ULP_CLK_PCTLE 12
+#define IMX7ULP_CLK_PCTLF 13
+#define IMX7ULP_CLK_GPU3D 14
+#define IMX7ULP_CLK_GPU2D 15
+
+#define IMX7ULP_CLK_PCC3_END 16
+
+/* SMC1 */
+#define IMX7ULP_CLK_ARM 0
+
+#define IMX7ULP_CLK_SMC1_END 1
+
+#endif /* __DT_BINDINGS_CLOCK_IMX7ULP_H */
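
As with the Hi3670 header, the new i.MX7ULP constants are partitioned per hardware clock module (SCG1, PCC2, PCC3, SMC1), and each partition carries its own END marker, so a provider for one module sizes its table independently of the others. A small sketch under that assumption (the array names are hypothetical):

    /* Sketch: each clock module gets its own table, sized by its own END marker. */
    #define IMX7ULP_CLK_SCG1_END 46
    #define IMX7ULP_CLK_PCC2_END 25

    static void *scg1_clks[IMX7ULP_CLK_SCG1_END]; /* hypothetical per-module tables */
    static void *pcc2_clks[IMX7ULP_CLK_PCC2_END];
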
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
new file mode 100644
index 000000000000..4236818e3be5
--- /dev/null
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX_H
+#define __DT_BINDINGS_CLOCK_IMX_H
+
+/* SCU Clocks */
+
+#define IMX_CLK_DUMMY 0
+
+/* CPU */
+#define IMX_A35_CLK 1
+
+/* LSIO SS */
+#define IMX_LSIO_MEM_CLK 2
+#define IMX_LSIO_BUS_CLK 3
+#define IMX_LSIO_PWM0_CLK 10
+#define IMX_LSIO_PWM1_CLK 11
+#define IMX_LSIO_PWM2_CLK 12
+#define IMX_LSIO_PWM3_CLK 13
+#define IMX_LSIO_PWM4_CLK 14
+#define IMX_LSIO_PWM5_CLK 15
+#define IMX_LSIO_PWM6_CLK 16
+#define IMX_LSIO_PWM7_CLK 17
+#define IMX_LSIO_GPT0_CLK 18
+#define IMX_LSIO_GPT1_CLK 19
+#define IMX_LSIO_GPT2_CLK 20
+#define IMX_LSIO_GPT3_CLK 21
+#define IMX_LSIO_GPT4_CLK 22
+#define IMX_LSIO_FSPI0_CLK 23
+#define IMX_LSIO_FSPI1_CLK 24
+
+/* Connectivity SS */
+#define IMX_CONN_AXI_CLK_ROOT 30
+#define IMX_CONN_AHB_CLK_ROOT 31
+#define IMX_CONN_IPG_CLK_ROOT 32
+#define IMX_CONN_SDHC0_CLK 40
+#define IMX_CONN_SDHC1_CLK 41
+#define IMX_CONN_SDHC2_CLK 42
+#define IMX_CONN_ENET0_ROOT_CLK 43
+#define IMX_CONN_ENET0_BYPASS_CLK 44
+#define IMX_CONN_ENET0_RGMII_CLK 45
+#define IMX_CONN_ENET1_ROOT_CLK 46
+#define IMX_CONN_ENET1_BYPASS_CLK 47
+#define IMX_CONN_ENET1_RGMII_CLK 48
+#define IMX_CONN_GPMI_BCH_IO_CLK 49
+#define IMX_CONN_GPMI_BCH_CLK 50
+#define IMX_CONN_USB2_ACLK 51
+#define IMX_CONN_USB2_BUS_CLK 52
+#define IMX_CONN_USB2_LPM_CLK 53
+
+/* HSIO SS */
+#define IMX_HSIO_AXI_CLK 60
+#define IMX_HSIO_PER_CLK 61
+
+/* Display controller SS */
+#define IMX_DC_AXI_EXT_CLK 70
+#define IMX_DC_AXI_INT_CLK 71
+#define IMX_DC_CFG_CLK 72
+#define IMX_DC0_PLL0_CLK 80
+#define IMX_DC0_PLL1_CLK 81
+#define IMX_DC0_DISP0_CLK 82
+#define IMX_DC0_DISP1_CLK 83
+
+/* MIPI-LVDS SS */
+#define IMX_MIPI_IPG_CLK 90
+#define IMX_MIPI0_PIXEL_CLK 100
+#define IMX_MIPI0_BYPASS_CLK 101
+#define IMX_MIPI0_LVDS_PIXEL_CLK 102
+#define IMX_MIPI0_LVDS_BYPASS_CLK 103
+#define IMX_MIPI0_LVDS_PHY_CLK 104
+#define IMX_MIPI0_I2C0_CLK 105
+#define IMX_MIPI0_I2C1_CLK 106
+#define IMX_MIPI0_PWM0_CLK 107
+#define IMX_MIPI1_PIXEL_CLK 108
+#define IMX_MIPI1_BYPASS_CLK 109
+#define IMX_MIPI1_LVDS_PIXEL_CLK 110
+#define IMX_MIPI1_LVDS_BYPASS_CLK 111
+#define IMX_MIPI1_LVDS_PHY_CLK 112
+#define IMX_MIPI1_I2C0_CLK 113
+#define IMX_MIPI1_I2C1_CLK 114
+#define IMX_MIPI1_PWM0_CLK 115
+
+/* IMG SS */
+#define IMX_IMG_AXI_CLK 120
+#define IMX_IMG_IPG_CLK 121
+#define IMX_IMG_PXL_CLK 122
+
+/* MIPI-CSI SS */
+#define IMX_CSI0_CORE_CLK 130
+#define IMX_CSI0_ESC_CLK 131
+#define IMX_CSI0_PWM0_CLK 132
+#define IMX_CSI0_I2C0_CLK 133
+
+/* PARALLEL CSI SS */
+#define IMX_PARALLEL_CSI_DPLL_CLK 140
+#define IMX_PARALLEL_CSI_PIXEL_CLK 141
+#define IMX_PARALLEL_CSI_MCLK_CLK 142
+
+/* VPU SS */
+#define IMX_VPU_ENC_CLK 150
+#define IMX_VPU_DEC_CLK 151
+
+/* GPU SS */
+#define IMX_GPU0_CORE_CLK 160
+#define IMX_GPU0_SHADER_CLK 161
+
+/* ADMA SS */
+#define IMX_ADMA_IPG_CLK_ROOT 165
+#define IMX_ADMA_UART0_CLK 170
+#define IMX_ADMA_UART1_CLK 171
+#define IMX_ADMA_UART2_CLK 172
+#define IMX_ADMA_UART3_CLK 173
+#define IMX_ADMA_SPI0_CLK 174
+#define IMX_ADMA_SPI1_CLK 175
+#define IMX_ADMA_SPI2_CLK 176
+#define IMX_ADMA_SPI3_CLK 177
+#define IMX_ADMA_CAN0_CLK 178
+#define IMX_ADMA_CAN1_CLK 179
+#define IMX_ADMA_CAN2_CLK 180
+#define IMX_ADMA_I2C0_CLK 181
+#define IMX_ADMA_I2C1_CLK 182
+#define IMX_ADMA_I2C2_CLK 183
+#define IMX_ADMA_I2C3_CLK 184
+#define IMX_ADMA_FTM0_CLK 185
+#define IMX_ADMA_FTM1_CLK 186
+#define IMX_ADMA_ADC0_CLK 187
+#define IMX_ADMA_PWM_CLK 188
+#define IMX_ADMA_LCD_CLK 189
+
+#define IMX_SCU_CLK_END 190
+
+/* LPCG clocks */
+
+/* LSIO SS LPCG */
+#define IMX_LSIO_LPCG_PWM0_IPG_CLK 0
+#define IMX_LSIO_LPCG_PWM0_IPG_S_CLK 1
+#define IMX_LSIO_LPCG_PWM0_IPG_HF_CLK 2
+#define IMX_LSIO_LPCG_PWM0_IPG_SLV_CLK 3
+#define IMX_LSIO_LPCG_PWM0_IPG_MSTR_CLK 4
+#define IMX_LSIO_LPCG_PWM1_IPG_CLK 5
+#define IMX_LSIO_LPCG_PWM1_IPG_S_CLK 6
+#define IMX_LSIO_LPCG_PWM1_IPG_HF_CLK 7
+#define IMX_LSIO_LPCG_PWM1_IPG_SLV_CLK 8
+#define IMX_LSIO_LPCG_PWM1_IPG_MSTR_CLK 9
+#define IMX_LSIO_LPCG_PWM2_IPG_CLK 10
+#define IMX_LSIO_LPCG_PWM2_IPG_S_CLK 11
+#define IMX_LSIO_LPCG_PWM2_IPG_HF_CLK 12
+#define IMX_LSIO_LPCG_PWM2_IPG_SLV_CLK 13
+#define IMX_LSIO_LPCG_PWM2_IPG_MSTR_CLK 14
+#define IMX_LSIO_LPCG_PWM3_IPG_CLK 15
+#define IMX_LSIO_LPCG_PWM3_IPG_S_CLK 16
+#define IMX_LSIO_LPCG_PWM3_IPG_HF_CLK 17
+#define IMX_LSIO_LPCG_PWM3_IPG_SLV_CLK 18
+#define IMX_LSIO_LPCG_PWM3_IPG_MSTR_CLK 19
+#define IMX_LSIO_LPCG_PWM4_IPG_CLK 20
+#define IMX_LSIO_LPCG_PWM4_IPG_S_CLK 21
+#define IMX_LSIO_LPCG_PWM4_IPG_HF_CLK 22
+#define IMX_LSIO_LPCG_PWM4_IPG_SLV_CLK 23
+#define IMX_LSIO_LPCG_PWM4_IPG_MSTR_CLK 24
+#define IMX_LSIO_LPCG_PWM5_IPG_CLK 25
+#define IMX_LSIO_LPCG_PWM5_IPG_S_CLK 26
+#define IMX_LSIO_LPCG_PWM5_IPG_HF_CLK 27
+#define IMX_LSIO_LPCG_PWM5_IPG_SLV_CLK 28
+#define IMX_LSIO_LPCG_PWM5_IPG_MSTR_CLK 29
+#define IMX_LSIO_LPCG_PWM6_IPG_CLK 30
+#define IMX_LSIO_LPCG_PWM6_IPG_S_CLK 31
+#define IMX_LSIO_LPCG_PWM6_IPG_HF_CLK 32
+#define IMX_LSIO_LPCG_PWM6_IPG_SLV_CLK 33
+#define IMX_LSIO_LPCG_PWM6_IPG_MSTR_CLK 34
+#define IMX_LSIO_LPCG_PWM7_IPG_CLK 35
+#define IMX_LSIO_LPCG_PWM7_IPG_S_CLK 36
+#define IMX_LSIO_LPCG_PWM7_IPG_HF_CLK 37
+#define IMX_LSIO_LPCG_PWM7_IPG_SLV_CLK 38
+#define IMX_LSIO_LPCG_PWM7_IPG_MSTR_CLK 39
+#define IMX_LSIO_LPCG_GPT0_IPG_CLK 40
+#define IMX_LSIO_LPCG_GPT0_IPG_S_CLK 41
+#define IMX_LSIO_LPCG_GPT0_IPG_HF_CLK 42
+#define IMX_LSIO_LPCG_GPT0_IPG_SLV_CLK 43
+#define IMX_LSIO_LPCG_GPT0_IPG_MSTR_CLK 44
+#define IMX_LSIO_LPCG_GPT1_IPG_CLK 45
+#define IMX_LSIO_LPCG_GPT1_IPG_S_CLK 46
+#define IMX_LSIO_LPCG_GPT1_IPG_HF_CLK 47
+#define IMX_LSIO_LPCG_GPT1_IPG_SLV_CLK 48
+#define IMX_LSIO_LPCG_GPT1_IPG_MSTR_CLK 49
+#define IMX_LSIO_LPCG_GPT2_IPG_CLK 50
+#define IMX_LSIO_LPCG_GPT2_IPG_S_CLK 51
+#define IMX_LSIO_LPCG_GPT2_IPG_HF_CLK 52
+#define IMX_LSIO_LPCG_GPT2_IPG_SLV_CLK 53
+#define IMX_LSIO_LPCG_GPT2_IPG_MSTR_CLK 54
+#define IMX_LSIO_LPCG_GPT3_IPG_CLK 55
+#define IMX_LSIO_LPCG_GPT3_IPG_S_CLK 56
+#define IMX_LSIO_LPCG_GPT3_IPG_HF_CLK 57
+#define IMX_LSIO_LPCG_GPT3_IPG_SLV_CLK 58
+#define IMX_LSIO_LPCG_GPT3_IPG_MSTR_CLK 59
+#define IMX_LSIO_LPCG_GPT4_IPG_CLK 60
+#define IMX_LSIO_LPCG_GPT4_IPG_S_CLK 61
+#define IMX_LSIO_LPCG_GPT4_IPG_HF_CLK 62
+#define IMX_LSIO_LPCG_GPT4_IPG_SLV_CLK 63
+#define IMX_LSIO_LPCG_GPT4_IPG_MSTR_CLK 64
+#define IMX_LSIO_LPCG_FSPI0_HCLK 65
+#define IMX_LSIO_LPCG_FSPI0_IPG_CLK 66
+#define IMX_LSIO_LPCG_FSPI0_IPG_S_CLK 67
+#define IMX_LSIO_LPCG_FSPI0_IPG_SFCK 68
+#define IMX_LSIO_LPCG_FSPI1_HCLK 69
+#define IMX_LSIO_LPCG_FSPI1_IPG_CLK 70
+#define IMX_LSIO_LPCG_FSPI1_IPG_S_CLK 71
+#define IMX_LSIO_LPCG_FSPI1_IPG_SFCK 72
+
+#define IMX_LSIO_LPCG_CLK_END 73
+
+/* Connectivity SS LPCG */
+#define IMX_CONN_LPCG_SDHC0_IPG_CLK 0
+#define IMX_CONN_LPCG_SDHC0_PER_CLK 1
+#define IMX_CONN_LPCG_SDHC0_HCLK 2
+#define IMX_CONN_LPCG_SDHC1_IPG_CLK 3
+#define IMX_CONN_LPCG_SDHC1_PER_CLK 4
+#define IMX_CONN_LPCG_SDHC1_HCLK 5
+#define IMX_CONN_LPCG_SDHC2_IPG_CLK 6
+#define IMX_CONN_LPCG_SDHC2_PER_CLK 7
+#define IMX_CONN_LPCG_SDHC2_HCLK 8
+#define IMX_CONN_LPCG_GPMI_APB_CLK 9
+#define IMX_CONN_LPCG_GPMI_BCH_APB_CLK 10
+#define IMX_CONN_LPCG_GPMI_BCH_IO_CLK 11
+#define IMX_CONN_LPCG_GPMI_BCH_CLK 12
+#define IMX_CONN_LPCG_APBHDMA_CLK 13
+#define IMX_CONN_LPCG_ENET0_ROOT_CLK 14
+#define IMX_CONN_LPCG_ENET0_TX_CLK 15
+#define IMX_CONN_LPCG_ENET0_AHB_CLK 16
+#define IMX_CONN_LPCG_ENET0_IPG_S_CLK 17
+#define IMX_CONN_LPCG_ENET0_IPG_CLK 18
+
+#define IMX_CONN_LPCG_ENET1_ROOT_CLK 19
+#define IMX_CONN_LPCG_ENET1_TX_CLK 20
+#define IMX_CONN_LPCG_ENET1_AHB_CLK 21
+#define IMX_CONN_LPCG_ENET1_IPG_S_CLK 22
+#define IMX_CONN_LPCG_ENET1_IPG_CLK 23
+
+#define IMX_CONN_LPCG_CLK_END 24
+
+/* ADMA SS LPCG */
+#define IMX_ADMA_LPCG_UART0_IPG_CLK 0
+#define IMX_ADMA_LPCG_UART0_BAUD_CLK 1
+#define IMX_ADMA_LPCG_UART1_IPG_CLK 2
+#define IMX_ADMA_LPCG_UART1_BAUD_CLK 3
+#define IMX_ADMA_LPCG_UART2_IPG_CLK 4
+#define IMX_ADMA_LPCG_UART2_BAUD_CLK 5
+#define IMX_ADMA_LPCG_UART3_IPG_CLK 6
+#define IMX_ADMA_LPCG_UART3_BAUD_CLK 7
+#define IMX_ADMA_LPCG_SPI0_IPG_CLK 8
+#define IMX_ADMA_LPCG_SPI1_IPG_CLK 9
+#define IMX_ADMA_LPCG_SPI2_IPG_CLK 10
+#define IMX_ADMA_LPCG_SPI3_IPG_CLK 11
+#define IMX_ADMA_LPCG_SPI0_CLK 12
+#define IMX_ADMA_LPCG_SPI1_CLK 13
+#define IMX_ADMA_LPCG_SPI2_CLK 14
+#define IMX_ADMA_LPCG_SPI3_CLK 15
+#define IMX_ADMA_LPCG_CAN0_IPG_CLK 16
+#define IMX_ADMA_LPCG_CAN0_IPG_PE_CLK 17
+#define IMX_ADMA_LPCG_CAN0_IPG_CHI_CLK 18
+#define IMX_ADMA_LPCG_CAN1_IPG_CLK 19
+#define IMX_ADMA_LPCG_CAN1_IPG_PE_CLK 20
+#define IMX_ADMA_LPCG_CAN1_IPG_CHI_CLK 21
+#define IMX_ADMA_LPCG_CAN2_IPG_CLK 22
+#define IMX_ADMA_LPCG_CAN2_IPG_PE_CLK 23
+#define IMX_ADMA_LPCG_CAN2_IPG_CHI_CLK 24
+#define IMX_ADMA_LPCG_I2C0_CLK 25
+#define IMX_ADMA_LPCG_I2C1_CLK 26
+#define IMX_ADMA_LPCG_I2C2_CLK 27
+#define IMX_ADMA_LPCG_I2C3_CLK 28
+#define IMX_ADMA_LPCG_I2C0_IPG_CLK 29
+#define IMX_ADMA_LPCG_I2C1_IPG_CLK 30
+#define IMX_ADMA_LPCG_I2C2_IPG_CLK 31
+#define IMX_ADMA_LPCG_I2C3_IPG_CLK 32
+#define IMX_ADMA_LPCG_FTM0_CLK 33
+#define IMX_ADMA_LPCG_FTM1_CLK 34
+#define IMX_ADMA_LPCG_FTM0_IPG_CLK 35
+#define IMX_ADMA_LPCG_FTM1_IPG_CLK 36
+#define IMX_ADMA_LPCG_PWM_HI_CLK 37
+#define IMX_ADMA_LPCG_PWM_IPG_CLK 38
+#define IMX_ADMA_LPCG_LCD_PIX_CLK 39
+#define IMX_ADMA_LPCG_LCD_APB_CLK 40
+
+#define IMX_ADMA_LPCG_CLK_END 41
+
+#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
new file mode 100644
index 000000000000..b53be41929be
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MQ_H
+#define __DT_BINDINGS_CLOCK_IMX8MQ_H
+
+#define IMX8MQ_CLK_DUMMY 0
+#define IMX8MQ_CLK_32K 1
+#define IMX8MQ_CLK_25M 2
+#define IMX8MQ_CLK_27M 3
+#define IMX8MQ_CLK_EXT1 4
+#define IMX8MQ_CLK_EXT2 5
+#define IMX8MQ_CLK_EXT3 6
+#define IMX8MQ_CLK_EXT4 7
+
+/* ANAMIX PLL clocks */
+/* FRAC PLLs */
+/* ARM PLL */
+#define IMX8MQ_ARM_PLL_REF_SEL 8
+#define IMX8MQ_ARM_PLL_REF_DIV 9
+#define IMX8MQ_ARM_PLL 10
+#define IMX8MQ_ARM_PLL_BYPASS 11
+#define IMX8MQ_ARM_PLL_OUT 12
+
+/* GPU PLL */
+#define IMX8MQ_GPU_PLL_REF_SEL 13
+#define IMX8MQ_GPU_PLL_REF_DIV 14
+#define IMX8MQ_GPU_PLL 15
+#define IMX8MQ_GPU_PLL_BYPASS 16
+#define IMX8MQ_GPU_PLL_OUT 17
+
+/* VPU PLL */
+#define IMX8MQ_VPU_PLL_REF_SEL 18
+#define IMX8MQ_VPU_PLL_REF_DIV 19
+#define IMX8MQ_VPU_PLL 20
+#define IMX8MQ_VPU_PLL_BYPASS 21
+#define IMX8MQ_VPU_PLL_OUT 22
+
+/* AUDIO PLL1 */
+#define IMX8MQ_AUDIO_PLL1_REF_SEL 23
+#define IMX8MQ_AUDIO_PLL1_REF_DIV 24
+#define IMX8MQ_AUDIO_PLL1 25
+#define IMX8MQ_AUDIO_PLL1_BYPASS 26
+#define IMX8MQ_AUDIO_PLL1_OUT 27
+
+/* AUDIO PLL2 */
+#define IMX8MQ_AUDIO_PLL2_REF_SEL 28
+#define IMX8MQ_AUDIO_PLL2_REF_DIV 29
+#define IMX8MQ_AUDIO_PLL2 30
+#define IMX8MQ_AUDIO_PLL2_BYPASS 31
+#define IMX8MQ_AUDIO_PLL2_OUT 32
+
+/* VIDEO PLL1 */
+#define IMX8MQ_VIDEO_PLL1_REF_SEL 33
+#define IMX8MQ_VIDEO_PLL1_REF_DIV 34
+#define IMX8MQ_VIDEO_PLL1 35
+#define IMX8MQ_VIDEO_PLL1_BYPASS 36
+#define IMX8MQ_VIDEO_PLL1_OUT 37
+
+/* SYS1 PLL */
+#define IMX8MQ_SYS1_PLL1_REF_SEL 38
+#define IMX8MQ_SYS1_PLL1_REF_DIV 39
+#define IMX8MQ_SYS1_PLL1 40
+#define IMX8MQ_SYS1_PLL1_OUT 41
+#define IMX8MQ_SYS1_PLL1_OUT_DIV 42
+#define IMX8MQ_SYS1_PLL2 43
+#define IMX8MQ_SYS1_PLL2_DIV 44
+#define IMX8MQ_SYS1_PLL2_OUT 45
+
+/* SYS2 PLL */
+#define IMX8MQ_SYS2_PLL1_REF_SEL 46
+#define IMX8MQ_SYS2_PLL1_REF_DIV 47
+#define IMX8MQ_SYS2_PLL1 48
+#define IMX8MQ_SYS2_PLL1_OUT 49
+#define IMX8MQ_SYS2_PLL1_OUT_DIV 50
+#define IMX8MQ_SYS2_PLL2 51
+#define IMX8MQ_SYS2_PLL2_DIV 52
+#define IMX8MQ_SYS2_PLL2_OUT 53
+
+/* SYS3 PLL */
+#define IMX8MQ_SYS3_PLL1_REF_SEL 54
+#define IMX8MQ_SYS3_PLL1_REF_DIV 55
+#define IMX8MQ_SYS3_PLL1 56
+#define IMX8MQ_SYS3_PLL1_OUT 57
+#define IMX8MQ_SYS3_PLL1_OUT_DIV 58
+#define IMX8MQ_SYS3_PLL2 59
+#define IMX8MQ_SYS3_PLL2_DIV 60
+#define IMX8MQ_SYS3_PLL2_OUT 61
+
+/* DRAM PLL */
+#define IMX8MQ_DRAM_PLL1_REF_SEL 62
+#define IMX8MQ_DRAM_PLL1_REF_DIV 63
+#define IMX8MQ_DRAM_PLL1 64
+#define IMX8MQ_DRAM_PLL1_OUT 65
+#define IMX8MQ_DRAM_PLL1_OUT_DIV 66
+#define IMX8MQ_DRAM_PLL2 67
+#define IMX8MQ_DRAM_PLL2_DIV 68
+#define IMX8MQ_DRAM_PLL2_OUT 69
+
+/* SYS PLL DIV */
+#define IMX8MQ_SYS1_PLL_40M 70
+#define IMX8MQ_SYS1_PLL_80M 71
+#define IMX8MQ_SYS1_PLL_100M 72
+#define IMX8MQ_SYS1_PLL_133M 73
+#define IMX8MQ_SYS1_PLL_160M 74
+#define IMX8MQ_SYS1_PLL_200M 75
+#define IMX8MQ_SYS1_PLL_266M 76
+#define IMX8MQ_SYS1_PLL_400M 77
+#define IMX8MQ_SYS1_PLL_800M 78
+
+#define IMX8MQ_SYS2_PLL_50M 79
+#define IMX8MQ_SYS2_PLL_100M 80
+#define IMX8MQ_SYS2_PLL_125M 81
+#define IMX8MQ_SYS2_PLL_166M 82
+#define IMX8MQ_SYS2_PLL_200M 83
+#define IMX8MQ_SYS2_PLL_250M 84
+#define IMX8MQ_SYS2_PLL_333M 85
+#define IMX8MQ_SYS2_PLL_500M 86
+#define IMX8MQ_SYS2_PLL_1000M 87
+
+/* CCM ROOT clocks */
+/* A53 */
+#define IMX8MQ_CLK_A53_SRC 88
+#define IMX8MQ_CLK_A53_CG 89
+#define IMX8MQ_CLK_A53_DIV 90
+/* M4 */
+#define IMX8MQ_CLK_M4_SRC 91
+#define IMX8MQ_CLK_M4_CG 92
+#define IMX8MQ_CLK_M4_DIV 93
+/* VPU */
+#define IMX8MQ_CLK_VPU_SRC 94
+#define IMX8MQ_CLK_VPU_CG 95
+#define IMX8MQ_CLK_VPU_DIV 96
+/* GPU CORE */
+#define IMX8MQ_CLK_GPU_CORE_SRC 97
+#define IMX8MQ_CLK_GPU_CORE_CG 98
+#define IMX8MQ_CLK_GPU_CORE_DIV 99
+/* GPU SHADER */
+#define IMX8MQ_CLK_GPU_SHADER_SRC 100
+#define IMX8MQ_CLK_GPU_SHADER_CG 101
+#define IMX8MQ_CLK_GPU_SHADER_DIV 102
+
+/* BUS TYPE */
+/* MAIN AXI */
+#define IMX8MQ_CLK_MAIN_AXI 103
+/* ENET AXI */
+#define IMX8MQ_CLK_ENET_AXI 104
+/* NAND_USDHC_BUS */
+#define IMX8MQ_CLK_NAND_USDHC_BUS 105
+/* VPU BUS */
+#define IMX8MQ_CLK_VPU_BUS 106
+/* DISP_AXI */
+#define IMX8MQ_CLK_DISP_AXI 107
+/* DISP APB */
+#define IMX8MQ_CLK_DISP_APB 108
+/* DISP RTRM */
+#define IMX8MQ_CLK_DISP_RTRM 109
+/* USB_BUS */
+#define IMX8MQ_CLK_USB_BUS 110
+/* GPU_AXI */
+#define IMX8MQ_CLK_GPU_AXI 111
+/* GPU_AHB */
+#define IMX8MQ_CLK_GPU_AHB 112
+/* NOC */
+#define IMX8MQ_CLK_NOC 113
+/* NOC_APB */
+#define IMX8MQ_CLK_NOC_APB 115
+
+/* AHB */
+#define IMX8MQ_CLK_AHB 116
+/* AUDIO AHB */
+#define IMX8MQ_CLK_AUDIO_AHB 117
+
+/* DRAM_ALT */
+#define IMX8MQ_CLK_DRAM_ALT 118
+/* DRAM APB */
+#define IMX8MQ_CLK_DRAM_APB 119
+/* VPU_G1 */
+#define IMX8MQ_CLK_VPU_G1 120
+/* VPU_G2 */
+#define IMX8MQ_CLK_VPU_G2 121
+/* DISP_DTRC */
+#define IMX8MQ_CLK_DISP_DTRC 122
+/* DISP_DC8000 */
+#define IMX8MQ_CLK_DISP_DC8000 123
+/* PCIE_CTRL */
+#define IMX8MQ_CLK_PCIE1_CTRL 124
+/* PCIE_PHY */
+#define IMX8MQ_CLK_PCIE1_PHY 125
+/* PCIE_AUX */
+#define IMX8MQ_CLK_PCIE1_AUX 126
+/* DC_PIXEL */
+#define IMX8MQ_CLK_DC_PIXEL 127
+/* LCDIF_PIXEL */
+#define IMX8MQ_CLK_LCDIF_PIXEL 128
+/* SAI1~6 */
+#define IMX8MQ_CLK_SAI1 129
+
+#define IMX8MQ_CLK_SAI2 130
+
+#define IMX8MQ_CLK_SAI3 131
+
+#define IMX8MQ_CLK_SAI4 132
+
+#define IMX8MQ_CLK_SAI5 133
+
+#define IMX8MQ_CLK_SAI6 134
+/* SPDIF1 */
+#define IMX8MQ_CLK_SPDIF1 135
+/* SPDIF2 */
+#define IMX8MQ_CLK_SPDIF2 136
+/* ENET_REF */
+#define IMX8MQ_CLK_ENET_REF 137
+/* ENET_TIMER */
+#define IMX8MQ_CLK_ENET_TIMER 138
+/* ENET_PHY */
+#define IMX8MQ_CLK_ENET_PHY_REF 139
+/* NAND */
+#define IMX8MQ_CLK_NAND 140
+/* QSPI */
+#define IMX8MQ_CLK_QSPI 141
+/* USDHC1 */
+#define IMX8MQ_CLK_USDHC1 142
+/* USDHC2 */
+#define IMX8MQ_CLK_USDHC2 143
+/* I2C1 */
+#define IMX8MQ_CLK_I2C1 144
+/* I2C2 */
+#define IMX8MQ_CLK_I2C2 145
+/* I2C3 */
+#define IMX8MQ_CLK_I2C3 146
+/* I2C4 */
+#define IMX8MQ_CLK_I2C4 147
+/* UART1 */
+#define IMX8MQ_CLK_UART1 148
+/* UART2 */
+#define IMX8MQ_CLK_UART2 149
+/* UART3 */
+#define IMX8MQ_CLK_UART3 150
+/* UART4 */
+#define IMX8MQ_CLK_UART4 151
+/* USB_CORE_REF */
+#define IMX8MQ_CLK_USB_CORE_REF 152
+/* USB_PHY_REF */
+#define IMX8MQ_CLK_USB_PHY_REF 163
+/* ECSPI1 */
+#define IMX8MQ_CLK_ECSPI1 164
+/* ECSPI2 */
+#define IMX8MQ_CLK_ECSPI2 165
+/* PWM1 */
+#define IMX8MQ_CLK_PWM1 166
+/* PWM2 */
+#define IMX8MQ_CLK_PWM2 167
+/* PWM3 */
+#define IMX8MQ_CLK_PWM3 168
+/* PWM4 */
+#define IMX8MQ_CLK_PWM4 169
+/* GPT1 */
+#define IMX8MQ_CLK_GPT1 170
+/* WDOG */
+#define IMX8MQ_CLK_WDOG 171
+/* WRCLK */
+#define IMX8MQ_CLK_WRCLK 172
+/* DSI_CORE */
+#define IMX8MQ_CLK_DSI_CORE 173
+/* DSI_PHY */
+#define IMX8MQ_CLK_DSI_PHY_REF 174
+/* DSI_DBI */
+#define IMX8MQ_CLK_DSI_DBI 175
+/* DSI_ESC */
+#define IMX8MQ_CLK_DSI_ESC 176
+/* CSI1_CORE */
+#define IMX8MQ_CLK_CSI1_CORE 177
+/* CSI1_PHY */
+#define IMX8MQ_CLK_CSI1_PHY_REF 178
+/* CSI_ESC */
+#define IMX8MQ_CLK_CSI1_ESC 179
+/* CSI2_CORE */
+#define IMX8MQ_CLK_CSI2_CORE 170
+/* CSI2_PHY */
+#define IMX8MQ_CLK_CSI2_PHY_REF 181
+/* CSI2_ESC */
+#define IMX8MQ_CLK_CSI2_ESC 182
+/* PCIE2_CTRL */
+#define IMX8MQ_CLK_PCIE2_CTRL 183
+/* PCIE2_PHY */
+#define IMX8MQ_CLK_PCIE2_PHY 184
+/* PCIE2_AUX */
+#define IMX8MQ_CLK_PCIE2_AUX 185
+/* ECSPI3 */
+#define IMX8MQ_CLK_ECSPI3 186
+
+/* CCGR clocks */
+#define IMX8MQ_CLK_A53_ROOT 187
+#define IMX8MQ_CLK_DRAM_ROOT 188
+#define IMX8MQ_CLK_ECSPI1_ROOT 189
+#define IMX8MQ_CLK_ECSPI2_ROOT 180
+#define IMX8MQ_CLK_ECSPI3_ROOT 181
+#define IMX8MQ_CLK_ENET1_ROOT 182
+#define IMX8MQ_CLK_GPT1_ROOT 193
+#define IMX8MQ_CLK_I2C1_ROOT 194
+#define IMX8MQ_CLK_I2C2_ROOT 195
+#define IMX8MQ_CLK_I2C3_ROOT 196
+#define IMX8MQ_CLK_I2C4_ROOT 197
+#define IMX8MQ_CLK_M4_ROOT 198
+#define IMX8MQ_CLK_PCIE1_ROOT 199
+#define IMX8MQ_CLK_PCIE2_ROOT 200
+#define IMX8MQ_CLK_PWM1_ROOT 201
+#define IMX8MQ_CLK_PWM2_ROOT 202
+#define IMX8MQ_CLK_PWM3_ROOT 203
+#define IMX8MQ_CLK_PWM4_ROOT 204
+#define IMX8MQ_CLK_QSPI_ROOT 205
+#define IMX8MQ_CLK_SAI1_ROOT 206
+#define IMX8MQ_CLK_SAI2_ROOT 207
+#define IMX8MQ_CLK_SAI3_ROOT 208
+#define IMX8MQ_CLK_SAI4_ROOT 209
+#define IMX8MQ_CLK_SAI5_ROOT 210
+#define IMX8MQ_CLK_SAI6_ROOT 212
+#define IMX8MQ_CLK_UART1_ROOT 213
+#define IMX8MQ_CLK_UART2_ROOT 214
+#define IMX8MQ_CLK_UART3_ROOT 215
+#define IMX8MQ_CLK_UART4_ROOT 216
+#define IMX8MQ_CLK_USB1_CTRL_ROOT 217
+#define IMX8MQ_CLK_USB2_CTRL_ROOT 218
+#define IMX8MQ_CLK_USB1_PHY_ROOT 219
+#define IMX8MQ_CLK_USB2_PHY_ROOT 220
+#define IMX8MQ_CLK_USDHC1_ROOT 221
+#define IMX8MQ_CLK_USDHC2_ROOT 222
+#define IMX8MQ_CLK_WDOG1_ROOT 223
+#define IMX8MQ_CLK_WDOG2_ROOT 224
+#define IMX8MQ_CLK_WDOG3_ROOT 225
+#define IMX8MQ_CLK_GPU_ROOT 226
+#define IMX8MQ_CLK_HEVC_ROOT 227
+#define IMX8MQ_CLK_AVC_ROOT 228
+#define IMX8MQ_CLK_VP9_ROOT 229
+#define IMX8MQ_CLK_HEVC_INTER_ROOT 230
+#define IMX8MQ_CLK_DISP_ROOT 231
+#define IMX8MQ_CLK_HDMI_ROOT 232
+#define IMX8MQ_CLK_HDMI_PHY_ROOT 233
+#define IMX8MQ_CLK_VPU_DEC_ROOT 234
+#define IMX8MQ_CLK_CSI1_ROOT 235
+#define IMX8MQ_CLK_CSI2_ROOT 236
+#define IMX8MQ_CLK_RAWNAND_ROOT 237
+#define IMX8MQ_CLK_SDMA1_ROOT 238
+#define IMX8MQ_CLK_SDMA2_ROOT 239
+#define IMX8MQ_CLK_VPU_G1_ROOT 240
+#define IMX8MQ_CLK_VPU_G2_ROOT 241
+
+/* SCCG PLL GATE */
+#define IMX8MQ_SYS1_PLL_OUT 232
+#define IMX8MQ_SYS2_PLL_OUT 243
+#define IMX8MQ_SYS3_PLL_OUT 244
+#define IMX8MQ_DRAM_PLL_OUT 245
+
+#define IMX8MQ_GPT_3M_CLK 246
+
+#define IMX8MQ_CLK_IPG_ROOT 247
+#define IMX8MQ_CLK_IPG_AUDIO_ROOT 248
+#define IMX8MQ_CLK_SAI1_IPG 249
+#define IMX8MQ_CLK_SAI2_IPG 250
+#define IMX8MQ_CLK_SAI3_IPG 251
+#define IMX8MQ_CLK_SAI4_IPG 252
+#define IMX8MQ_CLK_SAI5_IPG 253
+#define IMX8MQ_CLK_SAI6_IPG 254
+
+/* DSI AHB/IPG clocks */
+/* rxesc clock */
+#define IMX8MQ_CLK_DSI_AHB 255
+/* txesc clock */
+#define IMX8MQ_CLK_DSI_IPG_DIV 256
+
+#define IMX8MQ_CLK_TMU_ROOT 265
+
+/* Display root clocks */
+#define IMX8MQ_CLK_DISP_AXI_ROOT 266
+#define IMX8MQ_CLK_DISP_APB_ROOT 267
+#define IMX8MQ_CLK_DISP_RTRM_ROOT 268
+
+#define IMX8MQ_CLK_OCOTP_ROOT 269
+
+#define IMX8MQ_CLK_DRAM_ALT_ROOT 270
+#define IMX8MQ_CLK_DRAM_CORE 271
+
+#define IMX8MQ_CLK_MU_ROOT 272
+#define IMX8MQ_VIDEO2_PLL_OUT 273
+
+#define IMX8MQ_CLK_CLKO2 274
+
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275
+
+#define IMX8MQ_CLK_END 276
+#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/jz4725b-cgu.h
new file mode 100644
index 000000000000..460bbeff6ab8
--- /dev/null
+++ b/include/dt-bindings/clock/jz4725b-cgu.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4725b-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__
+
+#define JZ4725B_CLK_EXT 0
+#define JZ4725B_CLK_OSC32K 1
+#define JZ4725B_CLK_PLL 2
+#define JZ4725B_CLK_PLL_HALF 3
+#define JZ4725B_CLK_CCLK 4
+#define JZ4725B_CLK_HCLK 5
+#define JZ4725B_CLK_PCLK 6
+#define JZ4725B_CLK_MCLK 7
+#define JZ4725B_CLK_IPU 8
+#define JZ4725B_CLK_LCD 9
+#define JZ4725B_CLK_I2S 10
+#define JZ4725B_CLK_SPI 11
+#define JZ4725B_CLK_MMC_MUX 12
+#define JZ4725B_CLK_UDC 13
+#define JZ4725B_CLK_UART 14
+#define JZ4725B_CLK_DMA 15
+#define JZ4725B_CLK_ADC 16
+#define JZ4725B_CLK_I2C 17
+#define JZ4725B_CLK_AIC 18
+#define JZ4725B_CLK_MMC0 19
+#define JZ4725B_CLK_MMC1 20
+#define JZ4725B_CLK_BCH 21
+#define JZ4725B_CLK_TCU 22
+#define JZ4725B_CLK_EXT512 23
+#define JZ4725B_CLK_RTC 24
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 228a5e234af0..7b24fc791146 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,6 +71,7 @@
#define MMP2_CLK_CCIC1_MIX 117
#define MMP2_CLK_CCIC1_PHY 118
#define MMP2_CLK_CCIC1_SPHY 119
+#define MMP2_CLK_SP 120
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/maxim,max77686.h b/include/dt-bindings/clock/maxim,max77686.h
index 7b28b0905869..af8261dcace1 100644
--- a/include/dt-bindings/clock/maxim,max77686.h
+++ b/include/dt-bindings/clock/maxim,max77686.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clocks for the Maxim 77686 PMIC.
*/
diff --git a/include/dt-bindings/clock/maxim,max77802.h b/include/dt-bindings/clock/maxim,max77802.h
index 997312edcbb5..51adcbaed697 100644
--- a/include/dt-bindings/clock/maxim,max77802.h
+++ b/include/dt-bindings/clock/maxim,max77802.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clocks for the Maxim 77802 PMIC.
*/
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index a60f47b49231..5fe2923382d0 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -103,5 +103,9 @@
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
#define CLKID_NAND_CLK 112
+#define CLKID_ABP 124
+#define CLKID_PERIPH 126
+#define CLKID_AXI 128
+#define CLKID_L2_DRAM 130
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt7629-clk.h b/include/dt-bindings/clock/mt7629-clk.h
new file mode 100644
index 000000000000..ad8e6d7f0154
--- /dev/null
+++ b/include/dt-bindings/clock/mt7629-clk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7629_H
+#define _DT_BINDINGS_CLK_MT7629_H
+
+/* TOPCKGEN */
+#define CLK_TOP_TO_U2_PHY 0
+#define CLK_TOP_TO_U2_PHY_1P 1
+#define CLK_TOP_PCIE0_PIPE_EN 2
+#define CLK_TOP_PCIE1_PIPE_EN 3
+#define CLK_TOP_SSUSB_TX250M 4
+#define CLK_TOP_SSUSB_EQ_RX250M 5
+#define CLK_TOP_SSUSB_CDR_REF 6
+#define CLK_TOP_SSUSB_CDR_FB 7
+#define CLK_TOP_SATA_ASIC 8
+#define CLK_TOP_SATA_RBC 9
+#define CLK_TOP_TO_USB3_SYS 10
+#define CLK_TOP_P1_1MHZ 11
+#define CLK_TOP_4MHZ 12
+#define CLK_TOP_P0_1MHZ 13
+#define CLK_TOP_ETH_500M 14
+#define CLK_TOP_TXCLK_SRC_PRE 15
+#define CLK_TOP_RTC 16
+#define CLK_TOP_PWM_QTR_26M 17
+#define CLK_TOP_CPUM_TCK_IN 18
+#define CLK_TOP_TO_USB3_DA_TOP 19
+#define CLK_TOP_MEMPLL 20
+#define CLK_TOP_DMPLL 21
+#define CLK_TOP_DMPLL_D4 22
+#define CLK_TOP_DMPLL_D8 23
+#define CLK_TOP_SYSPLL_D2 24
+#define CLK_TOP_SYSPLL1_D2 25
+#define CLK_TOP_SYSPLL1_D4 26
+#define CLK_TOP_SYSPLL1_D8 27
+#define CLK_TOP_SYSPLL1_D16 28
+#define CLK_TOP_SYSPLL2_D2 29
+#define CLK_TOP_SYSPLL2_D4 30
+#define CLK_TOP_SYSPLL2_D8 31
+#define CLK_TOP_SYSPLL_D5 32
+#define CLK_TOP_SYSPLL3_D2 33
+#define CLK_TOP_SYSPLL3_D4 34
+#define CLK_TOP_SYSPLL_D7 35
+#define CLK_TOP_SYSPLL4_D2 36
+#define CLK_TOP_SYSPLL4_D4 37
+#define CLK_TOP_SYSPLL4_D16 38
+#define CLK_TOP_UNIVPLL 39
+#define CLK_TOP_UNIVPLL1_D2 40
+#define CLK_TOP_UNIVPLL1_D4 41
+#define CLK_TOP_UNIVPLL1_D8 42
+#define CLK_TOP_UNIVPLL_D3 43
+#define CLK_TOP_UNIVPLL2_D2 44
+#define CLK_TOP_UNIVPLL2_D4 45
+#define CLK_TOP_UNIVPLL2_D8 46
+#define CLK_TOP_UNIVPLL2_D16 47
+#define CLK_TOP_UNIVPLL_D5 48
+#define CLK_TOP_UNIVPLL3_D2 49
+#define CLK_TOP_UNIVPLL3_D4 50
+#define CLK_TOP_UNIVPLL3_D16 51
+#define CLK_TOP_UNIVPLL_D7 52
+#define CLK_TOP_UNIVPLL_D80_D4 53
+#define CLK_TOP_UNIV48M 54
+#define CLK_TOP_SGMIIPLL_D2 55
+#define CLK_TOP_CLKXTAL_D4 56
+#define CLK_TOP_HD_FAXI 57
+#define CLK_TOP_FAXI 58
+#define CLK_TOP_F_FAUD_INTBUS 59
+#define CLK_TOP_AP2WBHIF_HCLK 60
+#define CLK_TOP_10M_INFRAO 61
+#define CLK_TOP_MSDC30_1 62
+#define CLK_TOP_SPI 63
+#define CLK_TOP_SF 64
+#define CLK_TOP_FLASH 65
+#define CLK_TOP_TO_USB3_REF 66
+#define CLK_TOP_TO_USB3_MCU 67
+#define CLK_TOP_TO_USB3_DMA 68
+#define CLK_TOP_FROM_TOP_AHB 69
+#define CLK_TOP_FROM_TOP_AXI 70
+#define CLK_TOP_PCIE1_MAC_EN 71
+#define CLK_TOP_PCIE0_MAC_EN 72
+#define CLK_TOP_AXI_SEL 73
+#define CLK_TOP_MEM_SEL 74
+#define CLK_TOP_DDRPHYCFG_SEL 75
+#define CLK_TOP_ETH_SEL 76
+#define CLK_TOP_PWM_SEL 77
+#define CLK_TOP_F10M_REF_SEL 78
+#define CLK_TOP_NFI_INFRA_SEL 79
+#define CLK_TOP_FLASH_SEL 80
+#define CLK_TOP_UART_SEL 81
+#define CLK_TOP_SPI0_SEL 82
+#define CLK_TOP_SPI1_SEL 83
+#define CLK_TOP_MSDC50_0_SEL 84
+#define CLK_TOP_MSDC30_0_SEL 85
+#define CLK_TOP_MSDC30_1_SEL 86
+#define CLK_TOP_AP2WBMCU_SEL 87
+#define CLK_TOP_AP2WBHIF_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_AUD_INTBUS_SEL 90
+#define CLK_TOP_PMICSPI_SEL 91
+#define CLK_TOP_SCP_SEL 92
+#define CLK_TOP_ATB_SEL 93
+#define CLK_TOP_HIF_SEL 94
+#define CLK_TOP_SATA_SEL 95
+#define CLK_TOP_U2_SEL 96
+#define CLK_TOP_AUD1_SEL 97
+#define CLK_TOP_AUD2_SEL 98
+#define CLK_TOP_IRRX_SEL 99
+#define CLK_TOP_IRTX_SEL 100
+#define CLK_TOP_SATA_MCU_SEL 101
+#define CLK_TOP_PCIE0_MCU_SEL 102
+#define CLK_TOP_PCIE1_MCU_SEL 103
+#define CLK_TOP_SSUSB_MCU_SEL 104
+#define CLK_TOP_CRYPTO_SEL 105
+#define CLK_TOP_SGMII_REF_1_SEL 106
+#define CLK_TOP_10M_SEL 107
+#define CLK_TOP_NR_CLK 108
+
+/* INFRACFG */
+#define CLK_INFRA_MUX1_SEL 0
+#define CLK_INFRA_DBGCLK_PD 1
+#define CLK_INFRA_TRNG_PD 2
+#define CLK_INFRA_DEVAPC_PD 3
+#define CLK_INFRA_APXGPT_PD 4
+#define CLK_INFRA_SEJ_PD 5
+#define CLK_INFRA_NR_CLK 6
+
+/* PERICFG */
+#define CLK_PERIBUS_SEL 0
+#define CLK_PERI_PWM1_PD 1
+#define CLK_PERI_PWM2_PD 2
+#define CLK_PERI_PWM3_PD 3
+#define CLK_PERI_PWM4_PD 4
+#define CLK_PERI_PWM5_PD 5
+#define CLK_PERI_PWM6_PD 6
+#define CLK_PERI_PWM7_PD 7
+#define CLK_PERI_PWM_PD 8
+#define CLK_PERI_AP_DMA_PD 9
+#define CLK_PERI_MSDC30_1_PD 10
+#define CLK_PERI_UART0_PD 11
+#define CLK_PERI_UART1_PD 12
+#define CLK_PERI_UART2_PD 13
+#define CLK_PERI_UART3_PD 14
+#define CLK_PERI_BTIF_PD 15
+#define CLK_PERI_I2C0_PD 16
+#define CLK_PERI_SPI0_PD 17
+#define CLK_PERI_SNFI_PD 18
+#define CLK_PERI_NFI_PD 19
+#define CLK_PERI_NFIECC_PD 20
+#define CLK_PERI_FLASH_PD 21
+#define CLK_PERI_NR_CLK 22
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIV2PLL 2
+#define CLK_APMIXED_ETH1PLL 3
+#define CLK_APMIXED_ETH2PLL 4
+#define CLK_APMIXED_SGMIPLL 5
+#define CLK_APMIXED_MAIN_CORE_EN 6
+#define CLK_APMIXED_NR_CLK 7
+
+/* SSUSBSYS */
+#define CLK_SSUSB_U2_PHY_1P_EN 0
+#define CLK_SSUSB_U2_PHY_EN 1
+#define CLK_SSUSB_REF_EN 2
+#define CLK_SSUSB_SYS_EN 3
+#define CLK_SSUSB_MCU_EN 4
+#define CLK_SSUSB_DMA_EN 5
+#define CLK_SSUSB_NR_CLK 6
+
+/* PCIESYS */
+#define CLK_PCIE_P1_AUX_EN 0
+#define CLK_PCIE_P1_OBFF_EN 1
+#define CLK_PCIE_P1_AHB_EN 2
+#define CLK_PCIE_P1_AXI_EN 3
+#define CLK_PCIE_P1_MAC_EN 4
+#define CLK_PCIE_P1_PIPE_EN 5
+#define CLK_PCIE_P0_AUX_EN 6
+#define CLK_PCIE_P0_OBFF_EN 7
+#define CLK_PCIE_P0_AHB_EN 8
+#define CLK_PCIE_P0_AXI_EN 9
+#define CLK_PCIE_P0_MAC_EN 10
+#define CLK_PCIE_P0_PIPE_EN 11
+#define CLK_PCIE_NR_CLK 12
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_GP0_EN 3
+#define CLK_ETH_ESW_EN 4
+#define CLK_ETH_NR_CLK 5
+
+/* SGMIISYS */
+#define CLK_SGMII_TX_EN 0
+#define CLK_SGMII_RX_EN 1
+#define CLK_SGMII_CDR_REF 2
+#define CLK_SGMII_CDR_FB 3
+#define CLK_SGMII_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT7629_H */
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
new file mode 100644
index 000000000000..4f7a2d2320bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_CAM_CC_SDM845_H
+
+/* CAM_CC clock registers */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CPAS_AHB_CLK 9
+#define CAM_CC_CPHY_RX_CLK_SRC 10
+#define CAM_CC_CSI0PHYTIMER_CLK 11
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 12
+#define CAM_CC_CSI1PHYTIMER_CLK 13
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 14
+#define CAM_CC_CSI2PHYTIMER_CLK 15
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 16
+#define CAM_CC_CSI3PHYTIMER_CLK 17
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 18
+#define CAM_CC_CSIPHY0_CLK 19
+#define CAM_CC_CSIPHY1_CLK 20
+#define CAM_CC_CSIPHY2_CLK 21
+#define CAM_CC_CSIPHY3_CLK 22
+#define CAM_CC_FAST_AHB_CLK_SRC 23
+#define CAM_CC_FD_CORE_CLK 24
+#define CAM_CC_FD_CORE_CLK_SRC 25
+#define CAM_CC_FD_CORE_UAR_CLK 26
+#define CAM_CC_ICP_APB_CLK 27
+#define CAM_CC_ICP_ATB_CLK 28
+#define CAM_CC_ICP_CLK 29
+#define CAM_CC_ICP_CLK_SRC 30
+#define CAM_CC_ICP_CTI_CLK 31
+#define CAM_CC_ICP_TS_CLK 32
+#define CAM_CC_IFE_0_AXI_CLK 33
+#define CAM_CC_IFE_0_CLK 34
+#define CAM_CC_IFE_0_CLK_SRC 35
+#define CAM_CC_IFE_0_CPHY_RX_CLK 36
+#define CAM_CC_IFE_0_CSID_CLK 37
+#define CAM_CC_IFE_0_CSID_CLK_SRC 38
+#define CAM_CC_IFE_0_DSP_CLK 39
+#define CAM_CC_IFE_1_AXI_CLK 40
+#define CAM_CC_IFE_1_CLK 41
+#define CAM_CC_IFE_1_CLK_SRC 42
+#define CAM_CC_IFE_1_CPHY_RX_CLK 43
+#define CAM_CC_IFE_1_CSID_CLK 44
+#define CAM_CC_IFE_1_CSID_CLK_SRC 45
+#define CAM_CC_IFE_1_DSP_CLK 46
+#define CAM_CC_IFE_LITE_CLK 47
+#define CAM_CC_IFE_LITE_CLK_SRC 48
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 49
+#define CAM_CC_IFE_LITE_CSID_CLK 50
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 51
+#define CAM_CC_IPE_0_AHB_CLK 52
+#define CAM_CC_IPE_0_AREG_CLK 53
+#define CAM_CC_IPE_0_AXI_CLK 54
+#define CAM_CC_IPE_0_CLK 55
+#define CAM_CC_IPE_0_CLK_SRC 56
+#define CAM_CC_IPE_1_AHB_CLK 57
+#define CAM_CC_IPE_1_AREG_CLK 58
+#define CAM_CC_IPE_1_AXI_CLK 59
+#define CAM_CC_IPE_1_CLK 60
+#define CAM_CC_IPE_1_CLK_SRC 61
+#define CAM_CC_JPEG_CLK 62
+#define CAM_CC_JPEG_CLK_SRC 63
+#define CAM_CC_LRME_CLK 64
+#define CAM_CC_LRME_CLK_SRC 65
+#define CAM_CC_MCLK0_CLK 66
+#define CAM_CC_MCLK0_CLK_SRC 67
+#define CAM_CC_MCLK1_CLK 68
+#define CAM_CC_MCLK1_CLK_SRC 69
+#define CAM_CC_MCLK2_CLK 70
+#define CAM_CC_MCLK2_CLK_SRC 71
+#define CAM_CC_MCLK3_CLK 72
+#define CAM_CC_MCLK3_CLK_SRC 73
+#define CAM_CC_PLL0 74
+#define CAM_CC_PLL0_OUT_EVEN 75
+#define CAM_CC_PLL1 76
+#define CAM_CC_PLL1_OUT_EVEN 77
+#define CAM_CC_PLL2 78
+#define CAM_CC_PLL2_OUT_EVEN 79
+#define CAM_CC_PLL3 80
+#define CAM_CC_PLL3_OUT_EVEN 81
+#define CAM_CC_SLOW_AHB_CLK_SRC 82
+#define CAM_CC_SOC_AHB_CLK 83
+#define CAM_CC_SYS_TMR_CLK 84
+
+/* CAM_CC Resets */
+#define TITAN_CAM_CC_CCI_BCR 0
+#define TITAN_CAM_CC_CPAS_BCR 1
+#define TITAN_CAM_CC_CSI0PHY_BCR 2
+#define TITAN_CAM_CC_CSI1PHY_BCR 3
+#define TITAN_CAM_CC_CSI2PHY_BCR 4
+#define TITAN_CAM_CC_MCLK0_BCR 5
+#define TITAN_CAM_CC_MCLK1_BCR 6
+#define TITAN_CAM_CC_MCLK2_BCR 7
+#define TITAN_CAM_CC_MCLK3_BCR 8
+#define TITAN_CAM_CC_TITAN_TOP_BCR 9
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define IPE_1_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h
index 7d20eedfee98..e02742fc81cc 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h
@@ -319,5 +319,7 @@
#define CE3_SRC 303
#define CE3_CORE_CLK 304
#define CE3_H_CLK 305
+#define PLL16 306
+#define PLL17 307
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h
index 75b07cf5eed0..db80f2ee571b 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h
@@ -235,6 +235,15 @@
#define GCC_RX1_USB2_CLKREF_CLK 218
#define GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK 219
#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 220
+#define GCC_EDP_CLKREF_CLK 221
+#define GCC_MSS_CFG_AHB_CLK 222
+#define GCC_MSS_Q6_BIMC_AXI_CLK 223
+#define GCC_MSS_SNOC_AXI_CLK 224
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 225
+#define GCC_DCC_AHB_CLK 226
+#define GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK 227
+#define GCC_MMSS_GPLL0_DIV_CLK 228
+#define GCC_MSS_GPLL0_DIV_CLK 229
#define GCC_SYSTEM_NOC_BCR 0
#define GCC_CONFIG_NOC_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 58a242e656b1..ba84bbab5c83 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -180,6 +180,11 @@
#define USB30_MASTER_CLK_SRC 163
#define USB30_MOCK_UTMI_CLK_SRC 164
#define USB3_PHY_AUX_CLK_SRC 165
+#define GCC_USB3_CLKREF_CLK 166
+#define GCC_HDMI_CLKREF_CLK 167
+#define GCC_UFS_CLKREF_CLK 168
+#define GCC_PCIE_CLKREF_CLK 169
+#define GCC_RX1_USB2_CLKREF_CLK 170
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
@@ -204,5 +209,94 @@
#define GCC_TSIF_BCR 16
#define GCC_UFS_BCR 17
#define GCC_USB_30_BCR 18
+#define GCC_SYSTEM_NOC_BCR 19
+#define GCC_CONFIG_NOC_BCR 20
+#define GCC_AHB2PHY_EAST_BCR 21
+#define GCC_IMEM_BCR 22
+#define GCC_PIMEM_BCR 23
+#define GCC_MMSS_BCR 24
+#define GCC_QDSS_BCR 25
+#define GCC_WCSS_BCR 26
+#define GCC_BLSP1_BCR 27
+#define GCC_BLSP1_UART1_BCR 28
+#define GCC_BLSP1_UART2_BCR 29
+#define GCC_BLSP1_UART3_BCR 30
+#define GCC_CM_PHY_REFGEN1_BCR 31
+#define GCC_CM_PHY_REFGEN2_BCR 32
+#define GCC_BLSP2_BCR 33
+#define GCC_BLSP2_UART1_BCR 34
+#define GCC_BLSP2_UART2_BCR 35
+#define GCC_BLSP2_UART3_BCR 36
+#define GCC_SRAM_SENSOR_BCR 37
+#define GCC_PRNG_BCR 38
+#define GCC_TSIF_0_RESET 39
+#define GCC_TSIF_1_RESET 40
+#define GCC_TCSR_BCR 41
+#define GCC_BOOT_ROM_BCR 42
+#define GCC_MSG_RAM_BCR 43
+#define GCC_TLMM_BCR 44
+#define GCC_MPM_BCR 45
+#define GCC_SEC_CTRL_BCR 46
+#define GCC_SPMI_BCR 47
+#define GCC_SPDM_BCR 48
+#define GCC_CE1_BCR 49
+#define GCC_BIMC_BCR 50
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 51
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 52
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 53
+#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 54
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 55
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR 56
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR 57
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_APB2JTAG_BCR 66
+#define GCC_RBCPR_CX_BCR 67
+#define GCC_RBCPR_MX_BCR 68
+#define GCC_USB3_PHY_BCR 69
+#define GCC_USB3PHY_PHY_BCR 70
+#define GCC_USB3_DP_PHY_BCR 71
+#define GCC_SSC_BCR 72
+#define GCC_SSC_RESET 73
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 74
+#define GCC_PCIE_0_LINK_DOWN_BCR 75
+#define GCC_PCIE_0_PHY_BCR 76
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 77
+#define GCC_PCIE_PHY_BCR 78
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 79
+#define GCC_PCIE_PHY_CFG_AHB_BCR 80
+#define GCC_PCIE_PHY_COM_BCR 81
+#define GCC_GPU_BCR 82
+#define GCC_SPSS_BCR 83
+#define GCC_OBT_ODT_BCR 84
+#define GCC_VS_BCR 85
+#define GCC_MSS_VS_RESET 86
+#define GCC_GPU_VS_RESET 87
+#define GCC_APC0_VS_RESET 88
+#define GCC_APC1_VS_RESET 89
+#define GCC_CNOC_BUS_TIMEOUT8_BCR 90
+#define GCC_CNOC_BUS_TIMEOUT9_BCR 91
+#define GCC_CNOC_BUS_TIMEOUT10_BCR 92
+#define GCC_CNOC_BUS_TIMEOUT11_BCR 93
+#define GCC_CNOC_BUS_TIMEOUT12_BCR 94
+#define GCC_CNOC_BUS_TIMEOUT13_BCR 95
+#define GCC_CNOC_BUS_TIMEOUT14_BCR 96
+#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 97
+#define GCC_AGGRE1_NOC_BCR 98
+#define GCC_AGGRE2_NOC_BCR 99
+#define GCC_DCC_BCR 100
+#define GCC_QREFS_VBG_CAL_BCR 101
+#define GCC_IPA_BCR 102
+#define GCC_GLM_BCR 103
+#define GCC_SKL_BCR 104
+#define GCC_MSMPU_BCR 105
+#define GCC_QUSB2PHY_PRIM_BCR 106
+#define GCC_QUSB2PHY_SEC_BCR 107
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
new file mode 100644
index 000000000000..6ceb55ed72c6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS404_H
+
+#define GCC_APSS_AHB_CLK_SRC 0
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK_SRC 1
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK_SRC 2
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define GCC_BLSP1_UART0_APPS_CLK_SRC 11
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 12
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 13
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 14
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK_SRC 15
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP2_UART0_APPS_CLK_SRC 17
+#define GCC_BYTE0_CLK_SRC 18
+#define GCC_EMAC_CLK_SRC 19
+#define GCC_EMAC_PTP_CLK_SRC 20
+#define GCC_ESC0_CLK_SRC 21
+#define GCC_APSS_AHB_CLK 22
+#define GCC_APSS_AXI_CLK 23
+#define GCC_BIMC_APSS_AXI_CLK 24
+#define GCC_BIMC_GFX_CLK 25
+#define GCC_BIMC_MDSS_CLK 26
+#define GCC_BLSP1_AHB_CLK 27
+#define GCC_BLSP1_QUP0_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP0_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 33
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 34
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 35
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 37
+#define GCC_BLSP1_UART0_APPS_CLK 38
+#define GCC_BLSP1_UART1_APPS_CLK 39
+#define GCC_BLSP1_UART2_APPS_CLK 40
+#define GCC_BLSP1_UART3_APPS_CLK 41
+#define GCC_BLSP2_AHB_CLK 42
+#define GCC_BLSP2_QUP0_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP0_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART0_APPS_CLK 45
+#define GCC_BOOT_ROM_AHB_CLK 46
+#define GCC_DCC_CLK 47
+#define GCC_GENI_IR_H_CLK 48
+#define GCC_ETH_AXI_CLK 49
+#define GCC_ETH_PTP_CLK 50
+#define GCC_ETH_RGMII_CLK 51
+#define GCC_ETH_SLAVE_AHB_CLK 52
+#define GCC_GENI_IR_S_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MDSS_AHB_CLK 57
+#define GCC_MDSS_AXI_CLK 58
+#define GCC_MDSS_BYTE0_CLK 59
+#define GCC_MDSS_ESC0_CLK 60
+#define GCC_MDSS_HDMI_APP_CLK 61
+#define GCC_MDSS_HDMI_PCLK_CLK 62
+#define GCC_MDSS_MDP_CLK 63
+#define GCC_MDSS_PCLK0_CLK 64
+#define GCC_MDSS_VSYNC_CLK 65
+#define GCC_OXILI_AHB_CLK 66
+#define GCC_OXILI_GFX3D_CLK 67
+#define GCC_PCIE_0_AUX_CLK 68
+#define GCC_PCIE_0_CFG_AHB_CLK 69
+#define GCC_PCIE_0_MSTR_AXI_CLK 70
+#define GCC_PCIE_0_PIPE_CLK 71
+#define GCC_PCIE_0_SLV_AXI_CLK 72
+#define GCC_PCNOC_USB2_CLK 73
+#define GCC_PCNOC_USB3_CLK 74
+#define GCC_PDM2_CLK 75
+#define GCC_PDM_AHB_CLK 76
+#define GCC_VSYNC_CLK_SRC 77
+#define GCC_PRNG_AHB_CLK 78
+#define GCC_PWM0_XO512_CLK 79
+#define GCC_PWM1_XO512_CLK 80
+#define GCC_PWM2_XO512_CLK 81
+#define GCC_SDCC1_AHB_CLK 82
+#define GCC_SDCC1_APPS_CLK 83
+#define GCC_SDCC1_ICE_CORE_CLK 84
+#define GCC_SDCC2_AHB_CLK 85
+#define GCC_SDCC2_APPS_CLK 86
+#define GCC_SYS_NOC_USB3_CLK 87
+#define GCC_USB20_MOCK_UTMI_CLK 88
+#define GCC_USB2A_PHY_SLEEP_CLK 89
+#define GCC_USB30_MASTER_CLK 90
+#define GCC_USB30_MOCK_UTMI_CLK 91
+#define GCC_USB30_SLEEP_CLK 92
+#define GCC_USB3_PHY_AUX_CLK 93
+#define GCC_USB3_PHY_PIPE_CLK 94
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 95
+#define GCC_USB_HS_SYSTEM_CLK 96
+#define GCC_GFX3D_CLK_SRC 97
+#define GCC_GP1_CLK_SRC 98
+#define GCC_GP2_CLK_SRC 99
+#define GCC_GP3_CLK_SRC 100
+#define GCC_GPLL0_OUT_MAIN 101
+#define GCC_GPLL1_OUT_MAIN 102
+#define GCC_GPLL3_OUT_MAIN 103
+#define GCC_GPLL4_OUT_MAIN 104
+#define GCC_HDMI_APP_CLK_SRC 105
+#define GCC_HDMI_PCLK_CLK_SRC 106
+#define GCC_MDP_CLK_SRC 107
+#define GCC_PCIE_0_AUX_CLK_SRC 108
+#define GCC_PCIE_0_PIPE_CLK_SRC 109
+#define GCC_PCLK0_CLK_SRC 110
+#define GCC_PDM2_CLK_SRC 111
+#define GCC_SDCC1_APPS_CLK_SRC 112
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 115
+#define GCC_USB30_MASTER_CLK_SRC 116
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 117
+#define GCC_USB3_PHY_AUX_CLK_SRC 118
+#define GCC_USB_HS_SYSTEM_CLK_SRC 119
+#define GCC_GPLL0_AO_CLK_SRC 120
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK 122
+#define GCC_GPLL0_AO_OUT_MAIN 123
+#define GCC_GPLL0_SLEEP_CLK_SRC 124
+#define GCC_GPLL6 125
+#define GCC_GPLL6_OUT_AUX 126
+#define GCC_MDSS_MDP_VOTE_CLK 127
+#define GCC_MDSS_ROTATOR_VOTE_CLK 128
+#define GCC_BIMC_GPU_CLK 129
+#define GCC_GTCU_AHB_CLK 130
+#define GCC_GFX_TCU_CLK 131
+#define GCC_GFX_TBU_CLK 132
+#define GCC_SMMU_CFG_CLK 133
+#define GCC_APSS_TCU_CLK 134
+#define GCC_CRYPTO_AHB_CLK 135
+#define GCC_CRYPTO_AXI_CLK 136
+#define GCC_CRYPTO_CLK 137
+#define GCC_MDP_TBU_CLK 138
+#define GCC_QDSS_DAP_CLK 139
+#define GCC_DCC_XO_CLK 140
+
+#define GCC_GENI_IR_BCR 0
+#define GCC_USB_HS_BCR 1
+#define GCC_USB2_HS_PHY_ONLY_BCR 2
+#define GCC_QUSB2_PHY_BCR 3
+#define GCC_USB_HS_PHY_CFG_AHB_BCR 4
+#define GCC_USB2A_PHY_BCR 5
+#define GCC_USB3_PHY_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB3PHY_PHY_BCR 8
+#define GCC_PCIE_0_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_LINK_DOWN_BCR 11
+#define GCC_PCIEPHY_0_PHY_BCR 12
+#define GCC_EMAC_BCR 13
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
new file mode 100644
index 000000000000..468302282913
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, Craig Tatlor.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_660_H
+#define _DT_BINDINGS_CLK_MSM_GCC_660_H
+
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 0
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 1
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 2
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 3
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 4
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 5
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 6
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 7
+#define BLSP1_UART1_APPS_CLK_SRC 8
+#define BLSP1_UART2_APPS_CLK_SRC 9
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 10
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 11
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 12
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 13
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 14
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 15
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 16
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 17
+#define BLSP2_UART1_APPS_CLK_SRC 18
+#define BLSP2_UART2_APPS_CLK_SRC 19
+#define GCC_AGGRE2_UFS_AXI_CLK 20
+#define GCC_AGGRE2_USB3_AXI_CLK 21
+#define GCC_BIMC_GFX_CLK 22
+#define GCC_BIMC_HMSS_AXI_CLK 23
+#define GCC_BIMC_MSS_Q6_AXI_CLK 24
+#define GCC_BLSP1_AHB_CLK 25
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 26
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 27
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 33
+#define GCC_BLSP1_UART1_APPS_CLK 34
+#define GCC_BLSP1_UART2_APPS_CLK 35
+#define GCC_BLSP2_AHB_CLK 36
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 37
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 38
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 39
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 40
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 41
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 42
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 44
+#define GCC_BLSP2_UART1_APPS_CLK 45
+#define GCC_BLSP2_UART2_APPS_CLK 46
+#define GCC_BOOT_ROM_AHB_CLK 47
+#define GCC_CFG_NOC_USB2_AXI_CLK 48
+#define GCC_CFG_NOC_USB3_AXI_CLK 49
+#define GCC_DCC_AHB_CLK 50
+#define GCC_GP1_CLK 51
+#define GCC_GP2_CLK 52
+#define GCC_GP3_CLK 53
+#define GCC_GPU_BIMC_GFX_CLK 54
+#define GCC_GPU_CFG_AHB_CLK 55
+#define GCC_GPU_GPLL0_CLK 56
+#define GCC_GPU_GPLL0_DIV_CLK 57
+#define GCC_HMSS_DVM_BUS_CLK 58
+#define GCC_HMSS_RBCPR_CLK 59
+#define GCC_MMSS_GPLL0_CLK 60
+#define GCC_MMSS_GPLL0_DIV_CLK 61
+#define GCC_MMSS_NOC_CFG_AHB_CLK 62
+#define GCC_MMSS_SYS_NOC_AXI_CLK 63
+#define GCC_MSS_CFG_AHB_CLK 64
+#define GCC_MSS_GPLL0_DIV_CLK 65
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 66
+#define GCC_MSS_Q6_BIMC_AXI_CLK 67
+#define GCC_MSS_SNOC_AXI_CLK 68
+#define GCC_PDM2_CLK 69
+#define GCC_PDM_AHB_CLK 70
+#define GCC_PRNG_AHB_CLK 71
+#define GCC_QSPI_AHB_CLK 72
+#define GCC_QSPI_SER_CLK 73
+#define GCC_SDCC1_AHB_CLK 74
+#define GCC_SDCC1_APPS_CLK 75
+#define GCC_SDCC1_ICE_CORE_CLK 76
+#define GCC_SDCC2_AHB_CLK 77
+#define GCC_SDCC2_APPS_CLK 78
+#define GCC_UFS_AHB_CLK 79
+#define GCC_UFS_AXI_CLK 80
+#define GCC_UFS_CLKREF_CLK 81
+#define GCC_UFS_ICE_CORE_CLK 82
+#define GCC_UFS_PHY_AUX_CLK 83
+#define GCC_UFS_RX_SYMBOL_0_CLK 84
+#define GCC_UFS_RX_SYMBOL_1_CLK 85
+#define GCC_UFS_TX_SYMBOL_0_CLK 86
+#define GCC_UFS_UNIPRO_CORE_CLK 87
+#define GCC_USB20_MASTER_CLK 88
+#define GCC_USB20_MOCK_UTMI_CLK 89
+#define GCC_USB20_SLEEP_CLK 90
+#define GCC_USB30_MASTER_CLK 91
+#define GCC_USB30_MOCK_UTMI_CLK 92
+#define GCC_USB30_SLEEP_CLK 93
+#define GCC_USB3_CLKREF_CLK 94
+#define GCC_USB3_PHY_AUX_CLK 95
+#define GCC_USB3_PHY_PIPE_CLK 96
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 97
+#define GP1_CLK_SRC 98
+#define GP2_CLK_SRC 99
+#define GP3_CLK_SRC 100
+#define GPLL0 101
+#define GPLL0_EARLY 102
+#define GPLL1 103
+#define GPLL1_EARLY 104
+#define GPLL4 105
+#define GPLL4_EARLY 106
+#define HMSS_GPLL0_CLK_SRC 107
+#define HMSS_GPLL4_CLK_SRC 108
+#define HMSS_RBCPR_CLK_SRC 109
+#define PDM2_CLK_SRC 110
+#define QSPI_SER_CLK_SRC 111
+#define SDCC1_APPS_CLK_SRC 112
+#define SDCC1_ICE_CORE_CLK_SRC 113
+#define SDCC2_APPS_CLK_SRC 114
+#define UFS_AXI_CLK_SRC 115
+#define UFS_ICE_CORE_CLK_SRC 116
+#define UFS_PHY_AUX_CLK_SRC 117
+#define UFS_UNIPRO_CORE_CLK_SRC 118
+#define USB20_MASTER_CLK_SRC 119
+#define USB20_MOCK_UTMI_CLK_SRC 120
+#define USB30_MASTER_CLK_SRC 121
+#define USB30_MOCK_UTMI_CLK_SRC 122
+#define USB3_PHY_AUX_CLK_SRC 123
+#define GPLL0_OUT_MSSCC 124
+#define GCC_UFS_AXI_HW_CTL_CLK 125
+#define GCC_UFS_ICE_CORE_HW_CTL_CLK 126
+#define GCC_UFS_PHY_AUX_HW_CTL_CLK 127
+#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128
+#define GCC_RX0_USB2_CLKREF_CLK 129
+#define GCC_RX1_USB2_CLKREF_CLK 130
+
+#define PCIE_0_GDSC 0
+#define UFS_GDSC 1
+#define USB_30_GDSC 2
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_BCR 2
+#define GCC_USB3_DP_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB3PHY_PHY_BCR 5
+#define GCC_USB_20_BCR 6
+#define GCC_USB_30_BCR 7
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index f96fc2dbf60e..968fa65b9c42 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -194,6 +194,11 @@
#define GPLL4 184
#define GCC_CPUSS_DVM_BUS_CLK 185
#define GCC_CPUSS_GNOC_CLK 186
+#define GCC_QSPI_CORE_CLK_SRC 187
+#define GCC_QSPI_CORE_CLK 188
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
+#define GCC_LPASS_Q6_AXI_CLK 190
+#define GCC_LPASS_SWAY_CLK 191
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
new file mode 100644
index 000000000000..9690d901b50a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_CX_GMU_CLK 0
+#define GPU_CC_CXO_CLK 1
+#define GPU_CC_GMU_CLK_SRC 2
+#define GPU_CC_PLL1 3
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR 0
+#define GPUCC_GPU_CC_GMU_BCR 1
+#define GPUCC_GPU_CC_XO_BCR 2
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sdm845.h b/include/dt-bindings/clock/qcom,lpass-sdm845.h
new file mode 100644
index 000000000000..659050846f61
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sdm845.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+
+#define LPASS_Q6SS_AHBM_AON_CLK 0
+#define LPASS_Q6SS_AHBS_AON_CLK 1
+#define LPASS_QDSP6SS_XO_CLK 2
+#define LPASS_QDSP6SS_SLEEP_CLK 3
+#define LPASS_QDSP6SS_CORE_CLK 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c585b82b9c05..3658b0c14966 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -123,5 +123,9 @@
#define RPM_SMD_DIV_A_CLK3 73
#define RPM_SMD_LN_BB_CLK 74
#define RPM_SMD_LN_BB_A_CLK 75
+#define RPM_SMD_BIMC_GPU_CLK 76
+#define RPM_SMD_BIMC_GPU_A_CLK 77
+#define RPM_SMD_QPIC_CLK 78
+#define RPM_SMD_QPIC_CLK_A 79
#endif
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 0dcb3e87d44c..a267ac250143 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -1,10 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __DT_BINDINGS_CLOCK_R7S72100_H__
diff --git a/include/dt-bindings/clock/r7s9210-cpg-mssr.h b/include/dt-bindings/clock/r7s9210-cpg-mssr.h
new file mode 100644
index 000000000000..b6f85ca149aa
--- /dev/null
+++ b/include/dt-bindings/clock/r7s9210-cpg-mssr.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R7S9210 CPG Core Clocks */
+#define R7S9210_CLK_I 0
+#define R7S9210_CLK_G 1
+#define R7S9210_CLK_B 2
+#define R7S9210_CLK_P1 3
+#define R7S9210_CLK_P1C 4
+#define R7S9210_CLK_P0 5
+
+#endif /* __DT_BINDINGS_CLOCK_R7S9210_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7743-cpg-mssr.h b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
index e1d1f3c6a99e..3ba936029d9f 100644
--- a/include/dt-bindings/clock/r8a7743-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7743-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Cogent Embedded Inc.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Cogent Embedded Inc.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7743_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7744-cpg-mssr.h b/include/dt-bindings/clock/r8a7744-cpg-mssr.h
new file mode 100644
index 000000000000..2690be0c3e22
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7744-cpg-mssr.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7744 CPG Core Clocks */
+#define R8A7744_CLK_Z 0
+#define R8A7744_CLK_ZG 1
+#define R8A7744_CLK_ZTR 2
+#define R8A7744_CLK_ZTRD2 3
+#define R8A7744_CLK_ZT 4
+#define R8A7744_CLK_ZX 5
+#define R8A7744_CLK_ZS 6
+#define R8A7744_CLK_HP 7
+#define R8A7744_CLK_B 9
+#define R8A7744_CLK_LB 10
+#define R8A7744_CLK_P 11
+#define R8A7744_CLK_CL 12
+#define R8A7744_CLK_M2 13
+#define R8A7744_CLK_ZB3 15
+#define R8A7744_CLK_ZB3D2 16
+#define R8A7744_CLK_DDR 17
+#define R8A7744_CLK_SDH 18
+#define R8A7744_CLK_SD0 19
+#define R8A7744_CLK_SD2 20
+#define R8A7744_CLK_SD3 21
+#define R8A7744_CLK_MMC0 22
+#define R8A7744_CLK_MP 23
+#define R8A7744_CLK_QSPI 26
+#define R8A7744_CLK_CP 27
+#define R8A7744_CLK_RCAN 28
+#define R8A7744_CLK_R 29
+#define R8A7744_CLK_OSC 30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7744_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7745-cpg-mssr.h b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
index 56ad6f0c6760..f81066c9d192 100644
--- a/include/dt-bindings/clock/r8a7745-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7745-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Cogent Embedded Inc.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Cogent Embedded Inc.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7745_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a774a1-cpg-mssr.h b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
new file mode 100644
index 000000000000..9bc5d45ff4b5
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774a1-cpg-mssr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774a1 CPG Core Clocks */
+#define R8A774A1_CLK_Z 0
+#define R8A774A1_CLK_Z2 1
+#define R8A774A1_CLK_ZG 2
+#define R8A774A1_CLK_ZTR 3
+#define R8A774A1_CLK_ZTRD2 4
+#define R8A774A1_CLK_ZT 5
+#define R8A774A1_CLK_ZX 6
+#define R8A774A1_CLK_S0D1 7
+#define R8A774A1_CLK_S0D2 8
+#define R8A774A1_CLK_S0D3 9
+#define R8A774A1_CLK_S0D4 10
+#define R8A774A1_CLK_S0D6 11
+#define R8A774A1_CLK_S0D8 12
+#define R8A774A1_CLK_S0D12 13
+#define R8A774A1_CLK_S1D2 14
+#define R8A774A1_CLK_S1D4 15
+#define R8A774A1_CLK_S2D1 16
+#define R8A774A1_CLK_S2D2 17
+#define R8A774A1_CLK_S2D4 18
+#define R8A774A1_CLK_S3D1 19
+#define R8A774A1_CLK_S3D2 20
+#define R8A774A1_CLK_S3D4 21
+#define R8A774A1_CLK_LB 22
+#define R8A774A1_CLK_CL 23
+#define R8A774A1_CLK_ZB3 24
+#define R8A774A1_CLK_ZB3D2 25
+#define R8A774A1_CLK_ZB3D4 26
+#define R8A774A1_CLK_CR 27
+#define R8A774A1_CLK_CRD2 28
+#define R8A774A1_CLK_SD0H 29
+#define R8A774A1_CLK_SD0 30
+#define R8A774A1_CLK_SD1H 31
+#define R8A774A1_CLK_SD1 32
+#define R8A774A1_CLK_SD2H 33
+#define R8A774A1_CLK_SD2 34
+#define R8A774A1_CLK_SD3H 35
+#define R8A774A1_CLK_SD3 36
+#define R8A774A1_CLK_RPC 37
+#define R8A774A1_CLK_RPCD2 38
+#define R8A774A1_CLK_MSO 39
+#define R8A774A1_CLK_HDMI 40
+#define R8A774A1_CLK_CSI0 41
+#define R8A774A1_CLK_CP 42
+#define R8A774A1_CLK_CPEX 43
+#define R8A774A1_CLK_R 44
+#define R8A774A1_CLK_OSC 45
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774A1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a774c0-cpg-mssr.h b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
new file mode 100644
index 000000000000..8fe51b6aca28
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774c0-cpg-mssr.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774c0 CPG Core Clocks */
+#define R8A774C0_CLK_Z2 0
+#define R8A774C0_CLK_ZG 1
+#define R8A774C0_CLK_ZTR 2
+#define R8A774C0_CLK_ZT 3
+#define R8A774C0_CLK_ZX 4
+#define R8A774C0_CLK_S0D1 5
+#define R8A774C0_CLK_S0D3 6
+#define R8A774C0_CLK_S0D6 7
+#define R8A774C0_CLK_S0D12 8
+#define R8A774C0_CLK_S0D24 9
+#define R8A774C0_CLK_S1D1 10
+#define R8A774C0_CLK_S1D2 11
+#define R8A774C0_CLK_S1D4 12
+#define R8A774C0_CLK_S2D1 13
+#define R8A774C0_CLK_S2D2 14
+#define R8A774C0_CLK_S2D4 15
+#define R8A774C0_CLK_S3D1 16
+#define R8A774C0_CLK_S3D2 17
+#define R8A774C0_CLK_S3D4 18
+#define R8A774C0_CLK_S0D6C 19
+#define R8A774C0_CLK_S3D1C 20
+#define R8A774C0_CLK_S3D2C 21
+#define R8A774C0_CLK_S3D4C 22
+#define R8A774C0_CLK_LB 23
+#define R8A774C0_CLK_CL 24
+#define R8A774C0_CLK_ZB3 25
+#define R8A774C0_CLK_ZB3D2 26
+#define R8A774C0_CLK_CR 27
+#define R8A774C0_CLK_CRD2 28
+#define R8A774C0_CLK_SD0H 29
+#define R8A774C0_CLK_SD0 30
+#define R8A774C0_CLK_SD1H 31
+#define R8A774C0_CLK_SD1 32
+#define R8A774C0_CLK_SD3H 33
+#define R8A774C0_CLK_SD3 34
+#define R8A774C0_CLK_RPC 35
+#define R8A774C0_CLK_RPCD2 36
+#define R8A774C0_CLK_ZA2 37
+#define R8A774C0_CLK_ZA8 38
+#define R8A774C0_CLK_Z2D 39
+#define R8A774C0_CLK_MSO 40
+#define R8A774C0_CLK_R 41
+#define R8A774C0_CLK_OSC 42
+#define R8A774C0_CLK_LV0 43
+#define R8A774C0_CLK_LV1 44
+#define R8A774C0_CLK_CSI0 45
+#define R8A774C0_CLK_CP 46
+#define R8A774C0_CLK_CPEX 47
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774C0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
index 1625b8bf3482..c5955b56b36d 100644
--- a/include/dt-bindings/clock/r8a7790-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
index e8823410c01c..aadd06c566c0 100644
--- a/include/dt-bindings/clock/r8a7791-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
index 72ce85cb2f94..829c44db0271 100644
--- a/include/dt-bindings/clock/r8a7792-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
index 7318d45d4e7e..49c66d8ed178 100644
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -1,16 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* r8a7793 clock definition
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
index 8809b0f62d61..d1ff646c31f2 100644
--- a/include/dt-bindings/clock/r8a7793-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 93e99c3ffc8d..649f005782d0 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -1,11 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright 2013 Ideas On Board SPRL
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
index 9d720311ae3a..6314e23b51af 100644
--- a/include/dt-bindings/clock/r8a7794-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
index f047eaf261f3..92b3e2a95179 100644
--- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7795_CPG_MSSR_H__
@@ -54,7 +50,7 @@
#define R8A7795_CLK_CANFD 39
#define R8A7795_CLK_HDMI 40
#define R8A7795_CLK_CSI0 41
-#define R8A7795_CLK_CSIREF 42
+/* CLK_CSIREF was removed */
#define R8A7795_CLK_CP 43
#define R8A7795_CLK_CPEX 44
#define R8A7795_CLK_R 45
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
index 1e5942695f0d..c0957cf45840 100644
--- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2016 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2016 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A7796_CPG_MSSR_H__
@@ -60,7 +56,7 @@
#define R8A7796_CLK_CANFD 45
#define R8A7796_CLK_HDMI 46
#define R8A7796_CLK_CSI0 47
-#define R8A7796_CLK_CSIREF 48
+/* CLK_CSIREF was removed */
#define R8A7796_CLK_CP 49
#define R8A7796_CLK_CPEX 50
#define R8A7796_CLK_R 51
diff --git a/include/dt-bindings/clock/r8a77970-cpg-mssr.h b/include/dt-bindings/clock/r8a77970-cpg-mssr.h
index 4146395595b1..6145ebe66361 100644
--- a/include/dt-bindings/clock/r8a77970-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77970-cpg-mssr.h
@@ -1,11 +1,7 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2016 Renesas Electronics Corp.
* Copyright (C) 2017 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A77970_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
index 4e8ae3dee590..fd701c4e87cf 100644
--- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2017 Glider bvba
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2017 Glider bvba
*/
#ifndef __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__
@@ -39,8 +35,8 @@
#define R8A77995_CLK_CRD2 24
#define R8A77995_CLK_SD0H 25
#define R8A77995_CLK_SD0 26
-#define R8A77995_CLK_SSP2 27
-#define R8A77995_CLK_SSP1 28
+/* CLK_SSP2 was removed */
+/* CLK_SSP1 was removed */
#define R8A77995_CLK_RPC 29
#define R8A77995_CLK_RPCD2 30
#define R8A77995_CLK_ZA2 31
@@ -53,5 +49,6 @@
#define R8A77995_CLK_LV0 38
#define R8A77995_CLK_LV1 39
#define R8A77995_CLK_CP 40
+#define R8A77995_CLK_CPEX 41
#endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/renesas-cpg-mssr.h b/include/dt-bindings/clock/renesas-cpg-mssr.h
index 569a3cc33ffb..8169ad063f0a 100644
--- a/include/dt-bindings/clock/renesas-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas-cpg-mssr.h
@@ -1,10 +1,6 @@
-/*
- * Copyright (C) 2015 Renesas Electronics Corp.
+/* SPDX-License-Identifier: GPL-2.0+
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Copyright (C) 2015 Renesas Electronics Corp.
*/
#ifndef __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
#define __DT_BINDINGS_CLOCK_RENESAS_CPG_MSSR_H__
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index b9462b7d3dfe..dc2101a634be 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -139,8 +139,9 @@
#define HCLK_CIF1 470
#define HCLK_VEPU 471
#define HCLK_VDPU 472
+#define HCLK_HDMI 473
-#define CLK_NR_CLKS (HCLK_VDPU + 1)
+#define CLK_NR_CLKS (HCLK_HDMI + 1)
/* soft-reset indices */
#define SRST_MCORE 2
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index a82a0109faff..bcaa4559ab1b 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -172,13 +172,14 @@
#define PCLK_HDCP 232
#define PCLK_DCF 233
#define PCLK_SARADC 234
+#define PCLK_ACODECPHY 235
/* hclk gates */
#define HCLK_PERI 308
#define HCLK_TSP 309
#define HCLK_GMAC 310
#define HCLK_I2S0_8CH 311
-#define HCLK_I2S1_8CH 313
+#define HCLK_I2S1_8CH 312
#define HCLK_I2S2_2CH 313
#define HCLK_SPDIF_8CH 314
#define HCLK_VOP 315
diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h
index 352a7673fc69..0fb65c3f2f59 100644
--- a/include/dt-bindings/clock/s3c2410.h
+++ b/include/dt-bindings/clock/s3c2410.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clock controllers of Samsung S3C2410 and later.
*/
diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h
index aac1dcfda81c..b4656156cc0f 100644
--- a/include/dt-bindings/clock/s3c2412.h
+++ b/include/dt-bindings/clock/s3c2412.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clock controllers of Samsung S3C2412.
*/
diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h
index f3ba68a25ecb..a9d2f105d536 100644
--- a/include/dt-bindings/clock/s3c2443.h
+++ b/include/dt-bindings/clock/s3c2443.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clock controllers of Samsung S3C2443 and later.
*/
diff --git a/include/dt-bindings/clock/samsung,s2mps11.h b/include/dt-bindings/clock/samsung,s2mps11.h
index b903d7de27c9..5ece35d429ff 100644
--- a/include/dt-bindings/clock/samsung,s2mps11.h
+++ b/include/dt-bindings/clock/samsung,s2mps11.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Markus Reichl
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants clocks for the Samsung S2MPS11 PMIC.
*/
diff --git a/include/dt-bindings/clock/samsung,s3c64xx-clock.h b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
index ad95c7f50090..19d233f37e2f 100644
--- a/include/dt-bindings/clock/samsung,s3c64xx-clock.h
+++ b/include/dt-bindings/clock/samsung,s3c64xx-clock.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Samsung S3C64xx clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C64XX_CLOCK_H
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index d66432c6e675..a8ac4cfcdcbc 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -43,6 +43,7 @@
#ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_
#define _DT_BINDINGS_CLK_SUN50I_A64_H_
+#define CLK_PLL_VIDEO0 7
#define CLK_PLL_PERIPH0 11
#define CLK_BUS_MIPI_DSI 28
diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h
index 3bed63b524aa..7768f73b051e 100644
--- a/include/dt-bindings/clock/sun8i-de2.h
+++ b/include/dt-bindings/clock/sun8i-de2.h
@@ -15,4 +15,7 @@
#define CLK_MIXER1 7
#define CLK_WB 8
+#define CLK_BUS_ROT 9
+#define CLK_ROT 10
+
#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..f5ac155c9c70
--- /dev/null
+++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (c) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+
+#define CLK_CPU 11
+
+#define CLK_BUS_DMA 14
+#define CLK_BUS_MMC0 15
+#define CLK_BUS_MMC1 16
+#define CLK_BUS_DRAM 17
+#define CLK_BUS_SPI0 18
+#define CLK_BUS_SPI1 19
+#define CLK_BUS_OTG 20
+#define CLK_BUS_VE 21
+#define CLK_BUS_LCD 22
+#define CLK_BUS_DEINTERLACE 23
+#define CLK_BUS_CSI 24
+#define CLK_BUS_TVD 25
+#define CLK_BUS_TVE 26
+#define CLK_BUS_DE_BE 27
+#define CLK_BUS_DE_FE 28
+#define CLK_BUS_CODEC 29
+#define CLK_BUS_SPDIF 30
+#define CLK_BUS_IR 31
+#define CLK_BUS_RSB 32
+#define CLK_BUS_I2S0 33
+#define CLK_BUS_I2C0 34
+#define CLK_BUS_I2C1 35
+#define CLK_BUS_I2C2 36
+#define CLK_BUS_PIO 37
+#define CLK_BUS_UART0 38
+#define CLK_BUS_UART1 39
+#define CLK_BUS_UART2 40
+
+#define CLK_MMC0 41
+#define CLK_MMC0_SAMPLE 42
+#define CLK_MMC0_OUTPUT 43
+#define CLK_MMC1 44
+#define CLK_MMC1_SAMPLE 45
+#define CLK_MMC1_OUTPUT 46
+#define CLK_I2S 47
+#define CLK_SPDIF 48
+
+#define CLK_USB_PHY0 49
+
+#define CLK_DRAM_VE 50
+#define CLK_DRAM_CSI 51
+#define CLK_DRAM_DEINTERLACE 52
+#define CLK_DRAM_TVD 53
+#define CLK_DRAM_DE_FE 54
+#define CLK_DRAM_DE_BE 55
+
+#define CLK_DE_BE 56
+#define CLK_DE_FE 57
+#define CLK_TCON 58
+#define CLK_DEINTERLACE 59
+#define CLK_TVE2_CLK 60
+#define CLK_TVE1_CLK 61
+#define CLK_TVD 62
+#define CLK_CSI 63
+#define CLK_VE 64
+#define CLK_CODEC 65
+#define CLK_AVS 66
+
+#endif
diff --git a/include/dt-bindings/clock/xlnx,zynqmp-clk.h b/include/dt-bindings/clock/xlnx,zynqmp-clk.h
new file mode 100644
index 000000000000..4aebe6e2049e
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx,zynqmp-clk.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_ZYNQMP_H
+#define _DT_BINDINGS_CLK_ZYNQMP_H
+
+#define IOPLL 0
+#define RPLL 1
+#define APLL 2
+#define DPLL 3
+#define VPLL 4
+#define IOPLL_TO_FPD 5
+#define RPLL_TO_FPD 6
+#define APLL_TO_LPD 7
+#define DPLL_TO_LPD 8
+#define VPLL_TO_LPD 9
+#define ACPU 10
+#define ACPU_HALF 11
+#define DBF_FPD 12
+#define DBF_LPD 13
+#define DBG_TRACE 14
+#define DBG_TSTMP 15
+#define DP_VIDEO_REF 16
+#define DP_AUDIO_REF 17
+#define DP_STC_REF 18
+#define GDMA_REF 19
+#define DPDMA_REF 20
+#define DDR_REF 21
+#define SATA_REF 22
+#define PCIE_REF 23
+#define GPU_REF 24
+#define GPU_PP0_REF 25
+#define GPU_PP1_REF 26
+#define TOPSW_MAIN 27
+#define TOPSW_LSBUS 28
+#define GTGREF0_REF 29
+#define LPD_SWITCH 30
+#define LPD_LSBUS 31
+#define USB0_BUS_REF 32
+#define USB1_BUS_REF 33
+#define USB3_DUAL_REF 34
+#define USB0 35
+#define USB1 36
+#define CPU_R5 37
+#define CPU_R5_CORE 38
+#define CSU_SPB 39
+#define CSU_PLL 40
+#define PCAP 41
+#define IOU_SWITCH 42
+#define GEM_TSU_REF 43
+#define GEM_TSU 44
+#define GEM0_REF 45
+#define GEM1_REF 46
+#define GEM2_REF 47
+#define GEM3_REF 48
+#define GEM0_TX 49
+#define GEM1_TX 50
+#define GEM2_TX 51
+#define GEM3_TX 52
+#define QSPI_REF 53
+#define SDIO0_REF 54
+#define SDIO1_REF 55
+#define UART0_REF 56
+#define UART1_REF 57
+#define SPI0_REF 58
+#define SPI1_REF 59
+#define NAND_REF 60
+#define I2C0_REF 61
+#define I2C1_REF 62
+#define CAN0_REF 63
+#define CAN1_REF 64
+#define CAN0 65
+#define CAN1 66
+#define DLL_REF 67
+#define ADMA_REF 68
+#define TIMESTAMP_REF 69
+#define AMS_REF 70
+#define PL0_REF 71
+#define PL1_REF 72
+#define PL2_REF 73
+#define PL3_REF 74
+#define WDT 75
+#define IOPLL_INT 76
+#define IOPLL_PRE_SRC 77
+#define IOPLL_HALF 78
+#define IOPLL_INT_MUX 79
+#define IOPLL_POST_SRC 80
+#define RPLL_INT 81
+#define RPLL_PRE_SRC 82
+#define RPLL_HALF 83
+#define RPLL_INT_MUX 84
+#define RPLL_POST_SRC 85
+#define APLL_INT 86
+#define APLL_PRE_SRC 87
+#define APLL_HALF 88
+#define APLL_INT_MUX 89
+#define APLL_POST_SRC 90
+#define DPLL_INT 91
+#define DPLL_PRE_SRC 92
+#define DPLL_HALF 93
+#define DPLL_INT_MUX 94
+#define DPLL_POST_SRC 95
+#define VPLL_INT 96
+#define VPLL_PRE_SRC 97
+#define VPLL_HALF 98
+#define VPLL_INT_MUX 99
+#define VPLL_POST_SRC 100
+#define CAN0_MIO 101
+#define CAN1_MIO 102
+
+#endif
diff --git a/include/dt-bindings/dma/dw-dmac.h b/include/dt-bindings/dma/dw-dmac.h
new file mode 100644
index 000000000000..d1ca705c95b3
--- /dev/null
+++ b/include/dt-bindings/dma/dw-dmac.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __DT_BINDINGS_DMA_DW_DMAC_H__
+#define __DT_BINDINGS_DMA_DW_DMAC_H__
+
+/*
+ * Protection Control bits provide protection against illegal transactions.
+ * The protection bits[0:2] are one-to-one mapped to AHB HPROT[3:1] signals.
+ */
+#define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0) /* Privileged Mode */
+#define DW_DMAC_HPROT2_BUFFERABLE (1 << 1) /* DMA is bufferable */
+#define DW_DMAC_HPROT3_CACHEABLE (1 << 2) /* DMA is cacheable */
+
+#endif /* __DT_BINDINGS_DMA_DW_DMAC_H__ */
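
The three HPROT masks defined above are plain bit flags meant to be ORed together into a single protection-control value. A minimal standalone C11 check (a sketch, not part of the patch; the masks are restated inline) shows a privileged, cacheable combination:

    /* Masks restated from dw-dmac.h above; flags combine with bitwise OR. */
    #define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0)
    #define DW_DMAC_HPROT2_BUFFERABLE      (1 << 1)
    #define DW_DMAC_HPROT3_CACHEABLE       (1 << 2)

    /* Privileged + cacheable -> bit pattern 0b101 == 0x5 */
    _Static_assert((DW_DMAC_HPROT1_PRIVILEGED_MODE |
                    DW_DMAC_HPROT3_CACHEABLE) == 0x5,
                   "combined protection-control value");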
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
new file mode 100644
index 000000000000..4481f2d60d65
--- /dev/null
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_RSCRC_IMX_H
+#define __DT_BINDINGS_RSCRC_IMX_H
+
+/*
+ * These defines are used to indicate a resource. Resources include peripherals
+ * and bus masters (but not memory regions). Note that items in this list should
+ * never be changed or removed (only added to at the end of the list).
+ */
+
+#define IMX_SC_R_A53 0
+#define IMX_SC_R_A53_0 1
+#define IMX_SC_R_A53_1 2
+#define IMX_SC_R_A53_2 3
+#define IMX_SC_R_A53_3 4
+#define IMX_SC_R_A72 5
+#define IMX_SC_R_A72_0 6
+#define IMX_SC_R_A72_1 7
+#define IMX_SC_R_A72_2 8
+#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_CCI 10
+#define IMX_SC_R_DB 11
+#define IMX_SC_R_DRC_0 12
+#define IMX_SC_R_DRC_1 13
+#define IMX_SC_R_GIC_SMMU 14
+#define IMX_SC_R_IRQSTR_M4_0 15
+#define IMX_SC_R_IRQSTR_M4_1 16
+#define IMX_SC_R_SMMU 17
+#define IMX_SC_R_GIC 18
+#define IMX_SC_R_DC_0_BLIT0 19
+#define IMX_SC_R_DC_0_BLIT1 20
+#define IMX_SC_R_DC_0_BLIT2 21
+#define IMX_SC_R_DC_0_BLIT_OUT 22
+#define IMX_SC_R_DC_0_CAPTURE0 23
+#define IMX_SC_R_DC_0_CAPTURE1 24
+#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_DC_0_INTEGRAL0 26
+#define IMX_SC_R_DC_0_INTEGRAL1 27
+#define IMX_SC_R_DC_0_VIDEO0 28
+#define IMX_SC_R_DC_0_VIDEO1 29
+#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_DC_0_FRAC1 31
+#define IMX_SC_R_DC_0 32
+#define IMX_SC_R_GPU_2_PID0 33
+#define IMX_SC_R_DC_0_PLL_0 34
+#define IMX_SC_R_DC_0_PLL_1 35
+#define IMX_SC_R_DC_1_BLIT0 36
+#define IMX_SC_R_DC_1_BLIT1 37
+#define IMX_SC_R_DC_1_BLIT2 38
+#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_DC_1_CAPTURE0 40
+#define IMX_SC_R_DC_1_CAPTURE1 41
+#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_DC_1_INTEGRAL0 43
+#define IMX_SC_R_DC_1_INTEGRAL1 44
+#define IMX_SC_R_DC_1_VIDEO0 45
+#define IMX_SC_R_DC_1_VIDEO1 46
+#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_DC_1_FRAC1 48
+#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_GPU_3_PID0 50
+#define IMX_SC_R_DC_1_PLL_0 51
+#define IMX_SC_R_DC_1_PLL_1 52
+#define IMX_SC_R_SPI_0 53
+#define IMX_SC_R_SPI_1 54
+#define IMX_SC_R_SPI_2 55
+#define IMX_SC_R_SPI_3 56
+#define IMX_SC_R_UART_0 57
+#define IMX_SC_R_UART_1 58
+#define IMX_SC_R_UART_2 59
+#define IMX_SC_R_UART_3 60
+#define IMX_SC_R_UART_4 61
+#define IMX_SC_R_EMVSIM_0 62
+#define IMX_SC_R_EMVSIM_1 63
+#define IMX_SC_R_DMA_0_CH0 64
+#define IMX_SC_R_DMA_0_CH1 65
+#define IMX_SC_R_DMA_0_CH2 66
+#define IMX_SC_R_DMA_0_CH3 67
+#define IMX_SC_R_DMA_0_CH4 68
+#define IMX_SC_R_DMA_0_CH5 69
+#define IMX_SC_R_DMA_0_CH6 70
+#define IMX_SC_R_DMA_0_CH7 71
+#define IMX_SC_R_DMA_0_CH8 72
+#define IMX_SC_R_DMA_0_CH9 73
+#define IMX_SC_R_DMA_0_CH10 74
+#define IMX_SC_R_DMA_0_CH11 75
+#define IMX_SC_R_DMA_0_CH12 76
+#define IMX_SC_R_DMA_0_CH13 77
+#define IMX_SC_R_DMA_0_CH14 78
+#define IMX_SC_R_DMA_0_CH15 79
+#define IMX_SC_R_DMA_0_CH16 80
+#define IMX_SC_R_DMA_0_CH17 81
+#define IMX_SC_R_DMA_0_CH18 82
+#define IMX_SC_R_DMA_0_CH19 83
+#define IMX_SC_R_DMA_0_CH20 84
+#define IMX_SC_R_DMA_0_CH21 85
+#define IMX_SC_R_DMA_0_CH22 86
+#define IMX_SC_R_DMA_0_CH23 87
+#define IMX_SC_R_DMA_0_CH24 88
+#define IMX_SC_R_DMA_0_CH25 89
+#define IMX_SC_R_DMA_0_CH26 90
+#define IMX_SC_R_DMA_0_CH27 91
+#define IMX_SC_R_DMA_0_CH28 92
+#define IMX_SC_R_DMA_0_CH29 93
+#define IMX_SC_R_DMA_0_CH30 94
+#define IMX_SC_R_DMA_0_CH31 95
+#define IMX_SC_R_I2C_0 96
+#define IMX_SC_R_I2C_1 97
+#define IMX_SC_R_I2C_2 98
+#define IMX_SC_R_I2C_3 99
+#define IMX_SC_R_I2C_4 100
+#define IMX_SC_R_ADC_0 101
+#define IMX_SC_R_ADC_1 102
+#define IMX_SC_R_FTM_0 103
+#define IMX_SC_R_FTM_1 104
+#define IMX_SC_R_CAN_0 105
+#define IMX_SC_R_CAN_1 106
+#define IMX_SC_R_CAN_2 107
+#define IMX_SC_R_DMA_1_CH0 108
+#define IMX_SC_R_DMA_1_CH1 109
+#define IMX_SC_R_DMA_1_CH2 110
+#define IMX_SC_R_DMA_1_CH3 111
+#define IMX_SC_R_DMA_1_CH4 112
+#define IMX_SC_R_DMA_1_CH5 113
+#define IMX_SC_R_DMA_1_CH6 114
+#define IMX_SC_R_DMA_1_CH7 115
+#define IMX_SC_R_DMA_1_CH8 116
+#define IMX_SC_R_DMA_1_CH9 117
+#define IMX_SC_R_DMA_1_CH10 118
+#define IMX_SC_R_DMA_1_CH11 119
+#define IMX_SC_R_DMA_1_CH12 120
+#define IMX_SC_R_DMA_1_CH13 121
+#define IMX_SC_R_DMA_1_CH14 122
+#define IMX_SC_R_DMA_1_CH15 123
+#define IMX_SC_R_DMA_1_CH16 124
+#define IMX_SC_R_DMA_1_CH17 125
+#define IMX_SC_R_DMA_1_CH18 126
+#define IMX_SC_R_DMA_1_CH19 127
+#define IMX_SC_R_DMA_1_CH20 128
+#define IMX_SC_R_DMA_1_CH21 129
+#define IMX_SC_R_DMA_1_CH22 130
+#define IMX_SC_R_DMA_1_CH23 131
+#define IMX_SC_R_DMA_1_CH24 132
+#define IMX_SC_R_DMA_1_CH25 133
+#define IMX_SC_R_DMA_1_CH26 134
+#define IMX_SC_R_DMA_1_CH27 135
+#define IMX_SC_R_DMA_1_CH28 136
+#define IMX_SC_R_DMA_1_CH29 137
+#define IMX_SC_R_DMA_1_CH30 138
+#define IMX_SC_R_DMA_1_CH31 139
+#define IMX_SC_R_UNUSED1 140
+#define IMX_SC_R_UNUSED2 141
+#define IMX_SC_R_UNUSED3 142
+#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_GPU_0_PID0 144
+#define IMX_SC_R_GPU_0_PID1 145
+#define IMX_SC_R_GPU_0_PID2 146
+#define IMX_SC_R_GPU_0_PID3 147
+#define IMX_SC_R_GPU_1_PID0 148
+#define IMX_SC_R_GPU_1_PID1 149
+#define IMX_SC_R_GPU_1_PID2 150
+#define IMX_SC_R_GPU_1_PID3 151
+#define IMX_SC_R_PCIE_A 152
+#define IMX_SC_R_SERDES_0 153
+#define IMX_SC_R_MATCH_0 154
+#define IMX_SC_R_MATCH_1 155
+#define IMX_SC_R_MATCH_2 156
+#define IMX_SC_R_MATCH_3 157
+#define IMX_SC_R_MATCH_4 158
+#define IMX_SC_R_MATCH_5 159
+#define IMX_SC_R_MATCH_6 160
+#define IMX_SC_R_MATCH_7 161
+#define IMX_SC_R_MATCH_8 162
+#define IMX_SC_R_MATCH_9 163
+#define IMX_SC_R_MATCH_10 164
+#define IMX_SC_R_MATCH_11 165
+#define IMX_SC_R_MATCH_12 166
+#define IMX_SC_R_MATCH_13 167
+#define IMX_SC_R_MATCH_14 168
+#define IMX_SC_R_PCIE_B 169
+#define IMX_SC_R_SATA_0 170
+#define IMX_SC_R_SERDES_1 171
+#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_MATCH_15 173
+#define IMX_SC_R_MATCH_16 174
+#define IMX_SC_R_MATCH_17 175
+#define IMX_SC_R_MATCH_18 176
+#define IMX_SC_R_MATCH_19 177
+#define IMX_SC_R_MATCH_20 178
+#define IMX_SC_R_MATCH_21 179
+#define IMX_SC_R_MATCH_22 180
+#define IMX_SC_R_MATCH_23 181
+#define IMX_SC_R_MATCH_24 182
+#define IMX_SC_R_MATCH_25 183
+#define IMX_SC_R_MATCH_26 184
+#define IMX_SC_R_MATCH_27 185
+#define IMX_SC_R_MATCH_28 186
+#define IMX_SC_R_LCD_0 187
+#define IMX_SC_R_LCD_0_PWM_0 188
+#define IMX_SC_R_LCD_0_I2C_0 189
+#define IMX_SC_R_LCD_0_I2C_1 190
+#define IMX_SC_R_PWM_0 191
+#define IMX_SC_R_PWM_1 192
+#define IMX_SC_R_PWM_2 193
+#define IMX_SC_R_PWM_3 194
+#define IMX_SC_R_PWM_4 195
+#define IMX_SC_R_PWM_5 196
+#define IMX_SC_R_PWM_6 197
+#define IMX_SC_R_PWM_7 198
+#define IMX_SC_R_GPIO_0 199
+#define IMX_SC_R_GPIO_1 200
+#define IMX_SC_R_GPIO_2 201
+#define IMX_SC_R_GPIO_3 202
+#define IMX_SC_R_GPIO_4 203
+#define IMX_SC_R_GPIO_5 204
+#define IMX_SC_R_GPIO_6 205
+#define IMX_SC_R_GPIO_7 206
+#define IMX_SC_R_GPT_0 207
+#define IMX_SC_R_GPT_1 208
+#define IMX_SC_R_GPT_2 209
+#define IMX_SC_R_GPT_3 210
+#define IMX_SC_R_GPT_4 211
+#define IMX_SC_R_KPP 212
+#define IMX_SC_R_MU_0A 213
+#define IMX_SC_R_MU_1A 214
+#define IMX_SC_R_MU_2A 215
+#define IMX_SC_R_MU_3A 216
+#define IMX_SC_R_MU_4A 217
+#define IMX_SC_R_MU_5A 218
+#define IMX_SC_R_MU_6A 219
+#define IMX_SC_R_MU_7A 220
+#define IMX_SC_R_MU_8A 221
+#define IMX_SC_R_MU_9A 222
+#define IMX_SC_R_MU_10A 223
+#define IMX_SC_R_MU_11A 224
+#define IMX_SC_R_MU_12A 225
+#define IMX_SC_R_MU_13A 226
+#define IMX_SC_R_MU_5B 227
+#define IMX_SC_R_MU_6B 228
+#define IMX_SC_R_MU_7B 229
+#define IMX_SC_R_MU_8B 230
+#define IMX_SC_R_MU_9B 231
+#define IMX_SC_R_MU_10B 232
+#define IMX_SC_R_MU_11B 233
+#define IMX_SC_R_MU_12B 234
+#define IMX_SC_R_MU_13B 235
+#define IMX_SC_R_ROM_0 236
+#define IMX_SC_R_FSPI_0 237
+#define IMX_SC_R_FSPI_1 238
+#define IMX_SC_R_IEE 239
+#define IMX_SC_R_IEE_R0 240
+#define IMX_SC_R_IEE_R1 241
+#define IMX_SC_R_IEE_R2 242
+#define IMX_SC_R_IEE_R3 243
+#define IMX_SC_R_IEE_R4 244
+#define IMX_SC_R_IEE_R5 245
+#define IMX_SC_R_IEE_R6 246
+#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_SDHC_0 248
+#define IMX_SC_R_SDHC_1 249
+#define IMX_SC_R_SDHC_2 250
+#define IMX_SC_R_ENET_0 251
+#define IMX_SC_R_ENET_1 252
+#define IMX_SC_R_MLB_0 253
+#define IMX_SC_R_DMA_2_CH0 254
+#define IMX_SC_R_DMA_2_CH1 255
+#define IMX_SC_R_DMA_2_CH2 256
+#define IMX_SC_R_DMA_2_CH3 257
+#define IMX_SC_R_DMA_2_CH4 258
+#define IMX_SC_R_USB_0 259
+#define IMX_SC_R_USB_1 260
+#define IMX_SC_R_USB_0_PHY 261
+#define IMX_SC_R_USB_2 262
+#define IMX_SC_R_USB_2_PHY 263
+#define IMX_SC_R_DTCP 264
+#define IMX_SC_R_NAND 265
+#define IMX_SC_R_LVDS_0 266
+#define IMX_SC_R_LVDS_0_PWM_0 267
+#define IMX_SC_R_LVDS_0_I2C_0 268
+#define IMX_SC_R_LVDS_0_I2C_1 269
+#define IMX_SC_R_LVDS_1 270
+#define IMX_SC_R_LVDS_1_PWM_0 271
+#define IMX_SC_R_LVDS_1_I2C_0 272
+#define IMX_SC_R_LVDS_1_I2C_1 273
+#define IMX_SC_R_LVDS_2 274
+#define IMX_SC_R_LVDS_2_PWM_0 275
+#define IMX_SC_R_LVDS_2_I2C_0 276
+#define IMX_SC_R_LVDS_2_I2C_1 277
+#define IMX_SC_R_M4_0_PID0 278
+#define IMX_SC_R_M4_0_PID1 279
+#define IMX_SC_R_M4_0_PID2 280
+#define IMX_SC_R_M4_0_PID3 281
+#define IMX_SC_R_M4_0_PID4 282
+#define IMX_SC_R_M4_0_RGPIO 283
+#define IMX_SC_R_M4_0_SEMA42 284
+#define IMX_SC_R_M4_0_TPM 285
+#define IMX_SC_R_M4_0_PIT 286
+#define IMX_SC_R_M4_0_UART 287
+#define IMX_SC_R_M4_0_I2C 288
+#define IMX_SC_R_M4_0_INTMUX 289
+#define IMX_SC_R_M4_0_SIM 290
+#define IMX_SC_R_M4_0_WDOG 291
+#define IMX_SC_R_M4_0_MU_0B 292
+#define IMX_SC_R_M4_0_MU_0A0 293
+#define IMX_SC_R_M4_0_MU_0A1 294
+#define IMX_SC_R_M4_0_MU_0A2 295
+#define IMX_SC_R_M4_0_MU_0A3 296
+#define IMX_SC_R_M4_0_MU_1A 297
+#define IMX_SC_R_M4_1_PID0 298
+#define IMX_SC_R_M4_1_PID1 299
+#define IMX_SC_R_M4_1_PID2 300
+#define IMX_SC_R_M4_1_PID3 301
+#define IMX_SC_R_M4_1_PID4 302
+#define IMX_SC_R_M4_1_RGPIO 303
+#define IMX_SC_R_M4_1_SEMA42 304
+#define IMX_SC_R_M4_1_TPM 305
+#define IMX_SC_R_M4_1_PIT 306
+#define IMX_SC_R_M4_1_UART 307
+#define IMX_SC_R_M4_1_I2C 308
+#define IMX_SC_R_M4_1_INTMUX 309
+#define IMX_SC_R_M4_1_SIM 310
+#define IMX_SC_R_M4_1_WDOG 311
+#define IMX_SC_R_M4_1_MU_0B 312
+#define IMX_SC_R_M4_1_MU_0A0 313
+#define IMX_SC_R_M4_1_MU_0A1 314
+#define IMX_SC_R_M4_1_MU_0A2 315
+#define IMX_SC_R_M4_1_MU_0A3 316
+#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_SAI_0 318
+#define IMX_SC_R_SAI_1 319
+#define IMX_SC_R_SAI_2 320
+#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_DSP 322
+#define IMX_SC_R_ELCDIF_PLL 323
+#define IMX_SC_R_UNUSED6 324
+#define IMX_SC_R_AUDIO_PLL_0 325
+#define IMX_SC_R_PI_0 326
+#define IMX_SC_R_PI_0_PWM_0 327
+#define IMX_SC_R_PI_0_PWM_1 328
+#define IMX_SC_R_PI_0_I2C_0 329
+#define IMX_SC_R_PI_0_PLL 330
+#define IMX_SC_R_PI_1 331
+#define IMX_SC_R_PI_1_PWM_0 332
+#define IMX_SC_R_PI_1_PWM_1 333
+#define IMX_SC_R_PI_1_I2C_0 334
+#define IMX_SC_R_PI_1_PLL 335
+#define IMX_SC_R_SC_PID0 336
+#define IMX_SC_R_SC_PID1 337
+#define IMX_SC_R_SC_PID2 338
+#define IMX_SC_R_SC_PID3 339
+#define IMX_SC_R_SC_PID4 340
+#define IMX_SC_R_SC_SEMA42 341
+#define IMX_SC_R_SC_TPM 342
+#define IMX_SC_R_SC_PIT 343
+#define IMX_SC_R_SC_UART 344
+#define IMX_SC_R_SC_I2C 345
+#define IMX_SC_R_SC_MU_0B 346
+#define IMX_SC_R_SC_MU_0A0 347
+#define IMX_SC_R_SC_MU_0A1 348
+#define IMX_SC_R_SC_MU_0A2 349
+#define IMX_SC_R_SC_MU_0A3 350
+#define IMX_SC_R_SC_MU_1A 351
+#define IMX_SC_R_SYSCNT_RD 352
+#define IMX_SC_R_SYSCNT_CMP 353
+#define IMX_SC_R_DEBUG 354
+#define IMX_SC_R_SYSTEM 355
+#define IMX_SC_R_SNVS 356
+#define IMX_SC_R_OTP 357
+#define IMX_SC_R_VPU_PID0 358
+#define IMX_SC_R_VPU_PID1 359
+#define IMX_SC_R_VPU_PID2 360
+#define IMX_SC_R_VPU_PID3 361
+#define IMX_SC_R_VPU_PID4 362
+#define IMX_SC_R_VPU_PID5 363
+#define IMX_SC_R_VPU_PID6 364
+#define IMX_SC_R_VPU_PID7 365
+#define IMX_SC_R_VPU_UART 366
+#define IMX_SC_R_VPUCORE 367
+#define IMX_SC_R_VPUCORE_0 368
+#define IMX_SC_R_VPUCORE_1 369
+#define IMX_SC_R_VPUCORE_2 370
+#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_DMA_4_CH0 372
+#define IMX_SC_R_DMA_4_CH1 373
+#define IMX_SC_R_DMA_4_CH2 374
+#define IMX_SC_R_DMA_4_CH3 375
+#define IMX_SC_R_DMA_4_CH4 376
+#define IMX_SC_R_ISI_CH0 377
+#define IMX_SC_R_ISI_CH1 378
+#define IMX_SC_R_ISI_CH2 379
+#define IMX_SC_R_ISI_CH3 380
+#define IMX_SC_R_ISI_CH4 381
+#define IMX_SC_R_ISI_CH5 382
+#define IMX_SC_R_ISI_CH6 383
+#define IMX_SC_R_ISI_CH7 384
+#define IMX_SC_R_MJPEG_DEC_S0 385
+#define IMX_SC_R_MJPEG_DEC_S1 386
+#define IMX_SC_R_MJPEG_DEC_S2 387
+#define IMX_SC_R_MJPEG_DEC_S3 388
+#define IMX_SC_R_MJPEG_ENC_S0 389
+#define IMX_SC_R_MJPEG_ENC_S1 390
+#define IMX_SC_R_MJPEG_ENC_S2 391
+#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_MIPI_0 393
+#define IMX_SC_R_MIPI_0_PWM_0 394
+#define IMX_SC_R_MIPI_0_I2C_0 395
+#define IMX_SC_R_MIPI_0_I2C_1 396
+#define IMX_SC_R_MIPI_1 397
+#define IMX_SC_R_MIPI_1_PWM_0 398
+#define IMX_SC_R_MIPI_1_I2C_0 399
+#define IMX_SC_R_MIPI_1_I2C_1 400
+#define IMX_SC_R_CSI_0 401
+#define IMX_SC_R_CSI_0_PWM_0 402
+#define IMX_SC_R_CSI_0_I2C_0 403
+#define IMX_SC_R_CSI_1 404
+#define IMX_SC_R_CSI_1_PWM_0 405
+#define IMX_SC_R_CSI_1_I2C_0 406
+#define IMX_SC_R_HDMI 407
+#define IMX_SC_R_HDMI_I2S 408
+#define IMX_SC_R_HDMI_I2C_0 409
+#define IMX_SC_R_HDMI_PLL_0 410
+#define IMX_SC_R_HDMI_RX 411
+#define IMX_SC_R_HDMI_RX_BYPASS 412
+#define IMX_SC_R_HDMI_RX_I2C_0 413
+#define IMX_SC_R_ASRC_0 414
+#define IMX_SC_R_ESAI_0 415
+#define IMX_SC_R_SPDIF_0 416
+#define IMX_SC_R_SPDIF_1 417
+#define IMX_SC_R_SAI_3 418
+#define IMX_SC_R_SAI_4 419
+#define IMX_SC_R_SAI_5 420
+#define IMX_SC_R_GPT_5 421
+#define IMX_SC_R_GPT_6 422
+#define IMX_SC_R_GPT_7 423
+#define IMX_SC_R_GPT_8 424
+#define IMX_SC_R_GPT_9 425
+#define IMX_SC_R_GPT_10 426
+#define IMX_SC_R_DMA_2_CH5 427
+#define IMX_SC_R_DMA_2_CH6 428
+#define IMX_SC_R_DMA_2_CH7 429
+#define IMX_SC_R_DMA_2_CH8 430
+#define IMX_SC_R_DMA_2_CH9 431
+#define IMX_SC_R_DMA_2_CH10 432
+#define IMX_SC_R_DMA_2_CH11 433
+#define IMX_SC_R_DMA_2_CH12 434
+#define IMX_SC_R_DMA_2_CH13 435
+#define IMX_SC_R_DMA_2_CH14 436
+#define IMX_SC_R_DMA_2_CH15 437
+#define IMX_SC_R_DMA_2_CH16 438
+#define IMX_SC_R_DMA_2_CH17 439
+#define IMX_SC_R_DMA_2_CH18 440
+#define IMX_SC_R_DMA_2_CH19 441
+#define IMX_SC_R_DMA_2_CH20 442
+#define IMX_SC_R_DMA_2_CH21 443
+#define IMX_SC_R_DMA_2_CH22 444
+#define IMX_SC_R_DMA_2_CH23 445
+#define IMX_SC_R_DMA_2_CH24 446
+#define IMX_SC_R_DMA_2_CH25 447
+#define IMX_SC_R_DMA_2_CH26 448
+#define IMX_SC_R_DMA_2_CH27 449
+#define IMX_SC_R_DMA_2_CH28 450
+#define IMX_SC_R_DMA_2_CH29 451
+#define IMX_SC_R_DMA_2_CH30 452
+#define IMX_SC_R_DMA_2_CH31 453
+#define IMX_SC_R_ASRC_1 454
+#define IMX_SC_R_ESAI_1 455
+#define IMX_SC_R_SAI_6 456
+#define IMX_SC_R_SAI_7 457
+#define IMX_SC_R_AMIX 458
+#define IMX_SC_R_MQS_0 459
+#define IMX_SC_R_DMA_3_CH0 460
+#define IMX_SC_R_DMA_3_CH1 461
+#define IMX_SC_R_DMA_3_CH2 462
+#define IMX_SC_R_DMA_3_CH3 463
+#define IMX_SC_R_DMA_3_CH4 464
+#define IMX_SC_R_DMA_3_CH5 465
+#define IMX_SC_R_DMA_3_CH6 466
+#define IMX_SC_R_DMA_3_CH7 467
+#define IMX_SC_R_DMA_3_CH8 468
+#define IMX_SC_R_DMA_3_CH9 469
+#define IMX_SC_R_DMA_3_CH10 470
+#define IMX_SC_R_DMA_3_CH11 471
+#define IMX_SC_R_DMA_3_CH12 472
+#define IMX_SC_R_DMA_3_CH13 473
+#define IMX_SC_R_DMA_3_CH14 474
+#define IMX_SC_R_DMA_3_CH15 475
+#define IMX_SC_R_DMA_3_CH16 476
+#define IMX_SC_R_DMA_3_CH17 477
+#define IMX_SC_R_DMA_3_CH18 478
+#define IMX_SC_R_DMA_3_CH19 479
+#define IMX_SC_R_DMA_3_CH20 480
+#define IMX_SC_R_DMA_3_CH21 481
+#define IMX_SC_R_DMA_3_CH22 482
+#define IMX_SC_R_DMA_3_CH23 483
+#define IMX_SC_R_DMA_3_CH24 484
+#define IMX_SC_R_DMA_3_CH25 485
+#define IMX_SC_R_DMA_3_CH26 486
+#define IMX_SC_R_DMA_3_CH27 487
+#define IMX_SC_R_DMA_3_CH28 488
+#define IMX_SC_R_DMA_3_CH29 489
+#define IMX_SC_R_DMA_3_CH30 490
+#define IMX_SC_R_DMA_3_CH31 491
+#define IMX_SC_R_AUDIO_PLL_1 492
+#define IMX_SC_R_AUDIO_CLK_0 493
+#define IMX_SC_R_AUDIO_CLK_1 494
+#define IMX_SC_R_MCLK_OUT_0 495
+#define IMX_SC_R_MCLK_OUT_1 496
+#define IMX_SC_R_PMIC_0 497
+#define IMX_SC_R_PMIC_1 498
+#define IMX_SC_R_SECO 499
+#define IMX_SC_R_CAAM_JR1 500
+#define IMX_SC_R_CAAM_JR2 501
+#define IMX_SC_R_CAAM_JR3 502
+#define IMX_SC_R_SECO_MU_2 503
+#define IMX_SC_R_SECO_MU_3 504
+#define IMX_SC_R_SECO_MU_4 505
+#define IMX_SC_R_HDMI_RX_PWM_0 506
+#define IMX_SC_R_A35 507
+#define IMX_SC_R_A35_0 508
+#define IMX_SC_R_A35_1 509
+#define IMX_SC_R_A35_2 510
+#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_DSP 512
+#define IMX_SC_R_DSP_RAM 513
+#define IMX_SC_R_CAAM_JR1_OUT 514
+#define IMX_SC_R_CAAM_JR2_OUT 515
+#define IMX_SC_R_CAAM_JR3_OUT 516
+#define IMX_SC_R_VPU_DEC_0 517
+#define IMX_SC_R_VPU_ENC_0 518
+#define IMX_SC_R_CAAM_JR0 519
+#define IMX_SC_R_CAAM_JR0_OUT 520
+#define IMX_SC_R_PMIC_2 521
+#define IMX_SC_R_DBLOGIC 522
+#define IMX_SC_R_HDMI_PLL_1 523
+#define IMX_SC_R_BOARD_R0 524
+#define IMX_SC_R_BOARD_R1 525
+#define IMX_SC_R_BOARD_R2 526
+#define IMX_SC_R_BOARD_R3 527
+#define IMX_SC_R_BOARD_R4 528
+#define IMX_SC_R_BOARD_R5 529
+#define IMX_SC_R_BOARD_R6 530
+#define IMX_SC_R_BOARD_R7 531
+#define IMX_SC_R_MJPEG_DEC_MP 532
+#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_VPU_TS_0 534
+#define IMX_SC_R_VPU_MU_0 535
+#define IMX_SC_R_VPU_MU_1 536
+#define IMX_SC_R_VPU_MU_2 537
+#define IMX_SC_R_VPU_MU_3 538
+#define IMX_SC_R_VPU_ENC_1 539
+#define IMX_SC_R_VPU 540
+#define IMX_SC_R_LAST 541
+
+#endif /* __DT_BINDINGS_RSCRC_IMX_H */
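
Because the system-controller firmware refers to resources purely by these numbers, the values above only ever grow at the end of the list, and IMX_SC_R_LAST sits one past the final entry. A minimal standalone C11 check (a sketch, not part of the patch; a few IDs are restated inline) makes that invariant explicit:

    /* A handful of IDs restated from rsrc.h above. */
    #define IMX_SC_R_UART_0   57
    #define IMX_SC_R_UART_1   58
    #define IMX_SC_R_VPU     540
    #define IMX_SC_R_LAST    541

    _Static_assert(IMX_SC_R_UART_1 == IMX_SC_R_UART_0 + 1, "IDs are dense");
    _Static_assert(IMX_SC_R_LAST == IMX_SC_R_VPU + 1, "LAST is one past the end");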
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
new file mode 100644
index 000000000000..f7bd69350d18
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_MESON_G12A_GPIO_H
+
+/* First GPIO chip */
+#define GPIOAO_0 0
+#define GPIOAO_1 1
+#define GPIOAO_2 2
+#define GPIOAO_3 3
+#define GPIOAO_4 4
+#define GPIOAO_5 5
+#define GPIOAO_6 6
+#define GPIOAO_7 7
+#define GPIOAO_8 8
+#define GPIOAO_9 9
+#define GPIOAO_10 10
+#define GPIOAO_11 11
+#define GPIOE_0 12
+#define GPIOE_1 13
+#define GPIOE_2 14
+
+/* Second GPIO chip */
+#define GPIOZ_0 0
+#define GPIOZ_1 1
+#define GPIOZ_2 2
+#define GPIOZ_3 3
+#define GPIOZ_4 4
+#define GPIOZ_5 5
+#define GPIOZ_6 6
+#define GPIOZ_7 7
+#define GPIOZ_8 8
+#define GPIOZ_9 9
+#define GPIOZ_10 10
+#define GPIOZ_11 11
+#define GPIOZ_12 12
+#define GPIOZ_13 13
+#define GPIOZ_14 14
+#define GPIOZ_15 15
+#define GPIOH_0 16
+#define GPIOH_1 17
+#define GPIOH_2 18
+#define GPIOH_3 19
+#define GPIOH_4 20
+#define GPIOH_5 21
+#define GPIOH_6 22
+#define GPIOH_7 23
+#define GPIOH_8 24
+#define BOOT_0 25
+#define BOOT_1 26
+#define BOOT_2 27
+#define BOOT_3 28
+#define BOOT_4 29
+#define BOOT_5 30
+#define BOOT_6 31
+#define BOOT_7 32
+#define BOOT_8 33
+#define BOOT_9 34
+#define BOOT_10 35
+#define BOOT_11 36
+#define BOOT_12 37
+#define BOOT_13 38
+#define BOOT_14 39
+#define BOOT_15 40
+#define GPIOC_0 41
+#define GPIOC_1 42
+#define GPIOC_2 43
+#define GPIOC_3 44
+#define GPIOC_4 45
+#define GPIOC_5 46
+#define GPIOC_6 47
+#define GPIOC_7 48
+#define GPIOA_0 49
+#define GPIOA_1 50
+#define GPIOA_2 51
+#define GPIOA_3 52
+#define GPIOA_4 53
+#define GPIOA_5 54
+#define GPIOA_6 55
+#define GPIOA_7 56
+#define GPIOA_8 57
+#define GPIOA_9 58
+#define GPIOA_10 59
+#define GPIOA_11 60
+#define GPIOA_12 61
+#define GPIOA_13 62
+#define GPIOA_14 63
+#define GPIOA_15 64
+#define GPIOX_0 65
+#define GPIOX_1 66
+#define GPIOX_2 67
+#define GPIOX_3 68
+#define GPIOX_4 69
+#define GPIOX_5 70
+#define GPIOX_6 71
+#define GPIOX_7 72
+#define GPIOX_8 73
+#define GPIOX_9 74
+#define GPIOX_10 75
+#define GPIOX_11 76
+#define GPIOX_12 77
+#define GPIOX_13 78
+#define GPIOX_14 79
+#define GPIOX_15 80
+#define GPIOX_16 81
+#define GPIOX_17 82
+#define GPIOX_18 83
+#define GPIOX_19 84
+
+#endif /* _DT_BINDINGS_MESON_G12A_GPIO_H */
diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h
index 463ad398fe3e..cabc5712e745 100644
--- a/include/dt-bindings/gpio/tegra186-gpio.h
+++ b/include/dt-bindings/gpio/tegra186-gpio.h
@@ -14,6 +14,34 @@
#include <dt-bindings/gpio/gpio.h>
/* GPIOs implemented by main GPIO controller */
+#define TEGRA186_MAIN_GPIO_PORT_A 0
+#define TEGRA186_MAIN_GPIO_PORT_B 1
+#define TEGRA186_MAIN_GPIO_PORT_C 2
+#define TEGRA186_MAIN_GPIO_PORT_D 3
+#define TEGRA186_MAIN_GPIO_PORT_E 4
+#define TEGRA186_MAIN_GPIO_PORT_F 5
+#define TEGRA186_MAIN_GPIO_PORT_G 6
+#define TEGRA186_MAIN_GPIO_PORT_H 7
+#define TEGRA186_MAIN_GPIO_PORT_I 8
+#define TEGRA186_MAIN_GPIO_PORT_J 9
+#define TEGRA186_MAIN_GPIO_PORT_K 10
+#define TEGRA186_MAIN_GPIO_PORT_L 11
+#define TEGRA186_MAIN_GPIO_PORT_M 12
+#define TEGRA186_MAIN_GPIO_PORT_N 13
+#define TEGRA186_MAIN_GPIO_PORT_O 14
+#define TEGRA186_MAIN_GPIO_PORT_P 15
+#define TEGRA186_MAIN_GPIO_PORT_Q 16
+#define TEGRA186_MAIN_GPIO_PORT_R 17
+#define TEGRA186_MAIN_GPIO_PORT_T 18
+#define TEGRA186_MAIN_GPIO_PORT_X 19
+#define TEGRA186_MAIN_GPIO_PORT_Y 20
+#define TEGRA186_MAIN_GPIO_PORT_BB 21
+#define TEGRA186_MAIN_GPIO_PORT_CC 22
+
+#define TEGRA186_MAIN_GPIO(port, offset) \
+ ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset)
+
+/* need to keep these for backwards-compatibility */
#define TEGRA_MAIN_GPIO_PORT_A 0
#define TEGRA_MAIN_GPIO_PORT_B 1
#define TEGRA_MAIN_GPIO_PORT_C 2
@@ -42,6 +70,19 @@
((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset)
/* GPIOs implemented by AON GPIO controller */
+#define TEGRA186_AON_GPIO_PORT_S 0
+#define TEGRA186_AON_GPIO_PORT_U 1
+#define TEGRA186_AON_GPIO_PORT_V 2
+#define TEGRA186_AON_GPIO_PORT_W 3
+#define TEGRA186_AON_GPIO_PORT_Z 4
+#define TEGRA186_AON_GPIO_PORT_AA 5
+#define TEGRA186_AON_GPIO_PORT_EE 6
+#define TEGRA186_AON_GPIO_PORT_FF 7
+
+#define TEGRA186_AON_GPIO(port, offset) \
+ ((TEGRA186_AON_GPIO_PORT_##port * 8) + offset)
+
+/* need to keep these for backwards-compatibility */
#define TEGRA_AON_GPIO_PORT_S 0
#define TEGRA_AON_GPIO_PORT_U 1
#define TEGRA_AON_GPIO_PORT_V 2
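
The new TEGRA186_MAIN_GPIO() and TEGRA186_AON_GPIO() helpers encode a pin as port * 8 + offset, exactly like the legacy TEGRA_MAIN_GPIO()/TEGRA_AON_GPIO() macros kept for backwards-compatibility. A minimal standalone C11 check (a sketch, not part of the patch; the definitions are restated inline) illustrates the encoding:

    /* Restated from tegra186-gpio.h above. */
    #define TEGRA186_MAIN_GPIO_PORT_J 9

    #define TEGRA186_MAIN_GPIO(port, offset) \
            ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset)

    /* Port J, pin 7 -> 9 * 8 + 7 == 79 */
    _Static_assert(TEGRA186_MAIN_GPIO(J, 7) == 79, "port/offset encoding");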
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 42121fa238fa..61d556db1542 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H
@@ -116,4 +108,117 @@
#define VADC_LR_MUX10_PU1_PU2_AMUX_USB_ID 0xf9
#define VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM 0xfc
+/* ADC channels for SPMI PMIC5 */
+
+#define ADC5_REF_GND 0x00
+#define ADC5_1P25VREF 0x01
+#define ADC5_VREF_VADC 0x02
+#define ADC5_VREF_VADC5_DIV_3 0x82
+#define ADC5_VPH_PWR 0x83
+#define ADC5_VBAT_SNS 0x84
+#define ADC5_VCOIN 0x85
+#define ADC5_DIE_TEMP 0x06
+#define ADC5_USB_IN_I 0x07
+#define ADC5_USB_IN_V_16 0x08
+#define ADC5_CHG_TEMP 0x09
+#define ADC5_BAT_THERM 0x0a
+#define ADC5_BAT_ID 0x0b
+#define ADC5_XO_THERM 0x0c
+#define ADC5_AMUX_THM1 0x0d
+#define ADC5_AMUX_THM2 0x0e
+#define ADC5_AMUX_THM3 0x0f
+#define ADC5_AMUX_THM4 0x10
+#define ADC5_AMUX_THM5 0x11
+#define ADC5_GPIO1 0x12
+#define ADC5_GPIO2 0x13
+#define ADC5_GPIO3 0x14
+#define ADC5_GPIO4 0x15
+#define ADC5_GPIO5 0x16
+#define ADC5_GPIO6 0x17
+#define ADC5_GPIO7 0x18
+#define ADC5_SBUx 0x99
+#define ADC5_MID_CHG_DIV6 0x1e
+#define ADC5_OFF 0xff
+
+/* 30k pull-up1 */
+#define ADC5_BAT_THERM_30K_PU 0x2a
+#define ADC5_BAT_ID_30K_PU 0x2b
+#define ADC5_XO_THERM_30K_PU 0x2c
+#define ADC5_AMUX_THM1_30K_PU 0x2d
+#define ADC5_AMUX_THM2_30K_PU 0x2e
+#define ADC5_AMUX_THM3_30K_PU 0x2f
+#define ADC5_AMUX_THM4_30K_PU 0x30
+#define ADC5_AMUX_THM5_30K_PU 0x31
+#define ADC5_GPIO1_30K_PU 0x32
+#define ADC5_GPIO2_30K_PU 0x33
+#define ADC5_GPIO3_30K_PU 0x34
+#define ADC5_GPIO4_30K_PU 0x35
+#define ADC5_GPIO5_30K_PU 0x36
+#define ADC5_GPIO6_30K_PU 0x37
+#define ADC5_GPIO7_30K_PU 0x38
+#define ADC5_SBUx_30K_PU 0x39
+
+/* 100k pull-up2 */
+#define ADC5_BAT_THERM_100K_PU 0x4a
+#define ADC5_BAT_ID_100K_PU 0x4b
+#define ADC5_XO_THERM_100K_PU 0x4c
+#define ADC5_AMUX_THM1_100K_PU 0x4d
+#define ADC5_AMUX_THM2_100K_PU 0x4e
+#define ADC5_AMUX_THM3_100K_PU 0x4f
+#define ADC5_AMUX_THM4_100K_PU 0x50
+#define ADC5_AMUX_THM5_100K_PU 0x51
+#define ADC5_GPIO1_100K_PU 0x52
+#define ADC5_GPIO2_100K_PU 0x53
+#define ADC5_GPIO3_100K_PU 0x54
+#define ADC5_GPIO4_100K_PU 0x55
+#define ADC5_GPIO5_100K_PU 0x56
+#define ADC5_GPIO6_100K_PU 0x57
+#define ADC5_GPIO7_100K_PU 0x58
+#define ADC5_SBUx_100K_PU 0x59
+
+/* 400k pull-up3 */
+#define ADC5_BAT_THERM_400K_PU 0x6a
+#define ADC5_BAT_ID_400K_PU 0x6b
+#define ADC5_XO_THERM_400K_PU 0x6c
+#define ADC5_AMUX_THM1_400K_PU 0x6d
+#define ADC5_AMUX_THM2_400K_PU 0x6e
+#define ADC5_AMUX_THM3_400K_PU 0x6f
+#define ADC5_AMUX_THM4_400K_PU 0x70
+#define ADC5_AMUX_THM5_400K_PU 0x71
+#define ADC5_GPIO1_400K_PU 0x72
+#define ADC5_GPIO2_400K_PU 0x73
+#define ADC5_GPIO3_400K_PU 0x74
+#define ADC5_GPIO4_400K_PU 0x75
+#define ADC5_GPIO5_400K_PU 0x76
+#define ADC5_GPIO6_400K_PU 0x77
+#define ADC5_GPIO7_400K_PU 0x78
+#define ADC5_SBUx_400K_PU 0x79
+
+/* 1/3 Divider */
+#define ADC5_GPIO1_DIV3 0x92
+#define ADC5_GPIO2_DIV3 0x93
+#define ADC5_GPIO3_DIV3 0x94
+#define ADC5_GPIO4_DIV3 0x95
+#define ADC5_GPIO5_DIV3 0x96
+#define ADC5_GPIO6_DIV3 0x97
+#define ADC5_GPIO7_DIV3 0x98
+#define ADC5_SBUx_DIV3 0x99
+
+/* Current and combined current/voltage channels */
+#define ADC5_INT_EXT_ISENSE 0xa1
+#define ADC5_PARALLEL_ISENSE 0xa5
+#define ADC5_CUR_REPLICA_VDS 0xa7
+#define ADC5_CUR_SENS_BATFET_VDS_OFFSET 0xa9
+#define ADC5_CUR_SENS_REPLICA_VDS_OFFSET 0xab
+#define ADC5_EXT_SENS_OFFSET 0xad
+
+#define ADC5_INT_EXT_ISENSE_VBAT_VDATA 0xb0
+#define ADC5_INT_EXT_ISENSE_VBAT_IDATA 0xb1
+#define ADC5_EXT_ISENSE_VBAT_VDATA 0xb2
+#define ADC5_EXT_ISENSE_VBAT_IDATA 0xb3
+#define ADC5_PARALLEL_ISENSE_VBAT_VDATA 0xb4
+#define ADC5_PARALLEL_ISENSE_VBAT_IDATA 0xb5
+
+#define ADC5_MAX_CHANNEL 0xc0
+
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 0c85f65c81c7..35b6f69b7db6 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* This header provides constants for the ARM GIC.
*/
diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h
index a8b310555f14..9e3d183e1381 100644
--- a/include/dt-bindings/interrupt-controller/irq.h
+++ b/include/dt-bindings/interrupt-controller/irq.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* This header provides constants for most IRQ bindings.
*
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
index bcab5b7ca785..3bdec7a84d35 100644
--- a/include/dt-bindings/mailbox/tegra186-hsp.h
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -22,4 +22,15 @@
#define TEGRA_HSP_DB_MASTER_CCPLEX 17
#define TEGRA_HSP_DB_MASTER_BPMP 19
+/*
+ * Shared mailboxes are unidirectional, so the direction needs to be specified
+ * in the device tree.
+ */
+#define TEGRA_HSP_SM_MASK 0x00ffffff
+#define TEGRA_HSP_SM_FLAG_RX (0 << 31)
+#define TEGRA_HSP_SM_FLAG_TX (1 << 31)
+
+#define TEGRA_HSP_SM_RX(x) (TEGRA_HSP_SM_FLAG_RX | ((x) & TEGRA_HSP_SM_MASK))
+#define TEGRA_HSP_SM_TX(x) (TEGRA_HSP_SM_FLAG_TX | ((x) & TEGRA_HSP_SM_MASK))
+
#endif
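
TEGRA_HSP_SM_RX() and TEGRA_HSP_SM_TX() pack the shared-mailbox index into the low 24 bits and use bit 31 as the direction flag, which is how the device tree expresses that shared mailboxes are unidirectional. A minimal standalone C11 check (a sketch, not part of the patch; the encoding is restated inline with unsigned literals so the 31-bit shift stays well defined in isolation):

    /* Restated from tegra186-hsp.h above, using unsigned literals. */
    #define TEGRA_HSP_SM_MASK    0x00ffffffu
    #define TEGRA_HSP_SM_FLAG_RX (0u << 31)
    #define TEGRA_HSP_SM_FLAG_TX (1u << 31)

    #define TEGRA_HSP_SM_RX(x) (TEGRA_HSP_SM_FLAG_RX | ((x) & TEGRA_HSP_SM_MASK))
    #define TEGRA_HSP_SM_TX(x) (TEGRA_HSP_SM_FLAG_TX | ((x) & TEGRA_HSP_SM_MASK))

    /* Shared mailbox 3 as a receiver vs. a transmitter. */
    _Static_assert(TEGRA_HSP_SM_RX(3) == 0x00000003u, "RX leaves bit 31 clear");
    _Static_assert(TEGRA_HSP_SM_TX(3) == 0x80000003u, "TX sets bit 31");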
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 6298fec00685..94ed3edfcc70 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Core
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h
new file mode 100644
index 000000000000..6f9aa7349cef
--- /dev/null
+++ b/include/dt-bindings/memory/mt2712-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DTS_IOMMU_PORT_MT2712_H
+#define __DTS_IOMMU_PORT_MT2712_H
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+#define M4U_LARB6_ID 6
+#define M4U_LARB7_ID 7
+#define M4U_LARB8_ID 8
+#define M4U_LARB9_ID 9
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_HW_IMG_RESZ_EXT MTK_M4U_ID(M4U_LARB1_ID, 10)
+
+/* larb2 */
+#define M4U_PORT_CAM_DMA0 MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_DMA1 MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_DMA2 MTK_M4U_ID(M4U_LARB2_ID, 2)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+
+/* larb4 */
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_DISP_OD1_R MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define M4U_PORT_DISP_OD1_W MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 5)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 6)
+
+/* larb5 */
+#define M4U_PORT_DISP_OVL2 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_DISP_WDMA2 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_MDP_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB5_ID, 3)
+
+/* larb6 */
+#define M4U_PORT_JPGDEC_WDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_JPGDEC_WDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define M4U_PORT_JPGDEC_BSDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 2)
+#define M4U_PORT_JPGDEC_BSDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 3)
+
+/* larb7 */
+#define M4U_PORT_MDP_RDMA3 MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define M4U_PORT_MDP_WROT2 MTK_M4U_ID(M4U_LARB7_ID, 1)
+
+/* larb8 */
+#define M4U_PORT_VDO MTK_M4U_ID(M4U_LARB8_ID, 0)
+#define M4U_PORT_NR MTK_M4U_ID(M4U_LARB8_ID, 1)
+#define M4U_PORT_WR_CHANNEL0 MTK_M4U_ID(M4U_LARB8_ID, 2)
+
+/* larb9 */
+#define M4U_PORT_TVD MTK_M4U_ID(M4U_LARB9_ID, 0)
+#define M4U_PORT_WR_CHANNEL1 MTK_M4U_ID(M4U_LARB9_ID, 1)
+
+#endif
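
Every port constant above is built with MTK_M4U_ID(), which keeps the local arbiter (larb) number in bits 5 and up and the port number in the low five bits, so each larb can expose up to 32 ports. A minimal standalone C11 check (a sketch, not part of the patch; the macro is restated inline):

    /* Restated from mt2712-larb-port.h above. */
    #define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))

    /* larb 1, port 10 (M4U_PORT_HW_IMG_RESZ_EXT) -> (1 << 5) | 10 == 42 */
    _Static_assert(MTK_M4U_ID(1, 10) == 42, "larb/port packing");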
diff --git a/include/dt-bindings/mfd/at91-usart.h b/include/dt-bindings/mfd/at91-usart.h
new file mode 100644
index 000000000000..2de5bc312e1e
--- /dev/null
+++ b/include/dt-bindings/mfd/at91-usart.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides macros for AT91 USART DT bindings.
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_AT91_USART_H__
+#define __DT_BINDINGS_AT91_USART_H__
+
+#define AT91_USART_MODE_SERIAL 0
+#define AT91_USART_MODE_SPI 1
+
+#endif /* __DT_BINDINGS_AT91_USART_H__ */
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
index 697161f80eb5..9eb2ec2b2ea9 100644
--- a/include/dt-bindings/net/mscc-phy-vsc8531.h
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -18,9 +18,11 @@
#define VSC8531_LINK_100_1000_ACTIVITY 4
#define VSC8531_LINK_10_1000_ACTIVITY 5
#define VSC8531_LINK_10_100_ACTIVITY 6
+#define VSC8584_LINK_100FX_1000X_ACTIVITY 7
#define VSC8531_DUPLEX_COLLISION 8
#define VSC8531_COLLISION 9
#define VSC8531_ACTIVITY 10
+#define VSC8584_100FX_1000X_ACTIVITY 11
#define VSC8531_AUTONEG_FAULT 12
#define VSC8531_SERIAL_MODE 13
#define VSC8531_FORCE_LED_OFF 14
diff --git a/include/dt-bindings/phy/phy-ocelot-serdes.h b/include/dt-bindings/phy/phy-ocelot-serdes.h
new file mode 100644
index 000000000000..fe70adaca68f
--- /dev/null
+++ b/include/dt-bindings/phy/phy-ocelot-serdes.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018 Microsemi Corporation */
+#ifndef __PHY_OCELOT_SERDES_H__
+#define __PHY_OCELOT_SERDES_H__
+
+#define SERDES1G(x) (x)
+#define SERDES1G_MAX SERDES1G(5)
+#define SERDES6G(x) (SERDES1G_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(2)
+#define SERDES_MAX (SERDES6G_MAX + 1)
+
+#endif
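
The SERDES1G()/SERDES6G() macros map both banks onto one flat index space: the 6G indices start immediately after SERDES1G_MAX, and SERDES_MAX is one past the last 6G entry. A minimal standalone C11 check (a sketch, not part of the patch; the macros are restated inline):

    /* Restated from phy-ocelot-serdes.h above. */
    #define SERDES1G(x)  (x)
    #define SERDES1G_MAX SERDES1G(5)
    #define SERDES6G(x)  (SERDES1G_MAX + 1 + (x))
    #define SERDES6G_MAX SERDES6G(2)
    #define SERDES_MAX   (SERDES6G_MAX + 1)

    _Static_assert(SERDES6G(0) == 6, "6G indices follow the 1G block");
    _Static_assert(SERDES_MAX == 9, "six 1G plus three 6G instances");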
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
index e4e4fdf5d38f..b5b2654a0e4d 100644
--- a/include/dt-bindings/pinctrl/bcm2835.h
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -1,14 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header providing constants for bcm2835 pinctrl bindings.
*
* Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
new file mode 100644
index 000000000000..45e11b6170ca
--- /dev/null
+++ b/include/dt-bindings/pinctrl/k3.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for pinctrl bindings for TI's K3 SoC
+ * family.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
+#define _DT_BINDINGS_PINCTRL_TI_K3_H
+
+#define PULLUDEN_SHIFT (16)
+#define PULLTYPESEL_SHIFT (17)
+#define RXACTIVE_SHIFT (18)
+
+#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
+#define PULL_ENABLE (0 << PULLUDEN_SHIFT)
+
+#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
+#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE)
+
+#define INPUT_EN (1 << RXACTIVE_SHIFT)
+#define INPUT_DISABLE (0 << RXACTIVE_SHIFT)
+
+/* Only these macros are expected to be used directly in device tree files */
+#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE)
+#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP)
+#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN)
+#define PIN_INPUT (INPUT_EN | PULL_DISABLE)
+#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
+#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)
+
+#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
+
+#endif
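
The PIN_* convenience macros above are ORed combinations of the pull and receiver bits: PULLUDEN (bit 16) disables the pull when set, PULLTYPESEL (bit 17) selects pull-up over pull-down, and RXACTIVE (bit 18) enables the input path. A minimal standalone C11 check (a sketch, not part of the patch; the building blocks are restated inline) shows the value a device tree gets for an input pad with a pull-up:

    /* Restated from k3.h above. */
    #define PULLUDEN_SHIFT    (16)
    #define PULLTYPESEL_SHIFT (17)
    #define RXACTIVE_SHIFT    (18)

    #define PULL_ENABLE  (0 << PULLUDEN_SHIFT)
    #define PULL_UP      (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
    #define INPUT_EN     (1 << RXACTIVE_SHIFT)

    #define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)

    /* bit 18 (RXACTIVE) + bit 17 (PULLTYPESEL), pull left enabled -> 0x60000 */
    _Static_assert(PIN_INPUT_PULLUP == 0x60000, "input pad with pull-up");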
diff --git a/include/dt-bindings/pinctrl/mt6797-pinfunc.h b/include/dt-bindings/pinctrl/mt6797-pinfunc.h
new file mode 100644
index 000000000000..e9813361b27c
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6797-pinfunc.h
@@ -0,0 +1,1368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MT6797_PINFUNC_H
+#define __DTS_MT6797_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT6797_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT6797_GPIO0__FUNC_CSI0A_L0P_T0A (MTK_PIN_NO(0) | 1)
+
+#define MT6797_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT6797_GPIO1__FUNC_CSI0A_L0N_T0B (MTK_PIN_NO(1) | 1)
+
+#define MT6797_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT6797_GPIO2__FUNC_CSI0A_L1P_T0C (MTK_PIN_NO(2) | 1)
+
+#define MT6797_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT6797_GPIO3__FUNC_CSI0A_L1N_T1A (MTK_PIN_NO(3) | 1)
+
+#define MT6797_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT6797_GPIO4__FUNC_CSI0A_L2P_T1B (MTK_PIN_NO(4) | 1)
+
+#define MT6797_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT6797_GPIO5__FUNC_CSI0A_L2N_T1C (MTK_PIN_NO(5) | 1)
+
+#define MT6797_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT6797_GPIO6__FUNC_CSI0B_L0P_T0A (MTK_PIN_NO(6) | 1)
+
+#define MT6797_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT6797_GPIO7__FUNC_CSI0B_L0N_T0B (MTK_PIN_NO(7) | 1)
+
+#define MT6797_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT6797_GPIO8__FUNC_CSI0B_L1P_T0C (MTK_PIN_NO(8) | 1)
+
+#define MT6797_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT6797_GPIO9__FUNC_CSI0B_L1N_T1A (MTK_PIN_NO(9) | 1)
+
+#define MT6797_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT6797_GPIO10__FUNC_CSI1A_L0P_T0A (MTK_PIN_NO(10) | 1)
+
+#define MT6797_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT6797_GPIO11__FUNC_CSI1A_L0N_T0B (MTK_PIN_NO(11) | 1)
+
+#define MT6797_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT6797_GPIO12__FUNC_CSI1A_L1P_T0C (MTK_PIN_NO(12) | 1)
+
+#define MT6797_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT6797_GPIO13__FUNC_CSI1A_L1N_T1A (MTK_PIN_NO(13) | 1)
+
+#define MT6797_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT6797_GPIO14__FUNC_CSI1A_L2P_T1B (MTK_PIN_NO(14) | 1)
+
+#define MT6797_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT6797_GPIO15__FUNC_CSI1A_L2N_T1C (MTK_PIN_NO(15) | 1)
+
+#define MT6797_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT6797_GPIO16__FUNC_CSI1B_L0P_T0A (MTK_PIN_NO(16) | 1)
+
+#define MT6797_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT6797_GPIO17__FUNC_CSI1B_L0N_T0B (MTK_PIN_NO(17) | 1)
+
+#define MT6797_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT6797_GPIO18__FUNC_CSI1B_L1P_T0C (MTK_PIN_NO(18) | 1)
+
+#define MT6797_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT6797_GPIO19__FUNC_CSI1B_L1N_T1A (MTK_PIN_NO(19) | 1)
+
+#define MT6797_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT6797_GPIO20__FUNC_CSI1B_L2P_T1B (MTK_PIN_NO(20) | 1)
+
+#define MT6797_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT6797_GPIO21__FUNC_CSI1B_L2N_T1C (MTK_PIN_NO(21) | 1)
+
+#define MT6797_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT6797_GPIO22__FUNC_CSI2_L0P_T0A (MTK_PIN_NO(22) | 1)
+
+#define MT6797_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT6797_GPIO23__FUNC_CSI2_L0N_T0B (MTK_PIN_NO(23) | 1)
+
+#define MT6797_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT6797_GPIO24__FUNC_CSI2_L1P_T0C (MTK_PIN_NO(24) | 1)
+
+#define MT6797_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT6797_GPIO25__FUNC_CSI2_L1N_T1A (MTK_PIN_NO(25) | 1)
+
+#define MT6797_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT6797_GPIO26__FUNC_CSI2_L2P_T1B (MTK_PIN_NO(26) | 1)
+
+#define MT6797_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT6797_GPIO27__FUNC_CSI2_L2N_T1C (MTK_PIN_NO(27) | 1)
+
+#define MT6797_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT6797_GPIO28__FUNC_SPI5_CLK_A (MTK_PIN_NO(28) | 1)
+#define MT6797_GPIO28__FUNC_IRTX_OUT (MTK_PIN_NO(28) | 2)
+#define MT6797_GPIO28__FUNC_UDI_TDO (MTK_PIN_NO(28) | 3)
+#define MT6797_GPIO28__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(28) | 4)
+#define MT6797_GPIO28__FUNC_CONN_MCU_TDO (MTK_PIN_NO(28) | 5)
+#define MT6797_GPIO28__FUNC_PWM_A (MTK_PIN_NO(28) | 6)
+#define MT6797_GPIO28__FUNC_C2K_DM_OTDO (MTK_PIN_NO(28) | 7)
+
+#define MT6797_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT6797_GPIO29__FUNC_SPI5_MI_A (MTK_PIN_NO(29) | 1)
+#define MT6797_GPIO29__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(29) | 2)
+#define MT6797_GPIO29__FUNC_UDI_TMS (MTK_PIN_NO(29) | 3)
+#define MT6797_GPIO29__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(29) | 4)
+#define MT6797_GPIO29__FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 5)
+#define MT6797_GPIO29__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(29) | 6)
+#define MT6797_GPIO29__FUNC_C2K_DM_OTMS (MTK_PIN_NO(29) | 7)
+
+#define MT6797_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT6797_GPIO30__FUNC_CMMCLK0 (MTK_PIN_NO(30) | 1)
+#define MT6797_GPIO30__FUNC_MD_CLKM0 (MTK_PIN_NO(30) | 7)
+
+#define MT6797_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT6797_GPIO31__FUNC_CMMCLK1 (MTK_PIN_NO(31) | 1)
+#define MT6797_GPIO31__FUNC_MD_CLKM1 (MTK_PIN_NO(31) | 7)
+
+#define MT6797_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT6797_GPIO32__FUNC_SPI5_CS_A (MTK_PIN_NO(32) | 1)
+#define MT6797_GPIO32__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(32) | 2)
+#define MT6797_GPIO32__FUNC_UDI_TCK_XI (MTK_PIN_NO(32) | 3)
+#define MT6797_GPIO32__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(32) | 4)
+#define MT6797_GPIO32__FUNC_CONN_MCU_TCK (MTK_PIN_NO(32) | 5)
+#define MT6797_GPIO32__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(32) | 6)
+#define MT6797_GPIO32__FUNC_C2K_DM_OTCK (MTK_PIN_NO(32) | 7)
+
+#define MT6797_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT6797_GPIO33__FUNC_SPI5_MO_A (MTK_PIN_NO(33) | 1)
+#define MT6797_GPIO33__FUNC_CMFLASH (MTK_PIN_NO(33) | 2)
+#define MT6797_GPIO33__FUNC_UDI_TDI (MTK_PIN_NO(33) | 3)
+#define MT6797_GPIO33__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(33) | 4)
+#define MT6797_GPIO33__FUNC_CONN_MCU_TDI (MTK_PIN_NO(33) | 5)
+#define MT6797_GPIO33__FUNC_MD_URXD0 (MTK_PIN_NO(33) | 6)
+#define MT6797_GPIO33__FUNC_C2K_DM_OTDI (MTK_PIN_NO(33) | 7)
+
+#define MT6797_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT6797_GPIO34__FUNC_CMFLASH (MTK_PIN_NO(34) | 1)
+#define MT6797_GPIO34__FUNC_CLKM0 (MTK_PIN_NO(34) | 2)
+#define MT6797_GPIO34__FUNC_UDI_NTRST (MTK_PIN_NO(34) | 3)
+#define MT6797_GPIO34__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(34) | 4)
+#define MT6797_GPIO34__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5)
+#define MT6797_GPIO34__FUNC_MD_UTXD0 (MTK_PIN_NO(34) | 6)
+#define MT6797_GPIO34__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(34) | 7)
+
+#define MT6797_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT6797_GPIO35__FUNC_CMMCLK3 (MTK_PIN_NO(35) | 1)
+#define MT6797_GPIO35__FUNC_CLKM1 (MTK_PIN_NO(35) | 2)
+#define MT6797_GPIO35__FUNC_MD_URXD1 (MTK_PIN_NO(35) | 3)
+#define MT6797_GPIO35__FUNC_PTA_RXD (MTK_PIN_NO(35) | 4)
+#define MT6797_GPIO35__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(35) | 5)
+#define MT6797_GPIO35__FUNC_PWM_B (MTK_PIN_NO(35) | 6)
+#define MT6797_GPIO35__FUNC_PCC_PPC_IO (MTK_PIN_NO(35) | 7)
+
+#define MT6797_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT6797_GPIO36__FUNC_CMMCLK2 (MTK_PIN_NO(36) | 1)
+#define MT6797_GPIO36__FUNC_CLKM2 (MTK_PIN_NO(36) | 2)
+#define MT6797_GPIO36__FUNC_MD_UTXD1 (MTK_PIN_NO(36) | 3)
+#define MT6797_GPIO36__FUNC_PTA_TXD (MTK_PIN_NO(36) | 4)
+#define MT6797_GPIO36__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(36) | 5)
+#define MT6797_GPIO36__FUNC_PWM_C (MTK_PIN_NO(36) | 6)
+#define MT6797_GPIO36__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(36) | 7)
+
+#define MT6797_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT6797_GPIO37__FUNC_SCL0_0 (MTK_PIN_NO(37) | 1)
+
+#define MT6797_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT6797_GPIO38__FUNC_SDA0_0 (MTK_PIN_NO(38) | 1)
+
+#define MT6797_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT6797_GPIO39__FUNC_DPI_D0 (MTK_PIN_NO(39) | 1)
+#define MT6797_GPIO39__FUNC_SPI1_CLK_A (MTK_PIN_NO(39) | 2)
+#define MT6797_GPIO39__FUNC_PCM0_SYNC (MTK_PIN_NO(39) | 3)
+#define MT6797_GPIO39__FUNC_I2S0_LRCK (MTK_PIN_NO(39) | 4)
+#define MT6797_GPIO39__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(39) | 5)
+#define MT6797_GPIO39__FUNC_URXD3 (MTK_PIN_NO(39) | 6)
+#define MT6797_GPIO39__FUNC_C2K_NTRST (MTK_PIN_NO(39) | 7)
+
+#define MT6797_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT6797_GPIO40__FUNC_DPI_D1 (MTK_PIN_NO(40) | 1)
+#define MT6797_GPIO40__FUNC_SPI1_MI_A (MTK_PIN_NO(40) | 2)
+#define MT6797_GPIO40__FUNC_PCM0_CLK (MTK_PIN_NO(40) | 3)
+#define MT6797_GPIO40__FUNC_I2S0_BCK (MTK_PIN_NO(40) | 4)
+#define MT6797_GPIO40__FUNC_CONN_MCU_TDO (MTK_PIN_NO(40) | 5)
+#define MT6797_GPIO40__FUNC_UTXD3 (MTK_PIN_NO(40) | 6)
+#define MT6797_GPIO40__FUNC_C2K_TCK (MTK_PIN_NO(40) | 7)
+
+#define MT6797_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT6797_GPIO41__FUNC_DPI_D2 (MTK_PIN_NO(41) | 1)
+#define MT6797_GPIO41__FUNC_SPI1_CS_A (MTK_PIN_NO(41) | 2)
+#define MT6797_GPIO41__FUNC_PCM0_DO (MTK_PIN_NO(41) | 3)
+#define MT6797_GPIO41__FUNC_I2S3_DO (MTK_PIN_NO(41) | 4)
+#define MT6797_GPIO41__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(41) | 5)
+#define MT6797_GPIO41__FUNC_URTS3 (MTK_PIN_NO(41) | 6)
+#define MT6797_GPIO41__FUNC_C2K_TDI (MTK_PIN_NO(41) | 7)
+
+#define MT6797_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT6797_GPIO42__FUNC_DPI_D3 (MTK_PIN_NO(42) | 1)
+#define MT6797_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 2)
+#define MT6797_GPIO42__FUNC_PCM0_DI (MTK_PIN_NO(42) | 3)
+#define MT6797_GPIO42__FUNC_I2S0_DI (MTK_PIN_NO(42) | 4)
+#define MT6797_GPIO42__FUNC_CONN_MCU_TDI (MTK_PIN_NO(42) | 5)
+#define MT6797_GPIO42__FUNC_UCTS3 (MTK_PIN_NO(42) | 6)
+#define MT6797_GPIO42__FUNC_C2K_TMS (MTK_PIN_NO(42) | 7)
+
+#define MT6797_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT6797_GPIO43__FUNC_DPI_D4 (MTK_PIN_NO(43) | 1)
+#define MT6797_GPIO43__FUNC_SPI2_CLK_A (MTK_PIN_NO(43) | 2)
+#define MT6797_GPIO43__FUNC_PCM1_SYNC (MTK_PIN_NO(43) | 3)
+#define MT6797_GPIO43__FUNC_I2S2_LRCK (MTK_PIN_NO(43) | 4)
+#define MT6797_GPIO43__FUNC_CONN_MCU_TMS (MTK_PIN_NO(43) | 5)
+#define MT6797_GPIO43__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(43) | 6)
+#define MT6797_GPIO43__FUNC_C2K_TDO (MTK_PIN_NO(43) | 7)
+
+#define MT6797_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT6797_GPIO44__FUNC_DPI_D5 (MTK_PIN_NO(44) | 1)
+#define MT6797_GPIO44__FUNC_SPI2_MI_A (MTK_PIN_NO(44) | 2)
+#define MT6797_GPIO44__FUNC_PCM1_CLK (MTK_PIN_NO(44) | 3)
+#define MT6797_GPIO44__FUNC_I2S2_BCK (MTK_PIN_NO(44) | 4)
+#define MT6797_GPIO44__FUNC_CONN_MCU_TCK (MTK_PIN_NO(44) | 5)
+#define MT6797_GPIO44__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(44) | 6)
+#define MT6797_GPIO44__FUNC_C2K_RTCK (MTK_PIN_NO(44) | 7)
+
+#define MT6797_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT6797_GPIO45__FUNC_DPI_D6 (MTK_PIN_NO(45) | 1)
+#define MT6797_GPIO45__FUNC_SPI2_CS_A (MTK_PIN_NO(45) | 2)
+#define MT6797_GPIO45__FUNC_PCM1_DI (MTK_PIN_NO(45) | 3)
+#define MT6797_GPIO45__FUNC_I2S2_DI (MTK_PIN_NO(45) | 4)
+#define MT6797_GPIO45__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(45) | 5)
+#define MT6797_GPIO45__FUNC_MD_URXD0 (MTK_PIN_NO(45) | 6)
+
+#define MT6797_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT6797_GPIO46__FUNC_DPI_D7 (MTK_PIN_NO(46) | 1)
+#define MT6797_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 2)
+#define MT6797_GPIO46__FUNC_PCM1_DO0 (MTK_PIN_NO(46) | 3)
+#define MT6797_GPIO46__FUNC_I2S1_DO (MTK_PIN_NO(46) | 4)
+#define MT6797_GPIO46__FUNC_ANT_SEL0 (MTK_PIN_NO(46) | 5)
+#define MT6797_GPIO46__FUNC_MD_UTXD0 (MTK_PIN_NO(46) | 6)
+
+#define MT6797_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT6797_GPIO47__FUNC_DPI_D8 (MTK_PIN_NO(47) | 1)
+#define MT6797_GPIO47__FUNC_CLKM0 (MTK_PIN_NO(47) | 2)
+#define MT6797_GPIO47__FUNC_PCM1_DO1 (MTK_PIN_NO(47) | 3)
+#define MT6797_GPIO47__FUNC_I2S0_MCK (MTK_PIN_NO(47) | 4)
+#define MT6797_GPIO47__FUNC_ANT_SEL1 (MTK_PIN_NO(47) | 5)
+#define MT6797_GPIO47__FUNC_PTA_RXD (MTK_PIN_NO(47) | 6)
+#define MT6797_GPIO47__FUNC_C2K_URXD0 (MTK_PIN_NO(47) | 7)
+
+#define MT6797_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT6797_GPIO48__FUNC_DPI_D9 (MTK_PIN_NO(48) | 1)
+#define MT6797_GPIO48__FUNC_CLKM1 (MTK_PIN_NO(48) | 2)
+#define MT6797_GPIO48__FUNC_CMFLASH (MTK_PIN_NO(48) | 3)
+#define MT6797_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 4)
+#define MT6797_GPIO48__FUNC_ANT_SEL2 (MTK_PIN_NO(48) | 5)
+#define MT6797_GPIO48__FUNC_PTA_TXD (MTK_PIN_NO(48) | 6)
+#define MT6797_GPIO48__FUNC_C2K_UTXD0 (MTK_PIN_NO(48) | 7)
+
+#define MT6797_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT6797_GPIO49__FUNC_DPI_D10 (MTK_PIN_NO(49) | 1)
+#define MT6797_GPIO49__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(49) | 2)
+#define MT6797_GPIO49__FUNC_PWM_C (MTK_PIN_NO(49) | 3)
+#define MT6797_GPIO49__FUNC_IRTX_OUT (MTK_PIN_NO(49) | 4)
+#define MT6797_GPIO49__FUNC_ANT_SEL3 (MTK_PIN_NO(49) | 5)
+#define MT6797_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 6)
+
+#define MT6797_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT6797_GPIO50__FUNC_DPI_D11 (MTK_PIN_NO(50) | 1)
+#define MT6797_GPIO50__FUNC_MD_INT2 (MTK_PIN_NO(50) | 2)
+#define MT6797_GPIO50__FUNC_PWM_D (MTK_PIN_NO(50) | 3)
+#define MT6797_GPIO50__FUNC_CLKM2 (MTK_PIN_NO(50) | 4)
+#define MT6797_GPIO50__FUNC_ANT_SEL4 (MTK_PIN_NO(50) | 5)
+#define MT6797_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 6)
+
+#define MT6797_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT6797_GPIO51__FUNC_DPI_DE (MTK_PIN_NO(51) | 1)
+#define MT6797_GPIO51__FUNC_SPI4_CLK_A (MTK_PIN_NO(51) | 2)
+#define MT6797_GPIO51__FUNC_IRTX_OUT (MTK_PIN_NO(51) | 3)
+#define MT6797_GPIO51__FUNC_SCL0_1 (MTK_PIN_NO(51) | 4)
+#define MT6797_GPIO51__FUNC_ANT_SEL5 (MTK_PIN_NO(51) | 5)
+#define MT6797_GPIO51__FUNC_C2K_UTXD1 (MTK_PIN_NO(51) | 7)
+
+#define MT6797_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT6797_GPIO52__FUNC_DPI_CK (MTK_PIN_NO(52) | 1)
+#define MT6797_GPIO52__FUNC_SPI4_MI_A (MTK_PIN_NO(52) | 2)
+#define MT6797_GPIO52__FUNC_SPI4_MO_A (MTK_PIN_NO(52) | 3)
+#define MT6797_GPIO52__FUNC_SDA0_1 (MTK_PIN_NO(52) | 4)
+#define MT6797_GPIO52__FUNC_ANT_SEL6 (MTK_PIN_NO(52) | 5)
+#define MT6797_GPIO52__FUNC_C2K_URXD1 (MTK_PIN_NO(52) | 7)
+
+#define MT6797_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT6797_GPIO53__FUNC_DPI_HSYNC (MTK_PIN_NO(53) | 1)
+#define MT6797_GPIO53__FUNC_SPI4_CS_A (MTK_PIN_NO(53) | 2)
+#define MT6797_GPIO53__FUNC_CMFLASH (MTK_PIN_NO(53) | 3)
+#define MT6797_GPIO53__FUNC_SCL1_1 (MTK_PIN_NO(53) | 4)
+#define MT6797_GPIO53__FUNC_ANT_SEL7 (MTK_PIN_NO(53) | 5)
+#define MT6797_GPIO53__FUNC_MD_URXD2 (MTK_PIN_NO(53) | 6)
+#define MT6797_GPIO53__FUNC_PCC_PPC_IO (MTK_PIN_NO(53) | 7)
+
+#define MT6797_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT6797_GPIO54__FUNC_DPI_VSYNC (MTK_PIN_NO(54) | 1)
+#define MT6797_GPIO54__FUNC_SPI4_MO_A (MTK_PIN_NO(54) | 2)
+#define MT6797_GPIO54__FUNC_SPI4_MI_A (MTK_PIN_NO(54) | 3)
+#define MT6797_GPIO54__FUNC_SDA1_1 (MTK_PIN_NO(54) | 4)
+#define MT6797_GPIO54__FUNC_PWM_A (MTK_PIN_NO(54) | 5)
+#define MT6797_GPIO54__FUNC_MD_UTXD2 (MTK_PIN_NO(54) | 6)
+#define MT6797_GPIO54__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(54) | 7)
+
+#define MT6797_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT6797_GPIO55__FUNC_SCL1_0 (MTK_PIN_NO(55) | 1)
+
+#define MT6797_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT6797_GPIO56__FUNC_SDA1_0 (MTK_PIN_NO(56) | 1)
+
+#define MT6797_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT6797_GPIO57__FUNC_SPI0_CLK (MTK_PIN_NO(57) | 1)
+#define MT6797_GPIO57__FUNC_SCL0_2 (MTK_PIN_NO(57) | 2)
+#define MT6797_GPIO57__FUNC_PWM_B (MTK_PIN_NO(57) | 3)
+#define MT6797_GPIO57__FUNC_UTXD3 (MTK_PIN_NO(57) | 4)
+#define MT6797_GPIO57__FUNC_PCM0_SYNC (MTK_PIN_NO(57) | 5)
+
+#define MT6797_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT6797_GPIO58__FUNC_SPI0_MI (MTK_PIN_NO(58) | 1)
+#define MT6797_GPIO58__FUNC_SPI0_MO (MTK_PIN_NO(58) | 2)
+#define MT6797_GPIO58__FUNC_SDA1_2 (MTK_PIN_NO(58) | 3)
+#define MT6797_GPIO58__FUNC_URXD3 (MTK_PIN_NO(58) | 4)
+#define MT6797_GPIO58__FUNC_PCM0_CLK (MTK_PIN_NO(58) | 5)
+
+#define MT6797_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT6797_GPIO59__FUNC_SPI0_MO (MTK_PIN_NO(59) | 1)
+#define MT6797_GPIO59__FUNC_SPI0_MI (MTK_PIN_NO(59) | 2)
+#define MT6797_GPIO59__FUNC_PWM_C (MTK_PIN_NO(59) | 3)
+#define MT6797_GPIO59__FUNC_URTS3 (MTK_PIN_NO(59) | 4)
+#define MT6797_GPIO59__FUNC_PCM0_DO (MTK_PIN_NO(59) | 5)
+
+#define MT6797_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT6797_GPIO60__FUNC_SPI0_CS (MTK_PIN_NO(60) | 1)
+#define MT6797_GPIO60__FUNC_SDA0_2 (MTK_PIN_NO(60) | 2)
+#define MT6797_GPIO60__FUNC_SCL1_2 (MTK_PIN_NO(60) | 3)
+#define MT6797_GPIO60__FUNC_UCTS3 (MTK_PIN_NO(60) | 4)
+#define MT6797_GPIO60__FUNC_PCM0_DI (MTK_PIN_NO(60) | 5)
+
+#define MT6797_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT6797_GPIO61__FUNC_EINT0 (MTK_PIN_NO(61) | 1)
+#define MT6797_GPIO61__FUNC_IDDIG (MTK_PIN_NO(61) | 2)
+#define MT6797_GPIO61__FUNC_SPI4_CLK_B (MTK_PIN_NO(61) | 3)
+#define MT6797_GPIO61__FUNC_I2S0_LRCK (MTK_PIN_NO(61) | 4)
+#define MT6797_GPIO61__FUNC_PCM0_SYNC (MTK_PIN_NO(61) | 5)
+#define MT6797_GPIO61__FUNC_C2K_EINT0 (MTK_PIN_NO(61) | 7)
+
+#define MT6797_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT6797_GPIO62__FUNC_EINT1 (MTK_PIN_NO(62) | 1)
+#define MT6797_GPIO62__FUNC_USB_DRVVBUS (MTK_PIN_NO(62) | 2)
+#define MT6797_GPIO62__FUNC_SPI4_MI_B (MTK_PIN_NO(62) | 3)
+#define MT6797_GPIO62__FUNC_I2S0_BCK (MTK_PIN_NO(62) | 4)
+#define MT6797_GPIO62__FUNC_PCM0_CLK (MTK_PIN_NO(62) | 5)
+#define MT6797_GPIO62__FUNC_C2K_EINT1 (MTK_PIN_NO(62) | 7)
+
+#define MT6797_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT6797_GPIO63__FUNC_EINT2 (MTK_PIN_NO(63) | 1)
+#define MT6797_GPIO63__FUNC_IRTX_OUT (MTK_PIN_NO(63) | 2)
+#define MT6797_GPIO63__FUNC_SPI4_MO_B (MTK_PIN_NO(63) | 3)
+#define MT6797_GPIO63__FUNC_I2S0_MCK (MTK_PIN_NO(63) | 4)
+#define MT6797_GPIO63__FUNC_PCM0_DI (MTK_PIN_NO(63) | 5)
+#define MT6797_GPIO63__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(63) | 7)
+
+#define MT6797_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT6797_GPIO64__FUNC_EINT3 (MTK_PIN_NO(64) | 1)
+#define MT6797_GPIO64__FUNC_CMFLASH (MTK_PIN_NO(64) | 2)
+#define MT6797_GPIO64__FUNC_SPI4_CS_B (MTK_PIN_NO(64) | 3)
+#define MT6797_GPIO64__FUNC_I2S0_DI (MTK_PIN_NO(64) | 4)
+#define MT6797_GPIO64__FUNC_PCM0_DO (MTK_PIN_NO(64) | 5)
+#define MT6797_GPIO64__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(64) | 7)
+
+#define MT6797_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT6797_GPIO65__FUNC_EINT4 (MTK_PIN_NO(65) | 1)
+#define MT6797_GPIO65__FUNC_CLKM0 (MTK_PIN_NO(65) | 2)
+#define MT6797_GPIO65__FUNC_SPI5_CLK_B (MTK_PIN_NO(65) | 3)
+#define MT6797_GPIO65__FUNC_I2S1_LRCK (MTK_PIN_NO(65) | 4)
+#define MT6797_GPIO65__FUNC_PWM_A (MTK_PIN_NO(65) | 5)
+#define MT6797_GPIO65__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(65) | 7)
+
+#define MT6797_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT6797_GPIO66__FUNC_EINT5 (MTK_PIN_NO(66) | 1)
+#define MT6797_GPIO66__FUNC_CLKM1 (MTK_PIN_NO(66) | 2)
+#define MT6797_GPIO66__FUNC_SPI5_MI_B (MTK_PIN_NO(66) | 3)
+#define MT6797_GPIO66__FUNC_I2S1_BCK (MTK_PIN_NO(66) | 4)
+#define MT6797_GPIO66__FUNC_PWM_B (MTK_PIN_NO(66) | 5)
+#define MT6797_GPIO66__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(66) | 7)
+
+#define MT6797_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT6797_GPIO67__FUNC_EINT6 (MTK_PIN_NO(67) | 1)
+#define MT6797_GPIO67__FUNC_CLKM2 (MTK_PIN_NO(67) | 2)
+#define MT6797_GPIO67__FUNC_SPI5_MO_B (MTK_PIN_NO(67) | 3)
+#define MT6797_GPIO67__FUNC_I2S1_MCK (MTK_PIN_NO(67) | 4)
+#define MT6797_GPIO67__FUNC_PWM_C (MTK_PIN_NO(67) | 5)
+#define MT6797_GPIO67__FUNC_DBG_MON_A0 (MTK_PIN_NO(67) | 7)
+
+#define MT6797_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT6797_GPIO68__FUNC_EINT7 (MTK_PIN_NO(68) | 1)
+#define MT6797_GPIO68__FUNC_CLKM3 (MTK_PIN_NO(68) | 2)
+#define MT6797_GPIO68__FUNC_SPI5_CS_B (MTK_PIN_NO(68) | 3)
+#define MT6797_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 4)
+#define MT6797_GPIO68__FUNC_PWM_D (MTK_PIN_NO(68) | 5)
+#define MT6797_GPIO68__FUNC_DBG_MON_A1 (MTK_PIN_NO(68) | 7)
+
+#define MT6797_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT6797_GPIO69__FUNC_I2S0_LRCK (MTK_PIN_NO(69) | 1)
+#define MT6797_GPIO69__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 2)
+#define MT6797_GPIO69__FUNC_I2S1_LRCK (MTK_PIN_NO(69) | 3)
+#define MT6797_GPIO69__FUNC_I2S2_LRCK (MTK_PIN_NO(69) | 4)
+#define MT6797_GPIO69__FUNC_DBG_MON_A2 (MTK_PIN_NO(69) | 7)
+
+#define MT6797_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT6797_GPIO70__FUNC_I2S0_BCK (MTK_PIN_NO(70) | 1)
+#define MT6797_GPIO70__FUNC_I2S3_BCK (MTK_PIN_NO(70) | 2)
+#define MT6797_GPIO70__FUNC_I2S1_BCK (MTK_PIN_NO(70) | 3)
+#define MT6797_GPIO70__FUNC_I2S2_BCK (MTK_PIN_NO(70) | 4)
+#define MT6797_GPIO70__FUNC_DBG_MON_A3 (MTK_PIN_NO(70) | 7)
+
+#define MT6797_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT6797_GPIO71__FUNC_I2S0_MCK (MTK_PIN_NO(71) | 1)
+#define MT6797_GPIO71__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 2)
+#define MT6797_GPIO71__FUNC_I2S1_MCK (MTK_PIN_NO(71) | 3)
+#define MT6797_GPIO71__FUNC_I2S2_MCK (MTK_PIN_NO(71) | 4)
+#define MT6797_GPIO71__FUNC_DBG_MON_A4 (MTK_PIN_NO(71) | 7)
+
+#define MT6797_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+/* #define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 1) */
+#define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 2)
+/* #define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 3) */
+#define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 4)
+#define MT6797_GPIO72__FUNC_DBG_MON_A5 (MTK_PIN_NO(72) | 7)
+
+#define MT6797_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+/* #define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 1) */
+#define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 2)
+/* #define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 3) */
+#define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 4)
+#define MT6797_GPIO73__FUNC_DBG_MON_A6 (MTK_PIN_NO(73) | 7)
+
+#define MT6797_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT6797_GPIO74__FUNC_SCL3_0 (MTK_PIN_NO(74) | 1)
+#define MT6797_GPIO74__FUNC_AUXIF_CLK1 (MTK_PIN_NO(74) | 7)
+
+#define MT6797_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT6797_GPIO75__FUNC_SDA3_0 (MTK_PIN_NO(75) | 1)
+#define MT6797_GPIO75__FUNC_AUXIF_ST1 (MTK_PIN_NO(75) | 7)
+
+#define MT6797_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT6797_GPIO76__FUNC_CONN_HRST_B (MTK_PIN_NO(76) | 1)
+#define MT6797_GPIO76__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(76) | 7)
+
+#define MT6797_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT6797_GPIO77__FUNC_CONN_TOP_CLK (MTK_PIN_NO(77) | 1)
+#define MT6797_GPIO77__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(77) | 7)
+
+#define MT6797_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT6797_GPIO78__FUNC_CONN_TOP_DATA (MTK_PIN_NO(78) | 1)
+#define MT6797_GPIO78__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(78) | 7)
+
+#define MT6797_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT6797_GPIO79__FUNC_CONN_WB_PTA (MTK_PIN_NO(79) | 1)
+#define MT6797_GPIO79__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(79) | 7)
+
+#define MT6797_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT6797_GPIO80__FUNC_CONN_WF_HB0 (MTK_PIN_NO(80) | 1)
+#define MT6797_GPIO80__FUNC_C2K_EINT0 (MTK_PIN_NO(80) | 7)
+
+#define MT6797_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT6797_GPIO81__FUNC_CONN_WF_HB1 (MTK_PIN_NO(81) | 1)
+#define MT6797_GPIO81__FUNC_C2K_EINT1 (MTK_PIN_NO(81) | 7)
+
+#define MT6797_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT6797_GPIO82__FUNC_CONN_WF_HB2 (MTK_PIN_NO(82) | 1)
+#define MT6797_GPIO82__FUNC_MD_CLKM0 (MTK_PIN_NO(82) | 7)
+
+#define MT6797_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT6797_GPIO83__FUNC_CONN_BT_CLK (MTK_PIN_NO(83) | 1)
+#define MT6797_GPIO83__FUNC_MD_CLKM1 (MTK_PIN_NO(83) | 7)
+
+#define MT6797_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT6797_GPIO84__FUNC_CONN_BT_DATA (MTK_PIN_NO(84) | 1)
+
+#define MT6797_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT6797_GPIO85__FUNC_EINT8 (MTK_PIN_NO(85) | 1)
+#define MT6797_GPIO85__FUNC_I2S1_LRCK (MTK_PIN_NO(85) | 2)
+#define MT6797_GPIO85__FUNC_I2S2_LRCK (MTK_PIN_NO(85) | 3)
+#define MT6797_GPIO85__FUNC_URXD1 (MTK_PIN_NO(85) | 4)
+#define MT6797_GPIO85__FUNC_MD_URXD0 (MTK_PIN_NO(85) | 5)
+#define MT6797_GPIO85__FUNC_DBG_MON_A7 (MTK_PIN_NO(85) | 7)
+
+#define MT6797_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT6797_GPIO86__FUNC_EINT9 (MTK_PIN_NO(86) | 1)
+#define MT6797_GPIO86__FUNC_I2S1_BCK (MTK_PIN_NO(86) | 2)
+#define MT6797_GPIO86__FUNC_I2S2_BCK (MTK_PIN_NO(86) | 3)
+#define MT6797_GPIO86__FUNC_UTXD1 (MTK_PIN_NO(86) | 4)
+#define MT6797_GPIO86__FUNC_MD_UTXD0 (MTK_PIN_NO(86) | 5)
+#define MT6797_GPIO86__FUNC_DBG_MON_A8 (MTK_PIN_NO(86) | 7)
+
+#define MT6797_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT6797_GPIO87__FUNC_EINT10 (MTK_PIN_NO(87) | 1)
+#define MT6797_GPIO87__FUNC_I2S1_MCK (MTK_PIN_NO(87) | 2)
+#define MT6797_GPIO87__FUNC_I2S2_MCK (MTK_PIN_NO(87) | 3)
+#define MT6797_GPIO87__FUNC_URTS1 (MTK_PIN_NO(87) | 4)
+#define MT6797_GPIO87__FUNC_MD_URXD1 (MTK_PIN_NO(87) | 5)
+#define MT6797_GPIO87__FUNC_DBG_MON_A9 (MTK_PIN_NO(87) | 7)
+
+#define MT6797_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT6797_GPIO88__FUNC_EINT11 (MTK_PIN_NO(88) | 1)
+#define MT6797_GPIO88__FUNC_I2S1_DO (MTK_PIN_NO(88) | 2)
+#define MT6797_GPIO88__FUNC_I2S2_DI (MTK_PIN_NO(88) | 3)
+#define MT6797_GPIO88__FUNC_UCTS1 (MTK_PIN_NO(88) | 4)
+#define MT6797_GPIO88__FUNC_MD_UTXD1 (MTK_PIN_NO(88) | 5)
+#define MT6797_GPIO88__FUNC_DBG_MON_A10 (MTK_PIN_NO(88) | 7)
+
+#define MT6797_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT6797_GPIO89__FUNC_EINT12 (MTK_PIN_NO(89) | 1)
+#define MT6797_GPIO89__FUNC_IRTX_OUT (MTK_PIN_NO(89) | 2)
+#define MT6797_GPIO89__FUNC_CLKM0 (MTK_PIN_NO(89) | 3)
+#define MT6797_GPIO89__FUNC_PCM1_SYNC (MTK_PIN_NO(89) | 4)
+#define MT6797_GPIO89__FUNC_URTS0 (MTK_PIN_NO(89) | 5)
+#define MT6797_GPIO89__FUNC_DBG_MON_A11 (MTK_PIN_NO(89) | 7)
+
+#define MT6797_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT6797_GPIO90__FUNC_EINT13 (MTK_PIN_NO(90) | 1)
+#define MT6797_GPIO90__FUNC_CMFLASH (MTK_PIN_NO(90) | 2)
+#define MT6797_GPIO90__FUNC_CLKM1 (MTK_PIN_NO(90) | 3)
+#define MT6797_GPIO90__FUNC_PCM1_CLK (MTK_PIN_NO(90) | 4)
+#define MT6797_GPIO90__FUNC_UCTS0 (MTK_PIN_NO(90) | 5)
+#define MT6797_GPIO90__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(90) | 7)
+
+#define MT6797_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT6797_GPIO91__FUNC_EINT14 (MTK_PIN_NO(91) | 1)
+#define MT6797_GPIO91__FUNC_PWM_A (MTK_PIN_NO(91) | 2)
+#define MT6797_GPIO91__FUNC_CLKM2 (MTK_PIN_NO(91) | 3)
+#define MT6797_GPIO91__FUNC_PCM1_DI (MTK_PIN_NO(91) | 4)
+#define MT6797_GPIO91__FUNC_SDA0_3 (MTK_PIN_NO(91) | 5)
+#define MT6797_GPIO91__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(91) | 7)
+
+#define MT6797_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT6797_GPIO92__FUNC_EINT15 (MTK_PIN_NO(92) | 1)
+#define MT6797_GPIO92__FUNC_PWM_B (MTK_PIN_NO(92) | 2)
+#define MT6797_GPIO92__FUNC_CLKM3 (MTK_PIN_NO(92) | 3)
+#define MT6797_GPIO92__FUNC_PCM1_DO0 (MTK_PIN_NO(92) | 4)
+#define MT6797_GPIO92__FUNC_SCL0_3 (MTK_PIN_NO(92) | 5)
+
+#define MT6797_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT6797_GPIO93__FUNC_EINT16 (MTK_PIN_NO(93) | 1)
+#define MT6797_GPIO93__FUNC_IDDIG (MTK_PIN_NO(93) | 2)
+#define MT6797_GPIO93__FUNC_CLKM4 (MTK_PIN_NO(93) | 3)
+#define MT6797_GPIO93__FUNC_PCM1_DO1 (MTK_PIN_NO(93) | 4)
+#define MT6797_GPIO93__FUNC_MD_INT2 (MTK_PIN_NO(93) | 5)
+#define MT6797_GPIO93__FUNC_DROP_ZONE (MTK_PIN_NO(93) | 7)
+
+#define MT6797_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT6797_GPIO94__FUNC_USB_DRVVBUS (MTK_PIN_NO(94) | 1)
+#define MT6797_GPIO94__FUNC_PWM_C (MTK_PIN_NO(94) | 2)
+#define MT6797_GPIO94__FUNC_CLKM5 (MTK_PIN_NO(94) | 3)
+
+#define MT6797_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT6797_GPIO95__FUNC_SDA2_0 (MTK_PIN_NO(95) | 1)
+#define MT6797_GPIO95__FUNC_AUXIF_ST0 (MTK_PIN_NO(95) | 7)
+
+#define MT6797_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT6797_GPIO96__FUNC_SCL2_0 (MTK_PIN_NO(96) | 1)
+#define MT6797_GPIO96__FUNC_AUXIF_CLK0 (MTK_PIN_NO(96) | 7)
+
+#define MT6797_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT6797_GPIO97__FUNC_URXD0 (MTK_PIN_NO(97) | 1)
+#define MT6797_GPIO97__FUNC_UTXD0 (MTK_PIN_NO(97) | 2)
+#define MT6797_GPIO97__FUNC_MD_URXD0 (MTK_PIN_NO(97) | 3)
+#define MT6797_GPIO97__FUNC_MD_URXD1 (MTK_PIN_NO(97) | 4)
+#define MT6797_GPIO97__FUNC_MD_URXD2 (MTK_PIN_NO(97) | 5)
+#define MT6797_GPIO97__FUNC_C2K_URXD0 (MTK_PIN_NO(97) | 6)
+#define MT6797_GPIO97__FUNC_C2K_URXD1 (MTK_PIN_NO(97) | 7)
+
+#define MT6797_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT6797_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1)
+#define MT6797_GPIO98__FUNC_URXD0 (MTK_PIN_NO(98) | 2)
+#define MT6797_GPIO98__FUNC_MD_UTXD0 (MTK_PIN_NO(98) | 3)
+#define MT6797_GPIO98__FUNC_MD_UTXD1 (MTK_PIN_NO(98) | 4)
+#define MT6797_GPIO98__FUNC_MD_UTXD2 (MTK_PIN_NO(98) | 5)
+#define MT6797_GPIO98__FUNC_C2K_UTXD0 (MTK_PIN_NO(98) | 6)
+#define MT6797_GPIO98__FUNC_C2K_UTXD1 (MTK_PIN_NO(98) | 7)
+
+#define MT6797_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT6797_GPIO99__FUNC_RTC32K_CK (MTK_PIN_NO(99) | 1)
+
+#define MT6797_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT6797_GPIO100__FUNC_SRCLKENAI0 (MTK_PIN_NO(100) | 1)
+
+#define MT6797_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT6797_GPIO101__FUNC_SRCLKENAI1 (MTK_PIN_NO(101) | 1)
+
+#define MT6797_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT6797_GPIO102__FUNC_SRCLKENA0 (MTK_PIN_NO(102) | 1)
+
+#define MT6797_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT6797_GPIO103__FUNC_SRCLKENA1 (MTK_PIN_NO(103) | 1)
+
+#define MT6797_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT6797_GPIO104__FUNC_SYSRSTB (MTK_PIN_NO(104) | 1)
+
+#define MT6797_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT6797_GPIO105__FUNC_WATCHDOG (MTK_PIN_NO(105) | 1)
+
+#define MT6797_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT6797_GPIO106__FUNC_KPROW0 (MTK_PIN_NO(106) | 1)
+#define MT6797_GPIO106__FUNC_CMFLASH (MTK_PIN_NO(106) | 2)
+#define MT6797_GPIO106__FUNC_CLKM4 (MTK_PIN_NO(106) | 3)
+#define MT6797_GPIO106__FUNC_TP_GPIO0_AO (MTK_PIN_NO(106) | 4)
+#define MT6797_GPIO106__FUNC_IRTX_OUT (MTK_PIN_NO(106) | 5)
+
+#define MT6797_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT6797_GPIO107__FUNC_KPROW1 (MTK_PIN_NO(107) | 1)
+#define MT6797_GPIO107__FUNC_IDDIG (MTK_PIN_NO(107) | 2)
+#define MT6797_GPIO107__FUNC_CLKM5 (MTK_PIN_NO(107) | 3)
+#define MT6797_GPIO107__FUNC_TP_GPIO1_AO (MTK_PIN_NO(107) | 4)
+#define MT6797_GPIO107__FUNC_I2S1_BCK (MTK_PIN_NO(107) | 5)
+#define MT6797_GPIO107__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(107) | 7)
+
+#define MT6797_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT6797_GPIO108__FUNC_KPROW2 (MTK_PIN_NO(108) | 1)
+#define MT6797_GPIO108__FUNC_USB_DRVVBUS (MTK_PIN_NO(108) | 2)
+#define MT6797_GPIO108__FUNC_PWM_A (MTK_PIN_NO(108) | 3)
+#define MT6797_GPIO108__FUNC_CMFLASH (MTK_PIN_NO(108) | 4)
+#define MT6797_GPIO108__FUNC_I2S1_LRCK (MTK_PIN_NO(108) | 5)
+#define MT6797_GPIO108__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(108) | 7)
+
+#define MT6797_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT6797_GPIO109__FUNC_KPCOL0 (MTK_PIN_NO(109) | 1)
+
+#define MT6797_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT6797_GPIO110__FUNC_KPCOL1 (MTK_PIN_NO(110) | 1)
+#define MT6797_GPIO110__FUNC_SDA1_3 (MTK_PIN_NO(110) | 2)
+#define MT6797_GPIO110__FUNC_PWM_B (MTK_PIN_NO(110) | 3)
+#define MT6797_GPIO110__FUNC_CLKM0 (MTK_PIN_NO(110) | 4)
+#define MT6797_GPIO110__FUNC_I2S1_DO (MTK_PIN_NO(110) | 5)
+#define MT6797_GPIO110__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(110) | 7)
+
+#define MT6797_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT6797_GPIO111__FUNC_KPCOL2 (MTK_PIN_NO(111) | 1)
+#define MT6797_GPIO111__FUNC_SCL1_3 (MTK_PIN_NO(111) | 2)
+#define MT6797_GPIO111__FUNC_PWM_C (MTK_PIN_NO(111) | 3)
+#define MT6797_GPIO111__FUNC_DISP_PWM (MTK_PIN_NO(111) | 4)
+#define MT6797_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5)
+#define MT6797_GPIO111__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(111) | 7)
+
+#define MT6797_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT6797_GPIO112__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(112) | 1)
+#define MT6797_GPIO112__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(112) | 7)
+
+#define MT6797_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT6797_GPIO113__FUNC_MD_INT0_C2K_UIM0_HOT_PLUG_IN (MTK_PIN_NO(113) | 1)
+#define MT6797_GPIO113__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(113) | 7)
+
+#define MT6797_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT6797_GPIO114__FUNC_MSDC0_DAT0 (MTK_PIN_NO(114) | 1)
+
+#define MT6797_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT6797_GPIO115__FUNC_MSDC0_DAT1 (MTK_PIN_NO(115) | 1)
+
+#define MT6797_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT6797_GPIO116__FUNC_MSDC0_DAT2 (MTK_PIN_NO(116) | 1)
+
+#define MT6797_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT6797_GPIO117__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1)
+
+#define MT6797_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT6797_GPIO118__FUNC_MSDC0_DAT4 (MTK_PIN_NO(118) | 1)
+
+#define MT6797_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT6797_GPIO119__FUNC_MSDC0_DAT5 (MTK_PIN_NO(119) | 1)
+
+#define MT6797_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT6797_GPIO120__FUNC_MSDC0_DAT6 (MTK_PIN_NO(120) | 1)
+
+#define MT6797_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT6797_GPIO121__FUNC_MSDC0_DAT7 (MTK_PIN_NO(121) | 1)
+
+#define MT6797_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT6797_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1)
+
+#define MT6797_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT6797_GPIO123__FUNC_MSDC0_CLK (MTK_PIN_NO(123) | 1)
+
+#define MT6797_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT6797_GPIO124__FUNC_MSDC0_DSL (MTK_PIN_NO(124) | 1)
+
+#define MT6797_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT6797_GPIO125__FUNC_MSDC0_RSTB (MTK_PIN_NO(125) | 1)
+
+#define MT6797_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT6797_GPIO126__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(126) | 1)
+#define MT6797_GPIO126__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(126) | 2)
+#define MT6797_GPIO126__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(126) | 3)
+#define MT6797_GPIO126__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(126) | 4)
+
+#define MT6797_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT6797_GPIO127__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(127) | 1)
+#define MT6797_GPIO127__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(127) | 2)
+#define MT6797_GPIO127__FUNC_C2K_UIM0_RST (MTK_PIN_NO(127) | 3)
+#define MT6797_GPIO127__FUNC_C2K_UIM1_RST (MTK_PIN_NO(127) | 4)
+
+#define MT6797_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT6797_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1)
+#define MT6797_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2)
+#define MT6797_GPIO128__FUNC_C2K_UIM0_IO (MTK_PIN_NO(128) | 3)
+#define MT6797_GPIO128__FUNC_C2K_UIM1_IO (MTK_PIN_NO(128) | 4)
+
+#define MT6797_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT6797_GPIO129__FUNC_MSDC1_CMD (MTK_PIN_NO(129) | 1)
+#define MT6797_GPIO129__FUNC_CONN_DSP_JMS (MTK_PIN_NO(129) | 2)
+#define MT6797_GPIO129__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(129) | 3)
+#define MT6797_GPIO129__FUNC_UDI_TMS (MTK_PIN_NO(129) | 4)
+#define MT6797_GPIO129__FUNC_C2K_TMS (MTK_PIN_NO(129) | 5)
+
+#define MT6797_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT6797_GPIO130__FUNC_MSDC1_DAT0 (MTK_PIN_NO(130) | 1)
+#define MT6797_GPIO130__FUNC_CONN_DSP_JDI (MTK_PIN_NO(130) | 2)
+#define MT6797_GPIO130__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(130) | 3)
+#define MT6797_GPIO130__FUNC_UDI_TDI (MTK_PIN_NO(130) | 4)
+#define MT6797_GPIO130__FUNC_C2K_TDI (MTK_PIN_NO(130) | 5)
+
+#define MT6797_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT6797_GPIO131__FUNC_MSDC1_DAT1 (MTK_PIN_NO(131) | 1)
+#define MT6797_GPIO131__FUNC_CONN_DSP_JDO (MTK_PIN_NO(131) | 2)
+#define MT6797_GPIO131__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(131) | 3)
+#define MT6797_GPIO131__FUNC_UDI_TDO (MTK_PIN_NO(131) | 4)
+#define MT6797_GPIO131__FUNC_C2K_TDO (MTK_PIN_NO(131) | 5)
+
+#define MT6797_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT6797_GPIO132__FUNC_MSDC1_DAT2 (MTK_PIN_NO(132) | 1)
+#define MT6797_GPIO132__FUNC_C2K_RTCK (MTK_PIN_NO(132) | 5)
+
+#define MT6797_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT6797_GPIO133__FUNC_MSDC1_DAT3 (MTK_PIN_NO(133) | 1)
+#define MT6797_GPIO133__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(133) | 2)
+#define MT6797_GPIO133__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(133) | 3)
+#define MT6797_GPIO133__FUNC_UDI_NTRST (MTK_PIN_NO(133) | 4)
+#define MT6797_GPIO133__FUNC_C2K_NTRST (MTK_PIN_NO(133) | 5)
+
+#define MT6797_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT6797_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1)
+#define MT6797_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 2)
+#define MT6797_GPIO134__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(134) | 3)
+#define MT6797_GPIO134__FUNC_UDI_TCK_XI (MTK_PIN_NO(134) | 4)
+#define MT6797_GPIO134__FUNC_C2K_TCK (MTK_PIN_NO(134) | 5)
+
+#define MT6797_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT6797_GPIO135__FUNC_TDM_LRCK (MTK_PIN_NO(135) | 1)
+#define MT6797_GPIO135__FUNC_I2S0_LRCK (MTK_PIN_NO(135) | 2)
+#define MT6797_GPIO135__FUNC_CLKM0 (MTK_PIN_NO(135) | 3)
+#define MT6797_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 4)
+#define MT6797_GPIO135__FUNC_PWM_A (MTK_PIN_NO(135) | 5)
+#define MT6797_GPIO135__FUNC_DBG_MON_A12 (MTK_PIN_NO(135) | 7)
+
+#define MT6797_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT6797_GPIO136__FUNC_TDM_BCK (MTK_PIN_NO(136) | 1)
+#define MT6797_GPIO136__FUNC_I2S0_BCK (MTK_PIN_NO(136) | 2)
+#define MT6797_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 3)
+#define MT6797_GPIO136__FUNC_PCM1_CLK (MTK_PIN_NO(136) | 4)
+#define MT6797_GPIO136__FUNC_PWM_B (MTK_PIN_NO(136) | 5)
+#define MT6797_GPIO136__FUNC_DBG_MON_A13 (MTK_PIN_NO(136) | 7)
+
+#define MT6797_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT6797_GPIO137__FUNC_TDM_MCK (MTK_PIN_NO(137) | 1)
+#define MT6797_GPIO137__FUNC_I2S0_MCK (MTK_PIN_NO(137) | 2)
+#define MT6797_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 3)
+#define MT6797_GPIO137__FUNC_PCM1_DI (MTK_PIN_NO(137) | 4)
+#define MT6797_GPIO137__FUNC_IRTX_OUT (MTK_PIN_NO(137) | 5)
+#define MT6797_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7)
+
+#define MT6797_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT6797_GPIO138__FUNC_TDM_DATA0 (MTK_PIN_NO(138) | 1)
+#define MT6797_GPIO138__FUNC_I2S0_DI (MTK_PIN_NO(138) | 2)
+#define MT6797_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 3)
+#define MT6797_GPIO138__FUNC_PCM1_DO0 (MTK_PIN_NO(138) | 4)
+#define MT6797_GPIO138__FUNC_PWM_C (MTK_PIN_NO(138) | 5)
+#define MT6797_GPIO138__FUNC_SDA3_1 (MTK_PIN_NO(138) | 6)
+#define MT6797_GPIO138__FUNC_DBG_MON_A15 (MTK_PIN_NO(138) | 7)
+
+#define MT6797_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT6797_GPIO139__FUNC_TDM_DATA1 (MTK_PIN_NO(139) | 1)
+#define MT6797_GPIO139__FUNC_I2S3_DO (MTK_PIN_NO(139) | 2)
+#define MT6797_GPIO139__FUNC_CLKM4 (MTK_PIN_NO(139) | 3)
+#define MT6797_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 4)
+#define MT6797_GPIO139__FUNC_ANT_SEL2 (MTK_PIN_NO(139) | 5)
+#define MT6797_GPIO139__FUNC_SCL3_1 (MTK_PIN_NO(139) | 6)
+#define MT6797_GPIO139__FUNC_DBG_MON_A16 (MTK_PIN_NO(139) | 7)
+
+#define MT6797_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT6797_GPIO140__FUNC_TDM_DATA2 (MTK_PIN_NO(140) | 1)
+#define MT6797_GPIO140__FUNC_DISP_PWM (MTK_PIN_NO(140) | 2)
+#define MT6797_GPIO140__FUNC_CLKM5 (MTK_PIN_NO(140) | 3)
+#define MT6797_GPIO140__FUNC_SDA1_4 (MTK_PIN_NO(140) | 4)
+#define MT6797_GPIO140__FUNC_ANT_SEL1 (MTK_PIN_NO(140) | 5)
+#define MT6797_GPIO140__FUNC_URXD3 (MTK_PIN_NO(140) | 6)
+#define MT6797_GPIO140__FUNC_DBG_MON_A17 (MTK_PIN_NO(140) | 7)
+
+#define MT6797_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT6797_GPIO141__FUNC_TDM_DATA3 (MTK_PIN_NO(141) | 1)
+#define MT6797_GPIO141__FUNC_CMFLASH (MTK_PIN_NO(141) | 2)
+#define MT6797_GPIO141__FUNC_IRTX_OUT (MTK_PIN_NO(141) | 3)
+#define MT6797_GPIO141__FUNC_SCL1_4 (MTK_PIN_NO(141) | 4)
+#define MT6797_GPIO141__FUNC_ANT_SEL0 (MTK_PIN_NO(141) | 5)
+#define MT6797_GPIO141__FUNC_UTXD3 (MTK_PIN_NO(141) | 6)
+#define MT6797_GPIO141__FUNC_DBG_MON_A18 (MTK_PIN_NO(141) | 7)
+
+#define MT6797_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(142) | 1)
+#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(142) | 2)
+
+#define MT6797_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(143) | 1)
+#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(143) | 2)
+
+#define MT6797_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT6797_GPIO144__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(144) | 1)
+
+#define MT6797_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define MT6797_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1)
+
+#define MT6797_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define MT6797_GPIO146__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(146) | 1)
+
+#define MT6797_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define MT6797_GPIO147__FUNC_AUD_DAT_MISO (MTK_PIN_NO(147) | 1)
+#define MT6797_GPIO147__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(147) | 2)
+#define MT6797_GPIO147__FUNC_VOW_DAT_MISO (MTK_PIN_NO(147) | 3)
+
+#define MT6797_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define MT6797_GPIO148__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(148) | 1)
+#define MT6797_GPIO148__FUNC_AUD_DAT_MISO (MTK_PIN_NO(148) | 2)
+
+#define MT6797_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define MT6797_GPIO149__FUNC_VOW_CLK_MISO (MTK_PIN_NO(149) | 1)
+
+#define MT6797_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define MT6797_GPIO150__FUNC_ANC_DAT_MOSI (MTK_PIN_NO(150) | 1)
+
+#define MT6797_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define MT6797_GPIO151__FUNC_SCL6_0 (MTK_PIN_NO(151) | 1)
+
+#define MT6797_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define MT6797_GPIO152__FUNC_SDA6_0 (MTK_PIN_NO(152) | 1)
+
+#define MT6797_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define MT6797_GPIO153__FUNC_SCL7_0 (MTK_PIN_NO(153) | 1)
+
+#define MT6797_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define MT6797_GPIO154__FUNC_SDA7_0 (MTK_PIN_NO(154) | 1)
+
+#define MT6797_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define MT6797_GPIO155__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(155) | 1)
+#define MT6797_GPIO155__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(155) | 2)
+#define MT6797_GPIO155__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(155) | 3)
+#define MT6797_GPIO155__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(155) | 4)
+
+#define MT6797_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define MT6797_GPIO156__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(156) | 1)
+#define MT6797_GPIO156__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(156) | 2)
+#define MT6797_GPIO156__FUNC_C2K_UIM0_RST (MTK_PIN_NO(156) | 3)
+#define MT6797_GPIO156__FUNC_C2K_UIM1_RST (MTK_PIN_NO(156) | 4)
+
+#define MT6797_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define MT6797_GPIO157__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(157) | 1)
+#define MT6797_GPIO157__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(157) | 2)
+#define MT6797_GPIO157__FUNC_C2K_UIM0_IO (MTK_PIN_NO(157) | 3)
+#define MT6797_GPIO157__FUNC_C2K_UIM1_IO (MTK_PIN_NO(157) | 4)
+
+#define MT6797_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define MT6797_GPIO158__FUNC_MIPI_TDP0 (MTK_PIN_NO(158) | 1)
+
+#define MT6797_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define MT6797_GPIO159__FUNC_MIPI_TDN0 (MTK_PIN_NO(159) | 1)
+
+#define MT6797_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define MT6797_GPIO160__FUNC_MIPI_TDP1 (MTK_PIN_NO(160) | 1)
+
+#define MT6797_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define MT6797_GPIO161__FUNC_MIPI_TDN1 (MTK_PIN_NO(161) | 1)
+
+#define MT6797_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define MT6797_GPIO162__FUNC_MIPI_TCP (MTK_PIN_NO(162) | 1)
+
+#define MT6797_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define MT6797_GPIO163__FUNC_MIPI_TCN (MTK_PIN_NO(163) | 1)
+
+#define MT6797_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define MT6797_GPIO164__FUNC_MIPI_TDP2 (MTK_PIN_NO(164) | 1)
+
+#define MT6797_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define MT6797_GPIO165__FUNC_MIPI_TDN2 (MTK_PIN_NO(165) | 1)
+
+#define MT6797_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define MT6797_GPIO166__FUNC_MIPI_TDP3 (MTK_PIN_NO(166) | 1)
+
+#define MT6797_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define MT6797_GPIO167__FUNC_MIPI_TDN3 (MTK_PIN_NO(167) | 1)
+
+#define MT6797_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define MT6797_GPIO168__FUNC_MIPI_TDP0_A (MTK_PIN_NO(168) | 1)
+
+#define MT6797_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define MT6797_GPIO169__FUNC_MIPI_TDN0_A (MTK_PIN_NO(169) | 1)
+
+#define MT6797_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define MT6797_GPIO170__FUNC_MIPI_TDP1_A (MTK_PIN_NO(170) | 1)
+
+#define MT6797_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define MT6797_GPIO171__FUNC_MIPI_TDN1_A (MTK_PIN_NO(171) | 1)
+
+#define MT6797_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define MT6797_GPIO172__FUNC_MIPI_TCP_A (MTK_PIN_NO(172) | 1)
+
+#define MT6797_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define MT6797_GPIO173__FUNC_MIPI_TCN_A (MTK_PIN_NO(173) | 1)
+
+#define MT6797_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define MT6797_GPIO174__FUNC_MIPI_TDP2_A (MTK_PIN_NO(174) | 1)
+
+#define MT6797_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define MT6797_GPIO175__FUNC_MIPI_TDN2_A (MTK_PIN_NO(175) | 1)
+
+#define MT6797_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define MT6797_GPIO176__FUNC_MIPI_TDP3_A (MTK_PIN_NO(176) | 1)
+
+#define MT6797_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define MT6797_GPIO177__FUNC_MIPI_TDN3_A (MTK_PIN_NO(177) | 1)
+
+#define MT6797_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define MT6797_GPIO178__FUNC_DISP_PWM (MTK_PIN_NO(178) | 1)
+#define MT6797_GPIO178__FUNC_PWM_D (MTK_PIN_NO(178) | 2)
+#define MT6797_GPIO178__FUNC_CLKM5 (MTK_PIN_NO(178) | 3)
+#define MT6797_GPIO178__FUNC_DBG_MON_A19 (MTK_PIN_NO(178) | 7)
+
+#define MT6797_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define MT6797_GPIO179__FUNC_DSI_TE0 (MTK_PIN_NO(179) | 1)
+#define MT6797_GPIO179__FUNC_DBG_MON_A20 (MTK_PIN_NO(179) | 7)
+
+#define MT6797_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define MT6797_GPIO180__FUNC_LCM_RST (MTK_PIN_NO(180) | 1)
+#define MT6797_GPIO180__FUNC_DSI_TE1 (MTK_PIN_NO(180) | 2)
+#define MT6797_GPIO180__FUNC_DBG_MON_A21 (MTK_PIN_NO(180) | 7)
+
+#define MT6797_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define MT6797_GPIO181__FUNC_IDDIG (MTK_PIN_NO(181) | 1)
+#define MT6797_GPIO181__FUNC_DSI_TE1 (MTK_PIN_NO(181) | 2)
+#define MT6797_GPIO181__FUNC_DBG_MON_A22 (MTK_PIN_NO(181) | 7)
+
+#define MT6797_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define MT6797_GPIO182__FUNC_TESTMODE (MTK_PIN_NO(182) | 1)
+
+#define MT6797_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define MT6797_GPIO183__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(183) | 1)
+#define MT6797_GPIO183__FUNC_SPM_BSI_CK (MTK_PIN_NO(183) | 2)
+#define MT6797_GPIO183__FUNC_DBG_MON_B27 (MTK_PIN_NO(183) | 7)
+
+#define MT6797_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define MT6797_GPIO184__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(184) | 1)
+#define MT6797_GPIO184__FUNC_SPM_BSI_EN (MTK_PIN_NO(184) | 2)
+#define MT6797_GPIO184__FUNC_DBG_MON_B28 (MTK_PIN_NO(184) | 7)
+
+#define MT6797_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define MT6797_GPIO185__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(185) | 1)
+#define MT6797_GPIO185__FUNC_SPM_BSI_D0 (MTK_PIN_NO(185) | 2)
+#define MT6797_GPIO185__FUNC_DBG_MON_B29 (MTK_PIN_NO(185) | 7)
+
+#define MT6797_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define MT6797_GPIO186__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(186) | 1)
+#define MT6797_GPIO186__FUNC_SPM_BSI_D1 (MTK_PIN_NO(186) | 2)
+#define MT6797_GPIO186__FUNC_DBG_MON_B30 (MTK_PIN_NO(186) | 7)
+
+#define MT6797_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define MT6797_GPIO187__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(187) | 1)
+#define MT6797_GPIO187__FUNC_SPM_BSI_D2 (MTK_PIN_NO(187) | 2)
+#define MT6797_GPIO187__FUNC_DBG_MON_B31 (MTK_PIN_NO(187) | 7)
+
+#define MT6797_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define MT6797_GPIO188__FUNC_MIPI0_SCLK (MTK_PIN_NO(188) | 1)
+#define MT6797_GPIO188__FUNC_DBG_MON_B32 (MTK_PIN_NO(188) | 7)
+
+#define MT6797_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define MT6797_GPIO189__FUNC_MIPI0_SDATA (MTK_PIN_NO(189) | 1)
+
+#define MT6797_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define MT6797_GPIO190__FUNC_MIPI1_SCLK (MTK_PIN_NO(190) | 1)
+
+#define MT6797_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define MT6797_GPIO191__FUNC_MIPI1_SDATA (MTK_PIN_NO(191) | 1)
+
+#define MT6797_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define MT6797_GPIO192__FUNC_BPI_BUS4 (MTK_PIN_NO(192) | 1)
+
+#define MT6797_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define MT6797_GPIO193__FUNC_BPI_BUS5 (MTK_PIN_NO(193) | 1)
+#define MT6797_GPIO193__FUNC_DBG_MON_B0 (MTK_PIN_NO(193) | 7)
+
+#define MT6797_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define MT6797_GPIO194__FUNC_BPI_BUS6 (MTK_PIN_NO(194) | 1)
+#define MT6797_GPIO194__FUNC_DBG_MON_B1 (MTK_PIN_NO(194) | 7)
+
+#define MT6797_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define MT6797_GPIO195__FUNC_BPI_BUS7 (MTK_PIN_NO(195) | 1)
+#define MT6797_GPIO195__FUNC_DBG_MON_B2 (MTK_PIN_NO(195) | 7)
+
+#define MT6797_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define MT6797_GPIO196__FUNC_BPI_BUS8 (MTK_PIN_NO(196) | 1)
+#define MT6797_GPIO196__FUNC_DBG_MON_B3 (MTK_PIN_NO(196) | 7)
+
+#define MT6797_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define MT6797_GPIO197__FUNC_BPI_BUS9 (MTK_PIN_NO(197) | 1)
+#define MT6797_GPIO197__FUNC_DBG_MON_B4 (MTK_PIN_NO(197) | 7)
+
+#define MT6797_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define MT6797_GPIO198__FUNC_BPI_BUS10 (MTK_PIN_NO(198) | 1)
+#define MT6797_GPIO198__FUNC_DBG_MON_B5 (MTK_PIN_NO(198) | 7)
+
+#define MT6797_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT6797_GPIO199__FUNC_BPI_BUS11 (MTK_PIN_NO(199) | 1)
+#define MT6797_GPIO199__FUNC_DBG_MON_B6 (MTK_PIN_NO(199) | 7)
+
+#define MT6797_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT6797_GPIO200__FUNC_BPI_BUS12 (MTK_PIN_NO(200) | 1)
+#define MT6797_GPIO200__FUNC_DBG_MON_B7 (MTK_PIN_NO(200) | 7)
+
+#define MT6797_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT6797_GPIO201__FUNC_BPI_BUS13 (MTK_PIN_NO(201) | 1)
+#define MT6797_GPIO201__FUNC_DBG_MON_B8 (MTK_PIN_NO(201) | 7)
+
+#define MT6797_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define MT6797_GPIO202__FUNC_BPI_BUS14 (MTK_PIN_NO(202) | 1)
+#define MT6797_GPIO202__FUNC_DBG_MON_B9 (MTK_PIN_NO(202) | 7)
+
+#define MT6797_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+#define MT6797_GPIO203__FUNC_BPI_BUS15 (MTK_PIN_NO(203) | 1)
+#define MT6797_GPIO203__FUNC_DBG_MON_B10 (MTK_PIN_NO(203) | 7)
+
+#define MT6797_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+#define MT6797_GPIO204__FUNC_BPI_BUS16 (MTK_PIN_NO(204) | 1)
+#define MT6797_GPIO204__FUNC_PA_VM0 (MTK_PIN_NO(204) | 2)
+#define MT6797_GPIO204__FUNC_DBG_MON_B11 (MTK_PIN_NO(204) | 7)
+
+#define MT6797_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+#define MT6797_GPIO205__FUNC_BPI_BUS17 (MTK_PIN_NO(205) | 1)
+#define MT6797_GPIO205__FUNC_PA_VM1 (MTK_PIN_NO(205) | 2)
+#define MT6797_GPIO205__FUNC_DBG_MON_B12 (MTK_PIN_NO(205) | 7)
+
+#define MT6797_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+#define MT6797_GPIO206__FUNC_BPI_BUS18 (MTK_PIN_NO(206) | 1)
+#define MT6797_GPIO206__FUNC_TX_SWAP0 (MTK_PIN_NO(206) | 2)
+#define MT6797_GPIO206__FUNC_DBG_MON_B13 (MTK_PIN_NO(206) | 7)
+
+#define MT6797_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+#define MT6797_GPIO207__FUNC_BPI_BUS19 (MTK_PIN_NO(207) | 1)
+#define MT6797_GPIO207__FUNC_TX_SWAP1 (MTK_PIN_NO(207) | 2)
+#define MT6797_GPIO207__FUNC_DBG_MON_B14 (MTK_PIN_NO(207) | 7)
+
+#define MT6797_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+#define MT6797_GPIO208__FUNC_BPI_BUS20 (MTK_PIN_NO(208) | 1)
+#define MT6797_GPIO208__FUNC_TX_SWAP2 (MTK_PIN_NO(208) | 2)
+#define MT6797_GPIO208__FUNC_DBG_MON_B15 (MTK_PIN_NO(208) | 7)
+
+#define MT6797_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+#define MT6797_GPIO209__FUNC_BPI_BUS21 (MTK_PIN_NO(209) | 1)
+#define MT6797_GPIO209__FUNC_TX_SWAP3 (MTK_PIN_NO(209) | 2)
+#define MT6797_GPIO209__FUNC_DBG_MON_B16 (MTK_PIN_NO(209) | 7)
+
+#define MT6797_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0)
+#define MT6797_GPIO210__FUNC_BPI_BUS22 (MTK_PIN_NO(210) | 1)
+#define MT6797_GPIO210__FUNC_DET_BPI0 (MTK_PIN_NO(210) | 2)
+#define MT6797_GPIO210__FUNC_DBG_MON_B17 (MTK_PIN_NO(210) | 7)
+
+#define MT6797_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0)
+#define MT6797_GPIO211__FUNC_BPI_BUS23 (MTK_PIN_NO(211) | 1)
+#define MT6797_GPIO211__FUNC_DET_BPI1 (MTK_PIN_NO(211) | 2)
+#define MT6797_GPIO211__FUNC_DBG_MON_B18 (MTK_PIN_NO(211) | 7)
+
+#define MT6797_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0)
+#define MT6797_GPIO212__FUNC_BPI_BUS0 (MTK_PIN_NO(212) | 1)
+#define MT6797_GPIO212__FUNC_DBG_MON_B19 (MTK_PIN_NO(212) | 7)
+
+#define MT6797_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0)
+#define MT6797_GPIO213__FUNC_BPI_BUS1 (MTK_PIN_NO(213) | 1)
+#define MT6797_GPIO213__FUNC_DBG_MON_B20 (MTK_PIN_NO(213) | 7)
+
+#define MT6797_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0)
+#define MT6797_GPIO214__FUNC_BPI_BUS2 (MTK_PIN_NO(214) | 1)
+#define MT6797_GPIO214__FUNC_DBG_MON_B21 (MTK_PIN_NO(214) | 7)
+
+#define MT6797_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0)
+#define MT6797_GPIO215__FUNC_BPI_BUS3 (MTK_PIN_NO(215) | 1)
+#define MT6797_GPIO215__FUNC_DBG_MON_B22 (MTK_PIN_NO(215) | 7)
+
+#define MT6797_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0)
+#define MT6797_GPIO216__FUNC_MIPI2_SCLK (MTK_PIN_NO(216) | 1)
+#define MT6797_GPIO216__FUNC_DBG_MON_B23 (MTK_PIN_NO(216) | 7)
+
+#define MT6797_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0)
+#define MT6797_GPIO217__FUNC_MIPI2_SDATA (MTK_PIN_NO(217) | 1)
+#define MT6797_GPIO217__FUNC_DBG_MON_B24 (MTK_PIN_NO(217) | 7)
+
+#define MT6797_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0)
+#define MT6797_GPIO218__FUNC_MIPI3_SCLK (MTK_PIN_NO(218) | 1)
+#define MT6797_GPIO218__FUNC_DBG_MON_B25 (MTK_PIN_NO(218) | 7)
+
+#define MT6797_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0)
+#define MT6797_GPIO219__FUNC_MIPI3_SDATA (MTK_PIN_NO(219) | 1)
+#define MT6797_GPIO219__FUNC_DBG_MON_B26 (MTK_PIN_NO(219) | 7)
+
+#define MT6797_GPIO220__FUNC_GPIO220 (MTK_PIN_NO(220) | 0)
+#define MT6797_GPIO220__FUNC_CONN_WF_IP (MTK_PIN_NO(220) | 1)
+
+#define MT6797_GPIO221__FUNC_GPIO221 (MTK_PIN_NO(221) | 0)
+#define MT6797_GPIO221__FUNC_CONN_WF_IN (MTK_PIN_NO(221) | 1)
+
+#define MT6797_GPIO222__FUNC_GPIO222 (MTK_PIN_NO(222) | 0)
+#define MT6797_GPIO222__FUNC_CONN_WF_QP (MTK_PIN_NO(222) | 1)
+
+#define MT6797_GPIO223__FUNC_GPIO223 (MTK_PIN_NO(223) | 0)
+#define MT6797_GPIO223__FUNC_CONN_WF_QN (MTK_PIN_NO(223) | 1)
+
+#define MT6797_GPIO224__FUNC_GPIO224 (MTK_PIN_NO(224) | 0)
+#define MT6797_GPIO224__FUNC_CONN_BT_IP (MTK_PIN_NO(224) | 1)
+
+#define MT6797_GPIO225__FUNC_GPIO225 (MTK_PIN_NO(225) | 0)
+#define MT6797_GPIO225__FUNC_CONN_BT_IN (MTK_PIN_NO(225) | 1)
+
+#define MT6797_GPIO226__FUNC_GPIO226 (MTK_PIN_NO(226) | 0)
+#define MT6797_GPIO226__FUNC_CONN_BT_QP (MTK_PIN_NO(226) | 1)
+
+#define MT6797_GPIO227__FUNC_GPIO227 (MTK_PIN_NO(227) | 0)
+#define MT6797_GPIO227__FUNC_CONN_BT_QN (MTK_PIN_NO(227) | 1)
+
+#define MT6797_GPIO228__FUNC_GPIO228 (MTK_PIN_NO(228) | 0)
+#define MT6797_GPIO228__FUNC_CONN_GPS_IP (MTK_PIN_NO(228) | 1)
+
+#define MT6797_GPIO229__FUNC_GPIO229 (MTK_PIN_NO(229) | 0)
+#define MT6797_GPIO229__FUNC_CONN_GPS_IN (MTK_PIN_NO(229) | 1)
+
+#define MT6797_GPIO230__FUNC_GPIO230 (MTK_PIN_NO(230) | 0)
+#define MT6797_GPIO230__FUNC_CONN_GPS_QP (MTK_PIN_NO(230) | 1)
+
+#define MT6797_GPIO231__FUNC_GPIO231 (MTK_PIN_NO(231) | 0)
+#define MT6797_GPIO231__FUNC_CONN_GPS_QN (MTK_PIN_NO(231) | 1)
+
+#define MT6797_GPIO232__FUNC_GPIO232 (MTK_PIN_NO(232) | 0)
+#define MT6797_GPIO232__FUNC_URXD1 (MTK_PIN_NO(232) | 1)
+#define MT6797_GPIO232__FUNC_UTXD1 (MTK_PIN_NO(232) | 2)
+#define MT6797_GPIO232__FUNC_MD_URXD0 (MTK_PIN_NO(232) | 3)
+#define MT6797_GPIO232__FUNC_MD_URXD1 (MTK_PIN_NO(232) | 4)
+#define MT6797_GPIO232__FUNC_MD_URXD2 (MTK_PIN_NO(232) | 5)
+#define MT6797_GPIO232__FUNC_C2K_URXD0 (MTK_PIN_NO(232) | 6)
+#define MT6797_GPIO232__FUNC_C2K_URXD1 (MTK_PIN_NO(232) | 7)
+
+#define MT6797_GPIO233__FUNC_GPIO233 (MTK_PIN_NO(233) | 0)
+#define MT6797_GPIO233__FUNC_UTXD1 (MTK_PIN_NO(233) | 1)
+#define MT6797_GPIO233__FUNC_URXD1 (MTK_PIN_NO(233) | 2)
+#define MT6797_GPIO233__FUNC_MD_UTXD0 (MTK_PIN_NO(233) | 3)
+#define MT6797_GPIO233__FUNC_MD_UTXD1 (MTK_PIN_NO(233) | 4)
+#define MT6797_GPIO233__FUNC_MD_UTXD2 (MTK_PIN_NO(233) | 5)
+#define MT6797_GPIO233__FUNC_C2K_UTXD0 (MTK_PIN_NO(233) | 6)
+#define MT6797_GPIO233__FUNC_C2K_UTXD1 (MTK_PIN_NO(233) | 7)
+
+#define MT6797_GPIO234__FUNC_GPIO234 (MTK_PIN_NO(234) | 0)
+#define MT6797_GPIO234__FUNC_SPI1_CLK_B (MTK_PIN_NO(234) | 1)
+#define MT6797_GPIO234__FUNC_TP_UTXD1_AO (MTK_PIN_NO(234) | 2)
+#define MT6797_GPIO234__FUNC_SCL4_1 (MTK_PIN_NO(234) | 3)
+#define MT6797_GPIO234__FUNC_UTXD0 (MTK_PIN_NO(234) | 4)
+#define MT6797_GPIO234__FUNC_PWM_A (MTK_PIN_NO(234) | 6)
+#define MT6797_GPIO234__FUNC_DBG_MON_A23 (MTK_PIN_NO(234) | 7)
+
+#define MT6797_GPIO235__FUNC_GPIO235 (MTK_PIN_NO(235) | 0)
+#define MT6797_GPIO235__FUNC_SPI1_MI_B (MTK_PIN_NO(235) | 1)
+#define MT6797_GPIO235__FUNC_SPI1_MO_B (MTK_PIN_NO(235) | 2)
+#define MT6797_GPIO235__FUNC_SDA4_1 (MTK_PIN_NO(235) | 3)
+#define MT6797_GPIO235__FUNC_URXD0 (MTK_PIN_NO(235) | 4)
+#define MT6797_GPIO235__FUNC_CLKM0 (MTK_PIN_NO(235) | 6)
+#define MT6797_GPIO235__FUNC_DBG_MON_A24 (MTK_PIN_NO(235) | 7)
+
+#define MT6797_GPIO236__FUNC_GPIO236 (MTK_PIN_NO(236) | 0)
+#define MT6797_GPIO236__FUNC_SPI1_MO_B (MTK_PIN_NO(236) | 1)
+#define MT6797_GPIO236__FUNC_SPI1_MI_B (MTK_PIN_NO(236) | 2)
+#define MT6797_GPIO236__FUNC_SCL5_1 (MTK_PIN_NO(236) | 3)
+#define MT6797_GPIO236__FUNC_URTS0 (MTK_PIN_NO(236) | 4)
+#define MT6797_GPIO236__FUNC_PWM_B (MTK_PIN_NO(236) | 6)
+#define MT6797_GPIO236__FUNC_DBG_MON_A25 (MTK_PIN_NO(236) | 7)
+
+#define MT6797_GPIO237__FUNC_GPIO237 (MTK_PIN_NO(237) | 0)
+#define MT6797_GPIO237__FUNC_SPI1_CS_B (MTK_PIN_NO(237) | 1)
+#define MT6797_GPIO237__FUNC_TP_URXD1_AO (MTK_PIN_NO(237) | 2)
+#define MT6797_GPIO237__FUNC_SDA5_1 (MTK_PIN_NO(237) | 3)
+#define MT6797_GPIO237__FUNC_UCTS0 (MTK_PIN_NO(237) | 4)
+#define MT6797_GPIO237__FUNC_CLKM1 (MTK_PIN_NO(237) | 6)
+#define MT6797_GPIO237__FUNC_DBG_MON_A26 (MTK_PIN_NO(237) | 7)
+
+#define MT6797_GPIO238__FUNC_GPIO238 (MTK_PIN_NO(238) | 0)
+#define MT6797_GPIO238__FUNC_SDA4_0 (MTK_PIN_NO(238) | 1)
+
+#define MT6797_GPIO239__FUNC_GPIO239 (MTK_PIN_NO(239) | 0)
+#define MT6797_GPIO239__FUNC_SCL4_0 (MTK_PIN_NO(239) | 1)
+
+#define MT6797_GPIO240__FUNC_GPIO240 (MTK_PIN_NO(240) | 0)
+#define MT6797_GPIO240__FUNC_SDA5_0 (MTK_PIN_NO(240) | 1)
+
+#define MT6797_GPIO241__FUNC_GPIO241 (MTK_PIN_NO(241) | 0)
+#define MT6797_GPIO241__FUNC_SCL5_0 (MTK_PIN_NO(241) | 1)
+
+#define MT6797_GPIO242__FUNC_GPIO242 (MTK_PIN_NO(242) | 0)
+#define MT6797_GPIO242__FUNC_SPI2_CLK_B (MTK_PIN_NO(242) | 1)
+#define MT6797_GPIO242__FUNC_TP_UTXD2_AO (MTK_PIN_NO(242) | 2)
+#define MT6797_GPIO242__FUNC_SCL4_2 (MTK_PIN_NO(242) | 3)
+#define MT6797_GPIO242__FUNC_UTXD1 (MTK_PIN_NO(242) | 4)
+#define MT6797_GPIO242__FUNC_URTS3 (MTK_PIN_NO(242) | 5)
+#define MT6797_GPIO242__FUNC_PWM_C (MTK_PIN_NO(242) | 6)
+#define MT6797_GPIO242__FUNC_DBG_MON_A27 (MTK_PIN_NO(242) | 7)
+
+#define MT6797_GPIO243__FUNC_GPIO243 (MTK_PIN_NO(243) | 0)
+#define MT6797_GPIO243__FUNC_SPI2_MI_B (MTK_PIN_NO(243) | 1)
+#define MT6797_GPIO243__FUNC_SPI2_MO_B (MTK_PIN_NO(243) | 2)
+#define MT6797_GPIO243__FUNC_SDA4_2 (MTK_PIN_NO(243) | 3)
+#define MT6797_GPIO243__FUNC_URXD1 (MTK_PIN_NO(243) | 4)
+#define MT6797_GPIO243__FUNC_UCTS3 (MTK_PIN_NO(243) | 5)
+#define MT6797_GPIO243__FUNC_CLKM2 (MTK_PIN_NO(243) | 6)
+#define MT6797_GPIO243__FUNC_DBG_MON_A28 (MTK_PIN_NO(243) | 7)
+
+#define MT6797_GPIO244__FUNC_GPIO244 (MTK_PIN_NO(244) | 0)
+#define MT6797_GPIO244__FUNC_SPI2_MO_B (MTK_PIN_NO(244) | 1)
+#define MT6797_GPIO244__FUNC_SPI2_MI_B (MTK_PIN_NO(244) | 2)
+#define MT6797_GPIO244__FUNC_SCL5_2 (MTK_PIN_NO(244) | 3)
+#define MT6797_GPIO244__FUNC_URTS1 (MTK_PIN_NO(244) | 4)
+#define MT6797_GPIO244__FUNC_UTXD3 (MTK_PIN_NO(244) | 5)
+#define MT6797_GPIO244__FUNC_PWM_D (MTK_PIN_NO(244) | 6)
+#define MT6797_GPIO244__FUNC_DBG_MON_A29 (MTK_PIN_NO(244) | 7)
+
+#define MT6797_GPIO245__FUNC_GPIO245 (MTK_PIN_NO(245) | 0)
+#define MT6797_GPIO245__FUNC_SPI2_CS_B (MTK_PIN_NO(245) | 1)
+#define MT6797_GPIO245__FUNC_TP_URXD2_AO (MTK_PIN_NO(245) | 2)
+#define MT6797_GPIO245__FUNC_SDA5_2 (MTK_PIN_NO(245) | 3)
+#define MT6797_GPIO245__FUNC_UCTS1 (MTK_PIN_NO(245) | 4)
+#define MT6797_GPIO245__FUNC_URXD3 (MTK_PIN_NO(245) | 5)
+#define MT6797_GPIO245__FUNC_CLKM3 (MTK_PIN_NO(245) | 6)
+#define MT6797_GPIO245__FUNC_DBG_MON_A30 (MTK_PIN_NO(245) | 7)
+
+#define MT6797_GPIO246__FUNC_GPIO246 (MTK_PIN_NO(246) | 0)
+#define MT6797_GPIO246__FUNC_I2S1_LRCK (MTK_PIN_NO(246) | 1)
+#define MT6797_GPIO246__FUNC_I2S2_LRCK (MTK_PIN_NO(246) | 2)
+#define MT6797_GPIO246__FUNC_I2S0_LRCK (MTK_PIN_NO(246) | 3)
+#define MT6797_GPIO246__FUNC_I2S3_LRCK (MTK_PIN_NO(246) | 4)
+#define MT6797_GPIO246__FUNC_PCM0_SYNC (MTK_PIN_NO(246) | 5)
+#define MT6797_GPIO246__FUNC_SPI5_CLK_C (MTK_PIN_NO(246) | 6)
+#define MT6797_GPIO246__FUNC_DBG_MON_A31 (MTK_PIN_NO(246) | 7)
+
+#define MT6797_GPIO247__FUNC_GPIO247 (MTK_PIN_NO(247) | 0)
+#define MT6797_GPIO247__FUNC_I2S1_BCK (MTK_PIN_NO(247) | 1)
+#define MT6797_GPIO247__FUNC_I2S2_BCK (MTK_PIN_NO(247) | 2)
+#define MT6797_GPIO247__FUNC_I2S0_BCK (MTK_PIN_NO(247) | 3)
+#define MT6797_GPIO247__FUNC_I2S3_BCK (MTK_PIN_NO(247) | 4)
+#define MT6797_GPIO247__FUNC_PCM0_CLK (MTK_PIN_NO(247) | 5)
+#define MT6797_GPIO247__FUNC_SPI5_MI_C (MTK_PIN_NO(247) | 6)
+#define MT6797_GPIO247__FUNC_DBG_MON_A32 (MTK_PIN_NO(247) | 7)
+
+#define MT6797_GPIO248__FUNC_GPIO248 (MTK_PIN_NO(248) | 0)
+/* #define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 1) */
+#define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 2)
+/* #define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 3) */
+#define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 4)
+#define MT6797_GPIO248__FUNC_PCM0_DI (MTK_PIN_NO(248) | 5)
+#define MT6797_GPIO248__FUNC_SPI5_CS_C (MTK_PIN_NO(248) | 6)
+
+#define MT6797_GPIO249__FUNC_GPIO249 (MTK_PIN_NO(249) | 0)
+/* #define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 1) */
+#define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 2)
+/* #define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 3) */
+#define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 4)
+#define MT6797_GPIO249__FUNC_PCM0_DO (MTK_PIN_NO(249) | 5)
+#define MT6797_GPIO249__FUNC_SPI5_MO_C (MTK_PIN_NO(249) | 6)
+#define MT6797_GPIO249__FUNC_TRAP_SRAM_PWR_BYPASS (MTK_PIN_NO(249) | 7)
+
+#define MT6797_GPIO250__FUNC_GPIO250 (MTK_PIN_NO(250) | 0)
+#define MT6797_GPIO250__FUNC_SPI3_MI (MTK_PIN_NO(250) | 1)
+#define MT6797_GPIO250__FUNC_SPI3_MO (MTK_PIN_NO(250) | 2)
+#define MT6797_GPIO250__FUNC_IRTX_OUT (MTK_PIN_NO(250) | 3)
+#define MT6797_GPIO250__FUNC_TP_URXD1_AO (MTK_PIN_NO(250) | 6)
+#define MT6797_GPIO250__FUNC_DROP_ZONE (MTK_PIN_NO(250) | 7)
+
+#define MT6797_GPIO251__FUNC_GPIO251 (MTK_PIN_NO(251) | 0)
+#define MT6797_GPIO251__FUNC_SPI3_MO (MTK_PIN_NO(251) | 1)
+#define MT6797_GPIO251__FUNC_SPI3_MI (MTK_PIN_NO(251) | 2)
+#define MT6797_GPIO251__FUNC_CMFLASH (MTK_PIN_NO(251) | 3)
+#define MT6797_GPIO251__FUNC_TP_UTXD1_AO (MTK_PIN_NO(251) | 6)
+#define MT6797_GPIO251__FUNC_C2K_RTCK (MTK_PIN_NO(251) | 7)
+
+#define MT6797_GPIO252__FUNC_GPIO252 (MTK_PIN_NO(252) | 0)
+#define MT6797_GPIO252__FUNC_SPI3_CLK (MTK_PIN_NO(252) | 1)
+#define MT6797_GPIO252__FUNC_SCL0_4 (MTK_PIN_NO(252) | 2)
+#define MT6797_GPIO252__FUNC_PWM_D (MTK_PIN_NO(252) | 3)
+#define MT6797_GPIO252__FUNC_C2K_TMS (MTK_PIN_NO(252) | 7)
+
+#define MT6797_GPIO253__FUNC_GPIO253 (MTK_PIN_NO(253) | 0)
+#define MT6797_GPIO253__FUNC_SPI3_CS (MTK_PIN_NO(253) | 1)
+#define MT6797_GPIO253__FUNC_SDA0_4 (MTK_PIN_NO(253) | 2)
+#define MT6797_GPIO253__FUNC_PWM_A (MTK_PIN_NO(253) | 3)
+#define MT6797_GPIO253__FUNC_C2K_TCK (MTK_PIN_NO(253) | 7)
+
+#define MT6797_GPIO254__FUNC_GPIO254 (MTK_PIN_NO(254) | 0)
+#define MT6797_GPIO254__FUNC_I2S1_MCK (MTK_PIN_NO(254) | 1)
+#define MT6797_GPIO254__FUNC_I2S2_MCK (MTK_PIN_NO(254) | 2)
+#define MT6797_GPIO254__FUNC_I2S0_MCK (MTK_PIN_NO(254) | 3)
+#define MT6797_GPIO254__FUNC_I2S3_MCK (MTK_PIN_NO(254) | 4)
+#define MT6797_GPIO254__FUNC_CLKM0 (MTK_PIN_NO(254) | 5)
+#define MT6797_GPIO254__FUNC_C2K_TDI (MTK_PIN_NO(254) | 7)
+
+#define MT6797_GPIO255__FUNC_GPIO255 (MTK_PIN_NO(255) | 0)
+#define MT6797_GPIO255__FUNC_CLKM1 (MTK_PIN_NO(255) | 1)
+#define MT6797_GPIO255__FUNC_DISP_PWM (MTK_PIN_NO(255) | 2)
+#define MT6797_GPIO255__FUNC_PWM_B (MTK_PIN_NO(255) | 3)
+#define MT6797_GPIO255__FUNC_TP_GPIO1_AO (MTK_PIN_NO(255) | 6)
+#define MT6797_GPIO255__FUNC_C2K_TDO (MTK_PIN_NO(255) | 7)
+
+#define MT6797_GPIO256__FUNC_GPIO256 (MTK_PIN_NO(256) | 0)
+#define MT6797_GPIO256__FUNC_CLKM2 (MTK_PIN_NO(256) | 1)
+#define MT6797_GPIO256__FUNC_IRTX_OUT (MTK_PIN_NO(256) | 2)
+#define MT6797_GPIO256__FUNC_PWM_C (MTK_PIN_NO(256) | 3)
+#define MT6797_GPIO256__FUNC_TP_GPIO0_AO (MTK_PIN_NO(256) | 6)
+#define MT6797_GPIO256__FUNC_C2K_NTRST (MTK_PIN_NO(256) | 7)
+
+#define MT6797_GPIO257__FUNC_GPIO257 (MTK_PIN_NO(257) | 0)
+#define MT6797_GPIO257__FUNC_IO_JTAG_TMS (MTK_PIN_NO(257) | 1)
+#define MT6797_GPIO257__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(257) | 2)
+#define MT6797_GPIO257__FUNC_DFD_TMS (MTK_PIN_NO(257) | 3)
+#define MT6797_GPIO257__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(257) | 4)
+#define MT6797_GPIO257__FUNC_ANC_JTAG_TMS (MTK_PIN_NO(257) | 5)
+#define MT6797_GPIO257__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(257) | 6)
+#define MT6797_GPIO257__FUNC_C2K_DM_OTMS (MTK_PIN_NO(257) | 7)
+
+#define MT6797_GPIO258__FUNC_GPIO258 (MTK_PIN_NO(258) | 0)
+#define MT6797_GPIO258__FUNC_IO_JTAG_TCK (MTK_PIN_NO(258) | 1)
+#define MT6797_GPIO258__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(258) | 2)
+#define MT6797_GPIO258__FUNC_DFD_TCK_XI (MTK_PIN_NO(258) | 3)
+#define MT6797_GPIO258__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(258) | 4)
+#define MT6797_GPIO258__FUNC_ANC_JTAG_TCK (MTK_PIN_NO(258) | 5)
+#define MT6797_GPIO258__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(258) | 6)
+#define MT6797_GPIO258__FUNC_C2K_DM_OTCK (MTK_PIN_NO(258) | 7)
+
+#define MT6797_GPIO259__FUNC_GPIO259 (MTK_PIN_NO(259) | 0)
+#define MT6797_GPIO259__FUNC_IO_JTAG_TDI (MTK_PIN_NO(259) | 1)
+#define MT6797_GPIO259__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(259) | 2)
+#define MT6797_GPIO259__FUNC_DFD_TDI (MTK_PIN_NO(259) | 3)
+#define MT6797_GPIO259__FUNC_ANC_JTAG_TDI (MTK_PIN_NO(259) | 5)
+#define MT6797_GPIO259__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(259) | 6)
+#define MT6797_GPIO259__FUNC_C2K_DM_OTDI (MTK_PIN_NO(259) | 7)
+
+#define MT6797_GPIO260__FUNC_GPIO260 (MTK_PIN_NO(260) | 0)
+#define MT6797_GPIO260__FUNC_IO_JTAG_TDO (MTK_PIN_NO(260) | 1)
+#define MT6797_GPIO260__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(260) | 2)
+#define MT6797_GPIO260__FUNC_DFD_TDO (MTK_PIN_NO(260) | 3)
+#define MT6797_GPIO260__FUNC_ANC_JTAG_TDO (MTK_PIN_NO(260) | 5)
+#define MT6797_GPIO260__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(260) | 6)
+#define MT6797_GPIO260__FUNC_C2K_DM_OTDO (MTK_PIN_NO(260) | 7)
+
+#define MT6797_GPIO261__FUNC_GPIO261 (MTK_PIN_NO(261) | 0)
+#define MT6797_GPIO261__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(261) | 2)
+#define MT6797_GPIO261__FUNC_DFD_NTRST (MTK_PIN_NO(261) | 3)
+#define MT6797_GPIO261__FUNC_ANC_JTAG_TRSTN (MTK_PIN_NO(261) | 5)
+#define MT6797_GPIO261__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(261) | 6)
+#define MT6797_GPIO261__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(261) | 7)
+
+#endif /* __DTS_MT6797_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/pads-imx8qm.h b/include/dt-bindings/pinctrl/pads-imx8qm.h
new file mode 100644
index 000000000000..ae7b2942da69
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pads-imx8qm.h
@@ -0,0 +1,960 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ */
+
+#ifndef _IMX8QM_PADS_H
+#define _IMX8QM_PADS_H
+
+/* pin id */
+#define IMX8QM_SIM0_CLK 0
+#define IMX8QM_SIM0_RST 1
+#define IMX8QM_SIM0_IO 2
+#define IMX8QM_SIM0_PD 3
+#define IMX8QM_SIM0_POWER_EN 4
+#define IMX8QM_SIM0_GPIO0_00 5
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SIM 6
+#define IMX8QM_M40_I2C0_SCL 7
+#define IMX8QM_M40_I2C0_SDA 8
+#define IMX8QM_M40_GPIO0_00 9
+#define IMX8QM_M40_GPIO0_01 10
+#define IMX8QM_M41_I2C0_SCL 11
+#define IMX8QM_M41_I2C0_SDA 12
+#define IMX8QM_M41_GPIO0_00 13
+#define IMX8QM_M41_GPIO0_01 14
+#define IMX8QM_GPT0_CLK 15
+#define IMX8QM_GPT0_CAPTURE 16
+#define IMX8QM_GPT0_COMPARE 17
+#define IMX8QM_GPT1_CLK 18
+#define IMX8QM_GPT1_CAPTURE 19
+#define IMX8QM_GPT1_COMPARE 20
+#define IMX8QM_UART0_RX 21
+#define IMX8QM_UART0_TX 22
+#define IMX8QM_UART0_RTS_B 23
+#define IMX8QM_UART0_CTS_B 24
+#define IMX8QM_UART1_TX 25
+#define IMX8QM_UART1_RX 26
+#define IMX8QM_UART1_RTS_B 27
+#define IMX8QM_UART1_CTS_B 28
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLH 29
+#define IMX8QM_SCU_PMIC_MEMC_ON 30
+#define IMX8QM_SCU_WDOG_OUT 31
+#define IMX8QM_PMIC_I2C_SDA 32
+#define IMX8QM_PMIC_I2C_SCL 33
+#define IMX8QM_PMIC_EARLY_WARNING 34
+#define IMX8QM_PMIC_INT_B 35
+#define IMX8QM_SCU_GPIO0_00 36
+#define IMX8QM_SCU_GPIO0_01 37
+#define IMX8QM_SCU_GPIO0_02 38
+#define IMX8QM_SCU_GPIO0_03 39
+#define IMX8QM_SCU_GPIO0_04 40
+#define IMX8QM_SCU_GPIO0_05 41
+#define IMX8QM_SCU_GPIO0_06 42
+#define IMX8QM_SCU_GPIO0_07 43
+#define IMX8QM_SCU_BOOT_MODE0 44
+#define IMX8QM_SCU_BOOT_MODE1 45
+#define IMX8QM_SCU_BOOT_MODE2 46
+#define IMX8QM_SCU_BOOT_MODE3 47
+#define IMX8QM_SCU_BOOT_MODE4 48
+#define IMX8QM_SCU_BOOT_MODE5 49
+#define IMX8QM_LVDS0_GPIO00 50
+#define IMX8QM_LVDS0_GPIO01 51
+#define IMX8QM_LVDS0_I2C0_SCL 52
+#define IMX8QM_LVDS0_I2C0_SDA 53
+#define IMX8QM_LVDS0_I2C1_SCL 54
+#define IMX8QM_LVDS0_I2C1_SDA 55
+#define IMX8QM_LVDS1_GPIO00 56
+#define IMX8QM_LVDS1_GPIO01 57
+#define IMX8QM_LVDS1_I2C0_SCL 58
+#define IMX8QM_LVDS1_I2C0_SDA 59
+#define IMX8QM_LVDS1_I2C1_SCL 60
+#define IMX8QM_LVDS1_I2C1_SDA 61
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_LVDSGPIO 62
+#define IMX8QM_MIPI_DSI0_I2C0_SCL 63
+#define IMX8QM_MIPI_DSI0_I2C0_SDA 64
+#define IMX8QM_MIPI_DSI0_GPIO0_00 65
+#define IMX8QM_MIPI_DSI0_GPIO0_01 66
+#define IMX8QM_MIPI_DSI1_I2C0_SCL 67
+#define IMX8QM_MIPI_DSI1_I2C0_SDA 68
+#define IMX8QM_MIPI_DSI1_GPIO0_00 69
+#define IMX8QM_MIPI_DSI1_GPIO0_01 70
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO 71
+#define IMX8QM_MIPI_CSI0_MCLK_OUT 72
+#define IMX8QM_MIPI_CSI0_I2C0_SCL 73
+#define IMX8QM_MIPI_CSI0_I2C0_SDA 74
+#define IMX8QM_MIPI_CSI0_GPIO0_00 75
+#define IMX8QM_MIPI_CSI0_GPIO0_01 76
+#define IMX8QM_MIPI_CSI1_MCLK_OUT 77
+#define IMX8QM_MIPI_CSI1_GPIO0_00 78
+#define IMX8QM_MIPI_CSI1_GPIO0_01 79
+#define IMX8QM_MIPI_CSI1_I2C0_SCL 80
+#define IMX8QM_MIPI_CSI1_I2C0_SDA 81
+#define IMX8QM_HDMI_TX0_TS_SCL 82
+#define IMX8QM_HDMI_TX0_TS_SDA 83
+#define IMX8QM_COMP_CTL_GPIO_3V3_HDMIGPIO 84
+#define IMX8QM_ESAI1_FSR 85
+#define IMX8QM_ESAI1_FST 86
+#define IMX8QM_ESAI1_SCKR 87
+#define IMX8QM_ESAI1_SCKT 88
+#define IMX8QM_ESAI1_TX0 89
+#define IMX8QM_ESAI1_TX1 90
+#define IMX8QM_ESAI1_TX2_RX3 91
+#define IMX8QM_ESAI1_TX3_RX2 92
+#define IMX8QM_ESAI1_TX4_RX1 93
+#define IMX8QM_ESAI1_TX5_RX0 94
+#define IMX8QM_SPDIF0_RX 95
+#define IMX8QM_SPDIF0_TX 96
+#define IMX8QM_SPDIF0_EXT_CLK 97
+#define IMX8QM_SPI3_SCK 98
+#define IMX8QM_SPI3_SDO 99
+#define IMX8QM_SPI3_SDI 100
+#define IMX8QM_SPI3_CS0 101
+#define IMX8QM_SPI3_CS1 102
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHB 103
+#define IMX8QM_ESAI0_FSR 104
+#define IMX8QM_ESAI0_FST 105
+#define IMX8QM_ESAI0_SCKR 106
+#define IMX8QM_ESAI0_SCKT 107
+#define IMX8QM_ESAI0_TX0 108
+#define IMX8QM_ESAI0_TX1 109
+#define IMX8QM_ESAI0_TX2_RX3 110
+#define IMX8QM_ESAI0_TX3_RX2 111
+#define IMX8QM_ESAI0_TX4_RX1 112
+#define IMX8QM_ESAI0_TX5_RX0 113
+#define IMX8QM_MCLK_IN0 114
+#define IMX8QM_MCLK_OUT0 115
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHC 116
+#define IMX8QM_SPI0_SCK 117
+#define IMX8QM_SPI0_SDO 118
+#define IMX8QM_SPI0_SDI 119
+#define IMX8QM_SPI0_CS0 120
+#define IMX8QM_SPI0_CS1 121
+#define IMX8QM_SPI2_SCK 122
+#define IMX8QM_SPI2_SDO 123
+#define IMX8QM_SPI2_SDI 124
+#define IMX8QM_SPI2_CS0 125
+#define IMX8QM_SPI2_CS1 126
+#define IMX8QM_SAI1_RXC 127
+#define IMX8QM_SAI1_RXD 128
+#define IMX8QM_SAI1_RXFS 129
+#define IMX8QM_SAI1_TXC 130
+#define IMX8QM_SAI1_TXD 131
+#define IMX8QM_SAI1_TXFS 132
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHT 133
+#define IMX8QM_ADC_IN7 134
+#define IMX8QM_ADC_IN6 135
+#define IMX8QM_ADC_IN5 136
+#define IMX8QM_ADC_IN4 137
+#define IMX8QM_ADC_IN3 138
+#define IMX8QM_ADC_IN2 139
+#define IMX8QM_ADC_IN1 140
+#define IMX8QM_ADC_IN0 141
+#define IMX8QM_MLB_SIG 142
+#define IMX8QM_MLB_CLK 143
+#define IMX8QM_MLB_DATA 144
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLHT 145
+#define IMX8QM_FLEXCAN0_RX 146
+#define IMX8QM_FLEXCAN0_TX 147
+#define IMX8QM_FLEXCAN1_RX 148
+#define IMX8QM_FLEXCAN1_TX 149
+#define IMX8QM_FLEXCAN2_RX 150
+#define IMX8QM_FLEXCAN2_TX 151
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOTHR 152
+#define IMX8QM_USB_SS3_TC0 153
+#define IMX8QM_USB_SS3_TC1 154
+#define IMX8QM_USB_SS3_TC2 155
+#define IMX8QM_USB_SS3_TC3 156
+#define IMX8QM_COMP_CTL_GPIO_3V3_USB3IO 157
+#define IMX8QM_USDHC1_RESET_B 158
+#define IMX8QM_USDHC1_VSELECT 159
+#define IMX8QM_USDHC2_RESET_B 160
+#define IMX8QM_USDHC2_VSELECT 161
+#define IMX8QM_USDHC2_WP 162
+#define IMX8QM_USDHC2_CD_B 163
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSELSEP 164
+#define IMX8QM_ENET0_MDIO 165
+#define IMX8QM_ENET0_MDC 166
+#define IMX8QM_ENET0_REFCLK_125M_25M 167
+#define IMX8QM_ENET1_REFCLK_125M_25M 168
+#define IMX8QM_ENET1_MDIO 169
+#define IMX8QM_ENET1_MDC 170
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOCT 171
+#define IMX8QM_QSPI1A_SS0_B 172
+#define IMX8QM_QSPI1A_SS1_B 173
+#define IMX8QM_QSPI1A_SCLK 174
+#define IMX8QM_QSPI1A_DQS 175
+#define IMX8QM_QSPI1A_DATA3 176
+#define IMX8QM_QSPI1A_DATA2 177
+#define IMX8QM_QSPI1A_DATA1 178
+#define IMX8QM_QSPI1A_DATA0 179
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI1 180
+#define IMX8QM_QSPI0A_DATA0 181
+#define IMX8QM_QSPI0A_DATA1 182
+#define IMX8QM_QSPI0A_DATA2 183
+#define IMX8QM_QSPI0A_DATA3 184
+#define IMX8QM_QSPI0A_DQS 185
+#define IMX8QM_QSPI0A_SS0_B 186
+#define IMX8QM_QSPI0A_SS1_B 187
+#define IMX8QM_QSPI0A_SCLK 188
+#define IMX8QM_QSPI0B_SCLK 189
+#define IMX8QM_QSPI0B_DATA0 190
+#define IMX8QM_QSPI0B_DATA1 191
+#define IMX8QM_QSPI0B_DATA2 192
+#define IMX8QM_QSPI0B_DATA3 193
+#define IMX8QM_QSPI0B_DQS 194
+#define IMX8QM_QSPI0B_SS0_B 195
+#define IMX8QM_QSPI0B_SS1_B 196
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI0 197
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B 198
+#define IMX8QM_PCIE_CTRL0_WAKE_B 199
+#define IMX8QM_PCIE_CTRL0_PERST_B 200
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B 201
+#define IMX8QM_PCIE_CTRL1_WAKE_B 202
+#define IMX8QM_PCIE_CTRL1_PERST_B 203
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_PCIESEP 204
+#define IMX8QM_USB_HSIC0_DATA 205
+#define IMX8QM_USB_HSIC0_STROBE 206
+#define IMX8QM_CALIBRATION_0_HSIC 207
+#define IMX8QM_CALIBRATION_1_HSIC 208
+#define IMX8QM_EMMC0_CLK 209
+#define IMX8QM_EMMC0_CMD 210
+#define IMX8QM_EMMC0_DATA0 211
+#define IMX8QM_EMMC0_DATA1 212
+#define IMX8QM_EMMC0_DATA2 213
+#define IMX8QM_EMMC0_DATA3 214
+#define IMX8QM_EMMC0_DATA4 215
+#define IMX8QM_EMMC0_DATA5 216
+#define IMX8QM_EMMC0_DATA6 217
+#define IMX8QM_EMMC0_DATA7 218
+#define IMX8QM_EMMC0_STROBE 219
+#define IMX8QM_EMMC0_RESET_B 220
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SD1FIX 221
+#define IMX8QM_USDHC1_CLK 222
+#define IMX8QM_USDHC1_CMD 223
+#define IMX8QM_USDHC1_DATA0 224
+#define IMX8QM_USDHC1_DATA1 225
+#define IMX8QM_CTL_NAND_RE_P_N 226
+#define IMX8QM_USDHC1_DATA2 227
+#define IMX8QM_USDHC1_DATA3 228
+#define IMX8QM_CTL_NAND_DQS_P_N 229
+#define IMX8QM_USDHC1_DATA4 230
+#define IMX8QM_USDHC1_DATA5 231
+#define IMX8QM_USDHC1_DATA6 232
+#define IMX8QM_USDHC1_DATA7 233
+#define IMX8QM_USDHC1_STROBE 234
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL2 235
+#define IMX8QM_USDHC2_CLK 236
+#define IMX8QM_USDHC2_CMD 237
+#define IMX8QM_USDHC2_DATA0 238
+#define IMX8QM_USDHC2_DATA1 239
+#define IMX8QM_USDHC2_DATA2 240
+#define IMX8QM_USDHC2_DATA3 241
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL3 242
+#define IMX8QM_ENET0_RGMII_TXC 243
+#define IMX8QM_ENET0_RGMII_TX_CTL 244
+#define IMX8QM_ENET0_RGMII_TXD0 245
+#define IMX8QM_ENET0_RGMII_TXD1 246
+#define IMX8QM_ENET0_RGMII_TXD2 247
+#define IMX8QM_ENET0_RGMII_TXD3 248
+#define IMX8QM_ENET0_RGMII_RXC 249
+#define IMX8QM_ENET0_RGMII_RX_CTL 250
+#define IMX8QM_ENET0_RGMII_RXD0 251
+#define IMX8QM_ENET0_RGMII_RXD1 252
+#define IMX8QM_ENET0_RGMII_RXD2 253
+#define IMX8QM_ENET0_RGMII_RXD3 254
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB 255
+#define IMX8QM_ENET1_RGMII_TXC 256
+#define IMX8QM_ENET1_RGMII_TX_CTL 257
+#define IMX8QM_ENET1_RGMII_TXD0 258
+#define IMX8QM_ENET1_RGMII_TXD1 259
+#define IMX8QM_ENET1_RGMII_TXD2 260
+#define IMX8QM_ENET1_RGMII_TXD3 261
+#define IMX8QM_ENET1_RGMII_RXC 262
+#define IMX8QM_ENET1_RGMII_RX_CTL 263
+#define IMX8QM_ENET1_RGMII_RXD0 264
+#define IMX8QM_ENET1_RGMII_RXD1 265
+#define IMX8QM_ENET1_RGMII_RXD2 266
+#define IMX8QM_ENET1_RGMII_RXD3 267
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA 268
+
+/*
+ * format: <pin_id mux_mode>
+ */
+#define IMX8QM_SIM0_CLK_DMA_SIM0_CLK IMX8QM_SIM0_CLK 0
+#define IMX8QM_SIM0_CLK_LSIO_GPIO0_IO00 IMX8QM_SIM0_CLK 3
+#define IMX8QM_SIM0_RST_DMA_SIM0_RST IMX8QM_SIM0_RST 0
+#define IMX8QM_SIM0_RST_LSIO_GPIO0_IO01 IMX8QM_SIM0_RST 3
+#define IMX8QM_SIM0_IO_DMA_SIM0_IO IMX8QM_SIM0_IO 0
+#define IMX8QM_SIM0_IO_LSIO_GPIO0_IO02 IMX8QM_SIM0_IO 3
+#define IMX8QM_SIM0_PD_DMA_SIM0_PD IMX8QM_SIM0_PD 0
+#define IMX8QM_SIM0_PD_DMA_I2C3_SCL IMX8QM_SIM0_PD 1
+#define IMX8QM_SIM0_PD_LSIO_GPIO0_IO03 IMX8QM_SIM0_PD 3
+#define IMX8QM_SIM0_POWER_EN_DMA_SIM0_POWER_EN IMX8QM_SIM0_POWER_EN 0
+#define IMX8QM_SIM0_POWER_EN_DMA_I2C3_SDA IMX8QM_SIM0_POWER_EN 1
+#define IMX8QM_SIM0_POWER_EN_LSIO_GPIO0_IO04 IMX8QM_SIM0_POWER_EN 3
+#define IMX8QM_SIM0_GPIO0_00_DMA_SIM0_POWER_EN IMX8QM_SIM0_GPIO0_00 0
+#define IMX8QM_SIM0_GPIO0_00_LSIO_GPIO0_IO05 IMX8QM_SIM0_GPIO0_00 3
+#define IMX8QM_M40_I2C0_SCL_M40_I2C0_SCL IMX8QM_M40_I2C0_SCL 0
+#define IMX8QM_M40_I2C0_SCL_M40_UART0_RX IMX8QM_M40_I2C0_SCL 1
+#define IMX8QM_M40_I2C0_SCL_M40_GPIO0_IO02 IMX8QM_M40_I2C0_SCL 2
+#define IMX8QM_M40_I2C0_SCL_LSIO_GPIO0_IO06 IMX8QM_M40_I2C0_SCL 3
+#define IMX8QM_M40_I2C0_SDA_M40_I2C0_SDA IMX8QM_M40_I2C0_SDA 0
+#define IMX8QM_M40_I2C0_SDA_M40_UART0_TX IMX8QM_M40_I2C0_SDA 1
+#define IMX8QM_M40_I2C0_SDA_M40_GPIO0_IO03 IMX8QM_M40_I2C0_SDA 2
+#define IMX8QM_M40_I2C0_SDA_LSIO_GPIO0_IO07 IMX8QM_M40_I2C0_SDA 3
+#define IMX8QM_M40_GPIO0_00_M40_GPIO0_IO00 IMX8QM_M40_GPIO0_00 0
+#define IMX8QM_M40_GPIO0_00_M40_TPM0_CH0 IMX8QM_M40_GPIO0_00 1
+#define IMX8QM_M40_GPIO0_00_DMA_UART4_RX IMX8QM_M40_GPIO0_00 2
+#define IMX8QM_M40_GPIO0_00_LSIO_GPIO0_IO08 IMX8QM_M40_GPIO0_00 3
+#define IMX8QM_M40_GPIO0_01_M40_GPIO0_IO01 IMX8QM_M40_GPIO0_01 0
+#define IMX8QM_M40_GPIO0_01_M40_TPM0_CH1 IMX8QM_M40_GPIO0_01 1
+#define IMX8QM_M40_GPIO0_01_DMA_UART4_TX IMX8QM_M40_GPIO0_01 2
+#define IMX8QM_M40_GPIO0_01_LSIO_GPIO0_IO09 IMX8QM_M40_GPIO0_01 3
+#define IMX8QM_M41_I2C0_SCL_M41_I2C0_SCL IMX8QM_M41_I2C0_SCL 0
+#define IMX8QM_M41_I2C0_SCL_M41_UART0_RX IMX8QM_M41_I2C0_SCL 1
+#define IMX8QM_M41_I2C0_SCL_M41_GPIO0_IO02 IMX8QM_M41_I2C0_SCL 2
+#define IMX8QM_M41_I2C0_SCL_LSIO_GPIO0_IO10 IMX8QM_M41_I2C0_SCL 3
+#define IMX8QM_M41_I2C0_SDA_M41_I2C0_SDA IMX8QM_M41_I2C0_SDA 0
+#define IMX8QM_M41_I2C0_SDA_M41_UART0_TX IMX8QM_M41_I2C0_SDA 1
+#define IMX8QM_M41_I2C0_SDA_M41_GPIO0_IO03 IMX8QM_M41_I2C0_SDA 2
+#define IMX8QM_M41_I2C0_SDA_LSIO_GPIO0_IO11 IMX8QM_M41_I2C0_SDA 3
+#define IMX8QM_M41_GPIO0_00_M41_GPIO0_IO00 IMX8QM_M41_GPIO0_00 0
+#define IMX8QM_M41_GPIO0_00_M41_TPM0_CH0 IMX8QM_M41_GPIO0_00 1
+#define IMX8QM_M41_GPIO0_00_DMA_UART3_RX IMX8QM_M41_GPIO0_00 2
+#define IMX8QM_M41_GPIO0_00_LSIO_GPIO0_IO12 IMX8QM_M41_GPIO0_00 3
+#define IMX8QM_M41_GPIO0_01_M41_GPIO0_IO01 IMX8QM_M41_GPIO0_01 0
+#define IMX8QM_M41_GPIO0_01_M41_TPM0_CH1 IMX8QM_M41_GPIO0_01 1
+#define IMX8QM_M41_GPIO0_01_DMA_UART3_TX IMX8QM_M41_GPIO0_01 2
+#define IMX8QM_M41_GPIO0_01_LSIO_GPIO0_IO13 IMX8QM_M41_GPIO0_01 3
+#define IMX8QM_GPT0_CLK_LSIO_GPT0_CLK IMX8QM_GPT0_CLK 0
+#define IMX8QM_GPT0_CLK_DMA_I2C1_SCL IMX8QM_GPT0_CLK 1
+#define IMX8QM_GPT0_CLK_LSIO_KPP0_COL4 IMX8QM_GPT0_CLK 2
+#define IMX8QM_GPT0_CLK_LSIO_GPIO0_IO14 IMX8QM_GPT0_CLK 3
+#define IMX8QM_GPT0_CAPTURE_LSIO_GPT0_CAPTURE IMX8QM_GPT0_CAPTURE 0
+#define IMX8QM_GPT0_CAPTURE_DMA_I2C1_SDA IMX8QM_GPT0_CAPTURE 1
+#define IMX8QM_GPT0_CAPTURE_LSIO_KPP0_COL5 IMX8QM_GPT0_CAPTURE 2
+#define IMX8QM_GPT0_CAPTURE_LSIO_GPIO0_IO15 IMX8QM_GPT0_CAPTURE 3
+#define IMX8QM_GPT0_COMPARE_LSIO_GPT0_COMPARE IMX8QM_GPT0_COMPARE 0
+#define IMX8QM_GPT0_COMPARE_LSIO_PWM3_OUT IMX8QM_GPT0_COMPARE 1
+#define IMX8QM_GPT0_COMPARE_LSIO_KPP0_COL6 IMX8QM_GPT0_COMPARE 2
+#define IMX8QM_GPT0_COMPARE_LSIO_GPIO0_IO16 IMX8QM_GPT0_COMPARE 3
+#define IMX8QM_GPT1_CLK_LSIO_GPT1_CLK IMX8QM_GPT1_CLK 0
+#define IMX8QM_GPT1_CLK_DMA_I2C2_SCL IMX8QM_GPT1_CLK 1
+#define IMX8QM_GPT1_CLK_LSIO_KPP0_COL7 IMX8QM_GPT1_CLK 2
+#define IMX8QM_GPT1_CLK_LSIO_GPIO0_IO17 IMX8QM_GPT1_CLK 3
+#define IMX8QM_GPT1_CAPTURE_LSIO_GPT1_CAPTURE IMX8QM_GPT1_CAPTURE 0
+#define IMX8QM_GPT1_CAPTURE_DMA_I2C2_SDA IMX8QM_GPT1_CAPTURE 1
+#define IMX8QM_GPT1_CAPTURE_LSIO_KPP0_ROW4 IMX8QM_GPT1_CAPTURE 2
+#define IMX8QM_GPT1_CAPTURE_LSIO_GPIO0_IO18 IMX8QM_GPT1_CAPTURE 3
+#define IMX8QM_GPT1_COMPARE_LSIO_GPT1_COMPARE IMX8QM_GPT1_COMPARE 0
+#define IMX8QM_GPT1_COMPARE_LSIO_PWM2_OUT IMX8QM_GPT1_COMPARE 1
+#define IMX8QM_GPT1_COMPARE_LSIO_KPP0_ROW5 IMX8QM_GPT1_COMPARE 2
+#define IMX8QM_GPT1_COMPARE_LSIO_GPIO0_IO19 IMX8QM_GPT1_COMPARE 3
+#define IMX8QM_UART0_RX_DMA_UART0_RX IMX8QM_UART0_RX 0
+#define IMX8QM_UART0_RX_SCU_UART0_RX IMX8QM_UART0_RX 1
+#define IMX8QM_UART0_RX_LSIO_GPIO0_IO20 IMX8QM_UART0_RX 3
+#define IMX8QM_UART0_TX_DMA_UART0_TX IMX8QM_UART0_TX 0
+#define IMX8QM_UART0_TX_SCU_UART0_TX IMX8QM_UART0_TX 1
+#define IMX8QM_UART0_TX_LSIO_GPIO0_IO21 IMX8QM_UART0_TX 3
+#define IMX8QM_UART0_RTS_B_DMA_UART0_RTS_B IMX8QM_UART0_RTS_B 0
+#define IMX8QM_UART0_RTS_B_LSIO_PWM0_OUT IMX8QM_UART0_RTS_B 1
+#define IMX8QM_UART0_RTS_B_DMA_UART2_RX IMX8QM_UART0_RTS_B 2
+#define IMX8QM_UART0_RTS_B_LSIO_GPIO0_IO22 IMX8QM_UART0_RTS_B 3
+#define IMX8QM_UART0_CTS_B_DMA_UART0_CTS_B IMX8QM_UART0_CTS_B 0
+#define IMX8QM_UART0_CTS_B_LSIO_PWM1_OUT IMX8QM_UART0_CTS_B 1
+#define IMX8QM_UART0_CTS_B_DMA_UART2_TX IMX8QM_UART0_CTS_B 2
+#define IMX8QM_UART0_CTS_B_LSIO_GPIO0_IO23 IMX8QM_UART0_CTS_B 3
+#define IMX8QM_UART1_TX_DMA_UART1_TX IMX8QM_UART1_TX 0
+#define IMX8QM_UART1_TX_DMA_SPI3_SCK IMX8QM_UART1_TX 1
+#define IMX8QM_UART1_TX_LSIO_GPIO0_IO24 IMX8QM_UART1_TX 3
+#define IMX8QM_UART1_RX_DMA_UART1_RX IMX8QM_UART1_RX 0
+#define IMX8QM_UART1_RX_DMA_SPI3_SDO IMX8QM_UART1_RX 1
+#define IMX8QM_UART1_RX_LSIO_GPIO0_IO25 IMX8QM_UART1_RX 3
+#define IMX8QM_UART1_RTS_B_DMA_UART1_RTS_B IMX8QM_UART1_RTS_B 0
+#define IMX8QM_UART1_RTS_B_DMA_SPI3_SDI IMX8QM_UART1_RTS_B 1
+#define IMX8QM_UART1_RTS_B_DMA_UART1_CTS_B IMX8QM_UART1_RTS_B 2
+#define IMX8QM_UART1_RTS_B_LSIO_GPIO0_IO26 IMX8QM_UART1_RTS_B 3
+#define IMX8QM_UART1_CTS_B_DMA_UART1_CTS_B IMX8QM_UART1_CTS_B 0
+#define IMX8QM_UART1_CTS_B_DMA_SPI3_CS0 IMX8QM_UART1_CTS_B 1
+#define IMX8QM_UART1_CTS_B_DMA_UART1_RTS_B IMX8QM_UART1_CTS_B 2
+#define IMX8QM_UART1_CTS_B_LSIO_GPIO0_IO27 IMX8QM_UART1_CTS_B 3
+#define IMX8QM_SCU_PMIC_MEMC_ON_SCU_GPIO0_IOXX_PMIC_MEMC_ON IMX8QM_SCU_PMIC_MEMC_ON 0
+#define IMX8QM_SCU_WDOG_OUT_SCU_WDOG0_WDOG_OUT IMX8QM_SCU_WDOG_OUT 0
+#define IMX8QM_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8QM_PMIC_I2C_SDA 0
+#define IMX8QM_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8QM_PMIC_I2C_SCL 0
+#define IMX8QM_PMIC_EARLY_WARNING_SCU_PMIC_EARLY_WARNING IMX8QM_PMIC_EARLY_WARNING 0
+#define IMX8QM_PMIC_INT_B_SCU_DIMX8QMMIC_INT_B IMX8QM_PMIC_INT_B 0
+#define IMX8QM_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8QM_SCU_GPIO0_00 0
+#define IMX8QM_SCU_GPIO0_00_SCU_UART0_RX IMX8QM_SCU_GPIO0_00 1
+#define IMX8QM_SCU_GPIO0_00_LSIO_GPIO0_IO28 IMX8QM_SCU_GPIO0_00 3
+#define IMX8QM_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8QM_SCU_GPIO0_01 0
+#define IMX8QM_SCU_GPIO0_01_SCU_UART0_TX IMX8QM_SCU_GPIO0_01 1
+#define IMX8QM_SCU_GPIO0_01_LSIO_GPIO0_IO29 IMX8QM_SCU_GPIO0_01 3
+#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IO02 IMX8QM_SCU_GPIO0_02 0
+#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IOXX_PMIC_GPU0_ON IMX8QM_SCU_GPIO0_02 1
+#define IMX8QM_SCU_GPIO0_02_LSIO_GPIO0_IO30 IMX8QM_SCU_GPIO0_02 3
+#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IO03 IMX8QM_SCU_GPIO0_03 0
+#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IOXX_PMIC_GPU1_ON IMX8QM_SCU_GPIO0_03 1
+#define IMX8QM_SCU_GPIO0_03_LSIO_GPIO0_IO31 IMX8QM_SCU_GPIO0_03 3
+#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IO04 IMX8QM_SCU_GPIO0_04 0
+#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IOXX_PMIC_A72_ON IMX8QM_SCU_GPIO0_04 1
+#define IMX8QM_SCU_GPIO0_04_LSIO_GPIO1_IO00 IMX8QM_SCU_GPIO0_04 3
+#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IO05 IMX8QM_SCU_GPIO0_05 0
+#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IOXX_PMIC_A53_ON IMX8QM_SCU_GPIO0_05 1
+#define IMX8QM_SCU_GPIO0_05_LSIO_GPIO1_IO01 IMX8QM_SCU_GPIO0_05 3
+#define IMX8QM_SCU_GPIO0_06_SCU_GPIO0_IO06 IMX8QM_SCU_GPIO0_06 0
+#define IMX8QM_SCU_GPIO0_06_SCU_TPM0_CH0 IMX8QM_SCU_GPIO0_06 1
+#define IMX8QM_SCU_GPIO0_06_LSIO_GPIO1_IO02 IMX8QM_SCU_GPIO0_06 3
+#define IMX8QM_SCU_GPIO0_07_SCU_GPIO0_IO07 IMX8QM_SCU_GPIO0_07 0
+#define IMX8QM_SCU_GPIO0_07_SCU_TPM0_CH1 IMX8QM_SCU_GPIO0_07 1
+#define IMX8QM_SCU_GPIO0_07_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8QM_SCU_GPIO0_07 2
+#define IMX8QM_SCU_GPIO0_07_LSIO_GPIO1_IO03 IMX8QM_SCU_GPIO0_07 3
+#define IMX8QM_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8QM_SCU_BOOT_MODE0 0
+#define IMX8QM_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8QM_SCU_BOOT_MODE1 0
+#define IMX8QM_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8QM_SCU_BOOT_MODE2 0
+#define IMX8QM_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3 IMX8QM_SCU_BOOT_MODE3 0
+#define IMX8QM_SCU_BOOT_MODE4_SCU_DSC_BOOT_MODE4 IMX8QM_SCU_BOOT_MODE4 0
+#define IMX8QM_SCU_BOOT_MODE4_SCU_PMIC_I2C_SCL IMX8QM_SCU_BOOT_MODE4 1
+#define IMX8QM_SCU_BOOT_MODE5_SCU_DSC_BOOT_MODE5 IMX8QM_SCU_BOOT_MODE5 0
+#define IMX8QM_SCU_BOOT_MODE5_SCU_PMIC_I2C_SDA IMX8QM_SCU_BOOT_MODE5 1
+#define IMX8QM_LVDS0_GPIO00_LVDS0_GPIO0_IO00 IMX8QM_LVDS0_GPIO00 0
+#define IMX8QM_LVDS0_GPIO00_LVDS0_PWM0_OUT IMX8QM_LVDS0_GPIO00 1
+#define IMX8QM_LVDS0_GPIO00_LSIO_GPIO1_IO04 IMX8QM_LVDS0_GPIO00 3
+#define IMX8QM_LVDS0_GPIO01_LVDS0_GPIO0_IO01 IMX8QM_LVDS0_GPIO01 0
+#define IMX8QM_LVDS0_GPIO01_LSIO_GPIO1_IO05 IMX8QM_LVDS0_GPIO01 3
+#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_I2C0_SCL IMX8QM_LVDS0_I2C0_SCL 0
+#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_GPIO0_IO02 IMX8QM_LVDS0_I2C0_SCL 1
+#define IMX8QM_LVDS0_I2C0_SCL_LSIO_GPIO1_IO06 IMX8QM_LVDS0_I2C0_SCL 3
+#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_I2C0_SDA IMX8QM_LVDS0_I2C0_SDA 0
+#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_GPIO0_IO03 IMX8QM_LVDS0_I2C0_SDA 1
+#define IMX8QM_LVDS0_I2C0_SDA_LSIO_GPIO1_IO07 IMX8QM_LVDS0_I2C0_SDA 3
+#define IMX8QM_LVDS0_I2C1_SCL_LVDS0_I2C1_SCL IMX8QM_LVDS0_I2C1_SCL 0
+#define IMX8QM_LVDS0_I2C1_SCL_DMA_UART2_TX IMX8QM_LVDS0_I2C1_SCL 1
+#define IMX8QM_LVDS0_I2C1_SCL_LSIO_GPIO1_IO08 IMX8QM_LVDS0_I2C1_SCL 3
+#define IMX8QM_LVDS0_I2C1_SDA_LVDS0_I2C1_SDA IMX8QM_LVDS0_I2C1_SDA 0
+#define IMX8QM_LVDS0_I2C1_SDA_DMA_UART2_RX IMX8QM_LVDS0_I2C1_SDA 1
+#define IMX8QM_LVDS0_I2C1_SDA_LSIO_GPIO1_IO09 IMX8QM_LVDS0_I2C1_SDA 3
+#define IMX8QM_LVDS1_GPIO00_LVDS1_GPIO0_IO00 IMX8QM_LVDS1_GPIO00 0
+#define IMX8QM_LVDS1_GPIO00_LVDS1_PWM0_OUT IMX8QM_LVDS1_GPIO00 1
+#define IMX8QM_LVDS1_GPIO00_LSIO_GPIO1_IO10 IMX8QM_LVDS1_GPIO00 3
+#define IMX8QM_LVDS1_GPIO01_LVDS1_GPIO0_IO01 IMX8QM_LVDS1_GPIO01 0
+#define IMX8QM_LVDS1_GPIO01_LSIO_GPIO1_IO11 IMX8QM_LVDS1_GPIO01 3
+#define IMX8QM_LVDS1_I2C0_SCL_LVDS1_I2C0_SCL IMX8QM_LVDS1_I2C0_SCL 0
+#define IMX8QM_LVDS1_I2C0_SCL_LVDS1_GPIO0_IO02 IMX8QM_LVDS1_I2C0_SCL 1
+#define IMX8QM_LVDS1_I2C0_SCL_LSIO_GPIO1_IO12 IMX8QM_LVDS1_I2C0_SCL 3
+#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_I2C0_SDA IMX8QM_LVDS1_I2C0_SDA 0
+#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_GPIO0_IO03 IMX8QM_LVDS1_I2C0_SDA 1
+#define IMX8QM_LVDS1_I2C0_SDA_LSIO_GPIO1_IO13 IMX8QM_LVDS1_I2C0_SDA 3
+#define IMX8QM_LVDS1_I2C1_SCL_LVDS1_I2C1_SCL IMX8QM_LVDS1_I2C1_SCL 0
+#define IMX8QM_LVDS1_I2C1_SCL_DMA_UART3_TX IMX8QM_LVDS1_I2C1_SCL 1
+#define IMX8QM_LVDS1_I2C1_SCL_LSIO_GPIO1_IO14 IMX8QM_LVDS1_I2C1_SCL 3
+#define IMX8QM_LVDS1_I2C1_SDA_LVDS1_I2C1_SDA IMX8QM_LVDS1_I2C1_SDA 0
+#define IMX8QM_LVDS1_I2C1_SDA_DMA_UART3_RX IMX8QM_LVDS1_I2C1_SDA 1
+#define IMX8QM_LVDS1_I2C1_SDA_LSIO_GPIO1_IO15 IMX8QM_LVDS1_I2C1_SDA 3
+#define IMX8QM_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL IMX8QM_MIPI_DSI0_I2C0_SCL 0
+#define IMX8QM_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO16 IMX8QM_MIPI_DSI0_I2C0_SCL 3
+#define IMX8QM_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA IMX8QM_MIPI_DSI0_I2C0_SDA 0
+#define IMX8QM_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO17 IMX8QM_MIPI_DSI0_I2C0_SDA 3
+#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00 IMX8QM_MIPI_DSI0_GPIO0_00 0
+#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT IMX8QM_MIPI_DSI0_GPIO0_00 1
+#define IMX8QM_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO18 IMX8QM_MIPI_DSI0_GPIO0_00 3
+#define IMX8QM_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01 IMX8QM_MIPI_DSI0_GPIO0_01 0
+#define IMX8QM_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO19 IMX8QM_MIPI_DSI0_GPIO0_01 3
+#define IMX8QM_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL IMX8QM_MIPI_DSI1_I2C0_SCL 0
+#define IMX8QM_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO20 IMX8QM_MIPI_DSI1_I2C0_SCL 3
+#define IMX8QM_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA IMX8QM_MIPI_DSI1_I2C0_SDA 0
+#define IMX8QM_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO21 IMX8QM_MIPI_DSI1_I2C0_SDA 3
+#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00 IMX8QM_MIPI_DSI1_GPIO0_00 0
+#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT IMX8QM_MIPI_DSI1_GPIO0_00 1
+#define IMX8QM_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO22 IMX8QM_MIPI_DSI1_GPIO0_00 3
+#define IMX8QM_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01 IMX8QM_MIPI_DSI1_GPIO0_01 0
+#define IMX8QM_MIPI_DSI1_GPIO0_01_LSIO_GPIO1_IO23 IMX8QM_MIPI_DSI1_GPIO0_01 3
+#define IMX8QM_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT IMX8QM_MIPI_CSI0_MCLK_OUT 0
+#define IMX8QM_MIPI_CSI0_MCLK_OUT_LSIO_GPIO1_IO24 IMX8QM_MIPI_CSI0_MCLK_OUT 3
+#define IMX8QM_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL IMX8QM_MIPI_CSI0_I2C0_SCL 0
+#define IMX8QM_MIPI_CSI0_I2C0_SCL_LSIO_GPIO1_IO25 IMX8QM_MIPI_CSI0_I2C0_SCL 3
+#define IMX8QM_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA IMX8QM_MIPI_CSI0_I2C0_SDA 0
+#define IMX8QM_MIPI_CSI0_I2C0_SDA_LSIO_GPIO1_IO26 IMX8QM_MIPI_CSI0_I2C0_SDA 3
+#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00 IMX8QM_MIPI_CSI0_GPIO0_00 0
+#define IMX8QM_MIPI_CSI0_GPIO0_00_DMA_I2C0_SCL IMX8QM_MIPI_CSI0_GPIO0_00 1
+#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI1_I2C0_SCL IMX8QM_MIPI_CSI0_GPIO0_00 2
+#define IMX8QM_MIPI_CSI0_GPIO0_00_LSIO_GPIO1_IO27 IMX8QM_MIPI_CSI0_GPIO0_00 3
+#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01 IMX8QM_MIPI_CSI0_GPIO0_01 0
+#define IMX8QM_MIPI_CSI0_GPIO0_01_DMA_I2C0_SDA IMX8QM_MIPI_CSI0_GPIO0_01 1
+#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI1_I2C0_SDA IMX8QM_MIPI_CSI0_GPIO0_01 2
+#define IMX8QM_MIPI_CSI0_GPIO0_01_LSIO_GPIO1_IO28 IMX8QM_MIPI_CSI0_GPIO0_01 3
+#define IMX8QM_MIPI_CSI1_MCLK_OUT_MIPI_CSI1_ACM_MCLK_OUT IMX8QM_MIPI_CSI1_MCLK_OUT 0
+#define IMX8QM_MIPI_CSI1_MCLK_OUT_LSIO_GPIO1_IO29 IMX8QM_MIPI_CSI1_MCLK_OUT 3
+#define IMX8QM_MIPI_CSI1_GPIO0_00_MIPI_CSI1_GPIO0_IO00 IMX8QM_MIPI_CSI1_GPIO0_00 0
+#define IMX8QM_MIPI_CSI1_GPIO0_00_DMA_UART4_RX IMX8QM_MIPI_CSI1_GPIO0_00 1
+#define IMX8QM_MIPI_CSI1_GPIO0_00_LSIO_GPIO1_IO30 IMX8QM_MIPI_CSI1_GPIO0_00 3
+#define IMX8QM_MIPI_CSI1_GPIO0_01_MIPI_CSI1_GPIO0_IO01 IMX8QM_MIPI_CSI1_GPIO0_01 0
+#define IMX8QM_MIPI_CSI1_GPIO0_01_DMA_UART4_TX IMX8QM_MIPI_CSI1_GPIO0_01 1
+#define IMX8QM_MIPI_CSI1_GPIO0_01_LSIO_GPIO1_IO31 IMX8QM_MIPI_CSI1_GPIO0_01 3
+#define IMX8QM_MIPI_CSI1_I2C0_SCL_MIPI_CSI1_I2C0_SCL IMX8QM_MIPI_CSI1_I2C0_SCL 0
+#define IMX8QM_MIPI_CSI1_I2C0_SCL_LSIO_GPIO2_IO00 IMX8QM_MIPI_CSI1_I2C0_SCL 3
+#define IMX8QM_MIPI_CSI1_I2C0_SDA_MIPI_CSI1_I2C0_SDA IMX8QM_MIPI_CSI1_I2C0_SDA 0
+#define IMX8QM_MIPI_CSI1_I2C0_SDA_LSIO_GPIO2_IO01 IMX8QM_MIPI_CSI1_I2C0_SDA 3
+#define IMX8QM_HDMI_TX0_TS_SCL_HDMI_TX0_I2C0_SCL IMX8QM_HDMI_TX0_TS_SCL 0
+#define IMX8QM_HDMI_TX0_TS_SCL_DMA_I2C0_SCL IMX8QM_HDMI_TX0_TS_SCL 1
+#define IMX8QM_HDMI_TX0_TS_SCL_LSIO_GPIO2_IO02 IMX8QM_HDMI_TX0_TS_SCL 3
+#define IMX8QM_HDMI_TX0_TS_SDA_HDMI_TX0_I2C0_SDA IMX8QM_HDMI_TX0_TS_SDA 0
+#define IMX8QM_HDMI_TX0_TS_SDA_DMA_I2C0_SDA IMX8QM_HDMI_TX0_TS_SDA 1
+#define IMX8QM_HDMI_TX0_TS_SDA_LSIO_GPIO2_IO03 IMX8QM_HDMI_TX0_TS_SDA 3
+#define IMX8QM_ESAI1_FSR_AUD_ESAI1_FSR IMX8QM_ESAI1_FSR 0
+#define IMX8QM_ESAI1_FSR_LSIO_GPIO2_IO04 IMX8QM_ESAI1_FSR 3
+#define IMX8QM_ESAI1_FST_AUD_ESAI1_FST IMX8QM_ESAI1_FST 0
+#define IMX8QM_ESAI1_FST_AUD_SPDIF0_EXT_CLK IMX8QM_ESAI1_FST 1
+#define IMX8QM_ESAI1_FST_LSIO_GPIO2_IO05 IMX8QM_ESAI1_FST 3
+#define IMX8QM_ESAI1_SCKR_AUD_ESAI1_SCKR IMX8QM_ESAI1_SCKR 0
+#define IMX8QM_ESAI1_SCKR_LSIO_GPIO2_IO06 IMX8QM_ESAI1_SCKR 3
+#define IMX8QM_ESAI1_SCKT_AUD_ESAI1_SCKT IMX8QM_ESAI1_SCKT 0
+#define IMX8QM_ESAI1_SCKT_AUD_SAI2_RXC IMX8QM_ESAI1_SCKT 1
+#define IMX8QM_ESAI1_SCKT_AUD_SPDIF0_EXT_CLK IMX8QM_ESAI1_SCKT 2
+#define IMX8QM_ESAI1_SCKT_LSIO_GPIO2_IO07 IMX8QM_ESAI1_SCKT 3
+#define IMX8QM_ESAI1_TX0_AUD_ESAI1_TX0 IMX8QM_ESAI1_TX0 0
+#define IMX8QM_ESAI1_TX0_AUD_SAI2_RXD IMX8QM_ESAI1_TX0 1
+#define IMX8QM_ESAI1_TX0_AUD_SPDIF0_RX IMX8QM_ESAI1_TX0 2
+#define IMX8QM_ESAI1_TX0_LSIO_GPIO2_IO08 IMX8QM_ESAI1_TX0 3
+#define IMX8QM_ESAI1_TX1_AUD_ESAI1_TX1 IMX8QM_ESAI1_TX1 0
+#define IMX8QM_ESAI1_TX1_AUD_SAI2_RXFS IMX8QM_ESAI1_TX1 1
+#define IMX8QM_ESAI1_TX1_AUD_SPDIF0_TX IMX8QM_ESAI1_TX1 2
+#define IMX8QM_ESAI1_TX1_LSIO_GPIO2_IO09 IMX8QM_ESAI1_TX1 3
+#define IMX8QM_ESAI1_TX2_RX3_AUD_ESAI1_TX2_RX3 IMX8QM_ESAI1_TX2_RX3 0
+#define IMX8QM_ESAI1_TX2_RX3_AUD_SPDIF0_RX IMX8QM_ESAI1_TX2_RX3 1
+#define IMX8QM_ESAI1_TX2_RX3_LSIO_GPIO2_IO10 IMX8QM_ESAI1_TX2_RX3 3
+#define IMX8QM_ESAI1_TX3_RX2_AUD_ESAI1_TX3_RX2 IMX8QM_ESAI1_TX3_RX2 0
+#define IMX8QM_ESAI1_TX3_RX2_AUD_SPDIF0_TX IMX8QM_ESAI1_TX3_RX2 1
+#define IMX8QM_ESAI1_TX3_RX2_LSIO_GPIO2_IO11 IMX8QM_ESAI1_TX3_RX2 3
+#define IMX8QM_ESAI1_TX4_RX1_AUD_ESAI1_TX4_RX1 IMX8QM_ESAI1_TX4_RX1 0
+#define IMX8QM_ESAI1_TX4_RX1_LSIO_GPIO2_IO12 IMX8QM_ESAI1_TX4_RX1 3
+#define IMX8QM_ESAI1_TX5_RX0_AUD_ESAI1_TX5_RX0 IMX8QM_ESAI1_TX5_RX0 0
+#define IMX8QM_ESAI1_TX5_RX0_LSIO_GPIO2_IO13 IMX8QM_ESAI1_TX5_RX0 3
+#define IMX8QM_SPDIF0_RX_AUD_SPDIF0_RX IMX8QM_SPDIF0_RX 0
+#define IMX8QM_SPDIF0_RX_AUD_MQS_R IMX8QM_SPDIF0_RX 1
+#define IMX8QM_SPDIF0_RX_AUD_ACM_MCLK_IN1 IMX8QM_SPDIF0_RX 2
+#define IMX8QM_SPDIF0_RX_LSIO_GPIO2_IO14 IMX8QM_SPDIF0_RX 3
+#define IMX8QM_SPDIF0_TX_AUD_SPDIF0_TX IMX8QM_SPDIF0_TX 0
+#define IMX8QM_SPDIF0_TX_AUD_MQS_L IMX8QM_SPDIF0_TX 1
+#define IMX8QM_SPDIF0_TX_AUD_ACM_MCLK_OUT1 IMX8QM_SPDIF0_TX 2
+#define IMX8QM_SPDIF0_TX_LSIO_GPIO2_IO15 IMX8QM_SPDIF0_TX 3
+#define IMX8QM_SPDIF0_EXT_CLK_AUD_SPDIF0_EXT_CLK IMX8QM_SPDIF0_EXT_CLK 0
+#define IMX8QM_SPDIF0_EXT_CLK_DMA_DMA0_REQ_IN0 IMX8QM_SPDIF0_EXT_CLK 1
+#define IMX8QM_SPDIF0_EXT_CLK_LSIO_GPIO2_IO16 IMX8QM_SPDIF0_EXT_CLK 3
+#define IMX8QM_SPI3_SCK_DMA_SPI3_SCK IMX8QM_SPI3_SCK 0
+#define IMX8QM_SPI3_SCK_LSIO_GPIO2_IO17 IMX8QM_SPI3_SCK 3
+#define IMX8QM_SPI3_SDO_DMA_SPI3_SDO IMX8QM_SPI3_SDO 0
+#define IMX8QM_SPI3_SDO_DMA_FTM_CH0 IMX8QM_SPI3_SDO 1
+#define IMX8QM_SPI3_SDO_LSIO_GPIO2_IO18 IMX8QM_SPI3_SDO 3
+#define IMX8QM_SPI3_SDI_DMA_SPI3_SDI IMX8QM_SPI3_SDI 0
+#define IMX8QM_SPI3_SDI_DMA_FTM_CH1 IMX8QM_SPI3_SDI 1
+#define IMX8QM_SPI3_SDI_LSIO_GPIO2_IO19 IMX8QM_SPI3_SDI 3
+#define IMX8QM_SPI3_CS0_DMA_SPI3_CS0 IMX8QM_SPI3_CS0 0
+#define IMX8QM_SPI3_CS0_DMA_FTM_CH2 IMX8QM_SPI3_CS0 1
+#define IMX8QM_SPI3_CS0_LSIO_GPIO2_IO20 IMX8QM_SPI3_CS0 3
+#define IMX8QM_SPI3_CS1_DMA_SPI3_CS1 IMX8QM_SPI3_CS1 0
+#define IMX8QM_SPI3_CS1_LSIO_GPIO2_IO21 IMX8QM_SPI3_CS1 3
+#define IMX8QM_ESAI0_FSR_AUD_ESAI0_FSR IMX8QM_ESAI0_FSR 0
+#define IMX8QM_ESAI0_FSR_LSIO_GPIO2_IO22 IMX8QM_ESAI0_FSR 3
+#define IMX8QM_ESAI0_FST_AUD_ESAI0_FST IMX8QM_ESAI0_FST 0
+#define IMX8QM_ESAI0_FST_LSIO_GPIO2_IO23 IMX8QM_ESAI0_FST 3
+#define IMX8QM_ESAI0_SCKR_AUD_ESAI0_SCKR IMX8QM_ESAI0_SCKR 0
+#define IMX8QM_ESAI0_SCKR_LSIO_GPIO2_IO24 IMX8QM_ESAI0_SCKR 3
+#define IMX8QM_ESAI0_SCKT_AUD_ESAI0_SCKT IMX8QM_ESAI0_SCKT 0
+#define IMX8QM_ESAI0_SCKT_LSIO_GPIO2_IO25 IMX8QM_ESAI0_SCKT 3
+#define IMX8QM_ESAI0_TX0_AUD_ESAI0_TX0 IMX8QM_ESAI0_TX0 0
+#define IMX8QM_ESAI0_TX0_LSIO_GPIO2_IO26 IMX8QM_ESAI0_TX0 3
+#define IMX8QM_ESAI0_TX1_AUD_ESAI0_TX1 IMX8QM_ESAI0_TX1 0
+#define IMX8QM_ESAI0_TX1_LSIO_GPIO2_IO27 IMX8QM_ESAI0_TX1 3
+#define IMX8QM_ESAI0_TX2_RX3_AUD_ESAI0_TX2_RX3 IMX8QM_ESAI0_TX2_RX3 0
+#define IMX8QM_ESAI0_TX2_RX3_LSIO_GPIO2_IO28 IMX8QM_ESAI0_TX2_RX3 3
+#define IMX8QM_ESAI0_TX3_RX2_AUD_ESAI0_TX3_RX2 IMX8QM_ESAI0_TX3_RX2 0
+#define IMX8QM_ESAI0_TX3_RX2_LSIO_GPIO2_IO29 IMX8QM_ESAI0_TX3_RX2 3
+#define IMX8QM_ESAI0_TX4_RX1_AUD_ESAI0_TX4_RX1 IMX8QM_ESAI0_TX4_RX1 0
+#define IMX8QM_ESAI0_TX4_RX1_LSIO_GPIO2_IO30 IMX8QM_ESAI0_TX4_RX1 3
+#define IMX8QM_ESAI0_TX5_RX0_AUD_ESAI0_TX5_RX0 IMX8QM_ESAI0_TX5_RX0 0
+#define IMX8QM_ESAI0_TX5_RX0_LSIO_GPIO2_IO31 IMX8QM_ESAI0_TX5_RX0 3
+#define IMX8QM_MCLK_IN0_AUD_ACM_MCLK_IN0 IMX8QM_MCLK_IN0 0
+#define IMX8QM_MCLK_IN0_AUD_ESAI0_RX_HF_CLK IMX8QM_MCLK_IN0 1
+#define IMX8QM_MCLK_IN0_AUD_ESAI1_RX_HF_CLK IMX8QM_MCLK_IN0 2
+#define IMX8QM_MCLK_IN0_LSIO_GPIO3_IO00 IMX8QM_MCLK_IN0 3
+#define IMX8QM_MCLK_OUT0_AUD_ACM_MCLK_OUT0 IMX8QM_MCLK_OUT0 0
+#define IMX8QM_MCLK_OUT0_AUD_ESAI0_TX_HF_CLK IMX8QM_MCLK_OUT0 1
+#define IMX8QM_MCLK_OUT0_AUD_ESAI1_TX_HF_CLK IMX8QM_MCLK_OUT0 2
+#define IMX8QM_MCLK_OUT0_LSIO_GPIO3_IO01 IMX8QM_MCLK_OUT0 3
+#define IMX8QM_SPI0_SCK_DMA_SPI0_SCK IMX8QM_SPI0_SCK 0
+#define IMX8QM_SPI0_SCK_AUD_SAI0_RXC IMX8QM_SPI0_SCK 1
+#define IMX8QM_SPI0_SCK_LSIO_GPIO3_IO02 IMX8QM_SPI0_SCK 3
+#define IMX8QM_SPI0_SDO_DMA_SPI0_SDO IMX8QM_SPI0_SDO 0
+#define IMX8QM_SPI0_SDO_AUD_SAI0_TXD IMX8QM_SPI0_SDO 1
+#define IMX8QM_SPI0_SDO_LSIO_GPIO3_IO03 IMX8QM_SPI0_SDO 3
+#define IMX8QM_SPI0_SDI_DMA_SPI0_SDI IMX8QM_SPI0_SDI 0
+#define IMX8QM_SPI0_SDI_AUD_SAI0_RXD IMX8QM_SPI0_SDI 1
+#define IMX8QM_SPI0_SDI_LSIO_GPIO3_IO04 IMX8QM_SPI0_SDI 3
+#define IMX8QM_SPI0_CS0_DMA_SPI0_CS0 IMX8QM_SPI0_CS0 0
+#define IMX8QM_SPI0_CS0_AUD_SAI0_RXFS IMX8QM_SPI0_CS0 1
+#define IMX8QM_SPI0_CS0_LSIO_GPIO3_IO05 IMX8QM_SPI0_CS0 3
+#define IMX8QM_SPI0_CS1_DMA_SPI0_CS1 IMX8QM_SPI0_CS1 0
+#define IMX8QM_SPI0_CS1_AUD_SAI0_TXC IMX8QM_SPI0_CS1 1
+#define IMX8QM_SPI0_CS1_LSIO_GPIO3_IO06 IMX8QM_SPI0_CS1 3
+#define IMX8QM_SPI2_SCK_DMA_SPI2_SCK IMX8QM_SPI2_SCK 0
+#define IMX8QM_SPI2_SCK_LSIO_GPIO3_IO07 IMX8QM_SPI2_SCK 3
+#define IMX8QM_SPI2_SDO_DMA_SPI2_SDO IMX8QM_SPI2_SDO 0
+#define IMX8QM_SPI2_SDO_LSIO_GPIO3_IO08 IMX8QM_SPI2_SDO 3
+#define IMX8QM_SPI2_SDI_DMA_SPI2_SDI IMX8QM_SPI2_SDI 0
+#define IMX8QM_SPI2_SDI_LSIO_GPIO3_IO09 IMX8QM_SPI2_SDI 3
+#define IMX8QM_SPI2_CS0_DMA_SPI2_CS0 IMX8QM_SPI2_CS0 0
+#define IMX8QM_SPI2_CS0_LSIO_GPIO3_IO10 IMX8QM_SPI2_CS0 3
+#define IMX8QM_SPI2_CS1_DMA_SPI2_CS1 IMX8QM_SPI2_CS1 0
+#define IMX8QM_SPI2_CS1_AUD_SAI0_TXFS IMX8QM_SPI2_CS1 1
+#define IMX8QM_SPI2_CS1_LSIO_GPIO3_IO11 IMX8QM_SPI2_CS1 3
+#define IMX8QM_SAI1_RXC_AUD_SAI1_RXC IMX8QM_SAI1_RXC 0
+#define IMX8QM_SAI1_RXC_AUD_SAI0_TXD IMX8QM_SAI1_RXC 1
+#define IMX8QM_SAI1_RXC_LSIO_GPIO3_IO12 IMX8QM_SAI1_RXC 3
+#define IMX8QM_SAI1_RXD_AUD_SAI1_RXD IMX8QM_SAI1_RXD 0
+#define IMX8QM_SAI1_RXD_AUD_SAI0_TXFS IMX8QM_SAI1_RXD 1
+#define IMX8QM_SAI1_RXD_LSIO_GPIO3_IO13 IMX8QM_SAI1_RXD 3
+#define IMX8QM_SAI1_RXFS_AUD_SAI1_RXFS IMX8QM_SAI1_RXFS 0
+#define IMX8QM_SAI1_RXFS_AUD_SAI0_RXD IMX8QM_SAI1_RXFS 1
+#define IMX8QM_SAI1_RXFS_LSIO_GPIO3_IO14 IMX8QM_SAI1_RXFS 3
+#define IMX8QM_SAI1_TXC_AUD_SAI1_TXC IMX8QM_SAI1_TXC 0
+#define IMX8QM_SAI1_TXC_AUD_SAI0_TXC IMX8QM_SAI1_TXC 1
+#define IMX8QM_SAI1_TXC_LSIO_GPIO3_IO15 IMX8QM_SAI1_TXC 3
+#define IMX8QM_SAI1_TXD_AUD_SAI1_TXD IMX8QM_SAI1_TXD 0
+#define IMX8QM_SAI1_TXD_AUD_SAI1_RXC IMX8QM_SAI1_TXD 1
+#define IMX8QM_SAI1_TXD_LSIO_GPIO3_IO16 IMX8QM_SAI1_TXD 3
+#define IMX8QM_SAI1_TXFS_AUD_SAI1_TXFS IMX8QM_SAI1_TXFS 0
+#define IMX8QM_SAI1_TXFS_AUD_SAI1_RXFS IMX8QM_SAI1_TXFS 1
+#define IMX8QM_SAI1_TXFS_LSIO_GPIO3_IO17 IMX8QM_SAI1_TXFS 3
+#define IMX8QM_ADC_IN7_DMA_ADC1_IN3 IMX8QM_ADC_IN7 0
+#define IMX8QM_ADC_IN7_DMA_SPI1_CS1 IMX8QM_ADC_IN7 1
+#define IMX8QM_ADC_IN7_LSIO_KPP0_ROW3 IMX8QM_ADC_IN7 2
+#define IMX8QM_ADC_IN7_LSIO_GPIO3_IO25 IMX8QM_ADC_IN7 3
+#define IMX8QM_ADC_IN6_DMA_ADC1_IN2 IMX8QM_ADC_IN6 0
+#define IMX8QM_ADC_IN6_DMA_SPI1_CS0 IMX8QM_ADC_IN6 1
+#define IMX8QM_ADC_IN6_LSIO_KPP0_ROW2 IMX8QM_ADC_IN6 2
+#define IMX8QM_ADC_IN6_LSIO_GPIO3_IO24 IMX8QM_ADC_IN6 3
+#define IMX8QM_ADC_IN5_DMA_ADC1_IN1 IMX8QM_ADC_IN5 0
+#define IMX8QM_ADC_IN5_DMA_SPI1_SDI IMX8QM_ADC_IN5 1
+#define IMX8QM_ADC_IN5_LSIO_KPP0_ROW1 IMX8QM_ADC_IN5 2
+#define IMX8QM_ADC_IN5_LSIO_GPIO3_IO23 IMX8QM_ADC_IN5 3
+#define IMX8QM_ADC_IN4_DMA_ADC1_IN0 IMX8QM_ADC_IN4 0
+#define IMX8QM_ADC_IN4_DMA_SPI1_SDO IMX8QM_ADC_IN4 1
+#define IMX8QM_ADC_IN4_LSIO_KPP0_ROW0 IMX8QM_ADC_IN4 2
+#define IMX8QM_ADC_IN4_LSIO_GPIO3_IO22 IMX8QM_ADC_IN4 3
+#define IMX8QM_ADC_IN3_DMA_ADC0_IN3 IMX8QM_ADC_IN3 0
+#define IMX8QM_ADC_IN3_DMA_SPI1_SCK IMX8QM_ADC_IN3 1
+#define IMX8QM_ADC_IN3_LSIO_KPP0_COL3 IMX8QM_ADC_IN3 2
+#define IMX8QM_ADC_IN3_LSIO_GPIO3_IO21 IMX8QM_ADC_IN3 3
+#define IMX8QM_ADC_IN2_DMA_ADC0_IN2 IMX8QM_ADC_IN2 0
+#define IMX8QM_ADC_IN2_LSIO_KPP0_COL2 IMX8QM_ADC_IN2 2
+#define IMX8QM_ADC_IN2_LSIO_GPIO3_IO20 IMX8QM_ADC_IN2 3
+#define IMX8QM_ADC_IN1_DMA_ADC0_IN1 IMX8QM_ADC_IN1 0
+#define IMX8QM_ADC_IN1_LSIO_KPP0_COL1 IMX8QM_ADC_IN1 2
+#define IMX8QM_ADC_IN1_LSIO_GPIO3_IO19 IMX8QM_ADC_IN1 3
+#define IMX8QM_ADC_IN0_DMA_ADC0_IN0 IMX8QM_ADC_IN0 0
+#define IMX8QM_ADC_IN0_LSIO_KPP0_COL0 IMX8QM_ADC_IN0 2
+#define IMX8QM_ADC_IN0_LSIO_GPIO3_IO18 IMX8QM_ADC_IN0 3
+#define IMX8QM_MLB_SIG_CONN_MLB_SIG IMX8QM_MLB_SIG 0
+#define IMX8QM_MLB_SIG_AUD_SAI3_RXC IMX8QM_MLB_SIG 1
+#define IMX8QM_MLB_SIG_LSIO_GPIO3_IO26 IMX8QM_MLB_SIG 3
+#define IMX8QM_MLB_CLK_CONN_MLB_CLK IMX8QM_MLB_CLK 0
+#define IMX8QM_MLB_CLK_AUD_SAI3_RXFS IMX8QM_MLB_CLK 1
+#define IMX8QM_MLB_CLK_LSIO_GPIO3_IO27 IMX8QM_MLB_CLK 3
+#define IMX8QM_MLB_DATA_CONN_MLB_DATA IMX8QM_MLB_DATA 0
+#define IMX8QM_MLB_DATA_AUD_SAI3_RXD IMX8QM_MLB_DATA 1
+#define IMX8QM_MLB_DATA_LSIO_GPIO3_IO28 IMX8QM_MLB_DATA 3
+#define IMX8QM_FLEXCAN0_RX_DMA_FLEXCAN0_RX IMX8QM_FLEXCAN0_RX 0
+#define IMX8QM_FLEXCAN0_RX_LSIO_GPIO3_IO29 IMX8QM_FLEXCAN0_RX 3
+#define IMX8QM_FLEXCAN0_TX_DMA_FLEXCAN0_TX IMX8QM_FLEXCAN0_TX 0
+#define IMX8QM_FLEXCAN0_TX_LSIO_GPIO3_IO30 IMX8QM_FLEXCAN0_TX 3
+#define IMX8QM_FLEXCAN1_RX_DMA_FLEXCAN1_RX IMX8QM_FLEXCAN1_RX 0
+#define IMX8QM_FLEXCAN1_RX_LSIO_GPIO3_IO31 IMX8QM_FLEXCAN1_RX 3
+#define IMX8QM_FLEXCAN1_TX_DMA_FLEXCAN1_TX IMX8QM_FLEXCAN1_TX 0
+#define IMX8QM_FLEXCAN1_TX_LSIO_GPIO4_IO00 IMX8QM_FLEXCAN1_TX 3
+#define IMX8QM_FLEXCAN2_RX_DMA_FLEXCAN2_RX IMX8QM_FLEXCAN2_RX 0
+#define IMX8QM_FLEXCAN2_RX_LSIO_GPIO4_IO01 IMX8QM_FLEXCAN2_RX 3
+#define IMX8QM_FLEXCAN2_TX_DMA_FLEXCAN2_TX IMX8QM_FLEXCAN2_TX 0
+#define IMX8QM_FLEXCAN2_TX_LSIO_GPIO4_IO02 IMX8QM_FLEXCAN2_TX 3
+#define IMX8QM_USB_SS3_TC0_DMA_I2C1_SCL IMX8QM_USB_SS3_TC0 0
+#define IMX8QM_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8QM_USB_SS3_TC0 1
+#define IMX8QM_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8QM_USB_SS3_TC0 3
+#define IMX8QM_USB_SS3_TC1_DMA_I2C1_SCL IMX8QM_USB_SS3_TC1 0
+#define IMX8QM_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8QM_USB_SS3_TC1 1
+#define IMX8QM_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8QM_USB_SS3_TC1 3
+#define IMX8QM_USB_SS3_TC2_DMA_I2C1_SDA IMX8QM_USB_SS3_TC2 0
+#define IMX8QM_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8QM_USB_SS3_TC2 1
+#define IMX8QM_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8QM_USB_SS3_TC2 3
+#define IMX8QM_USB_SS3_TC3_DMA_I2C1_SDA IMX8QM_USB_SS3_TC3 0
+#define IMX8QM_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8QM_USB_SS3_TC3 1
+#define IMX8QM_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8QM_USB_SS3_TC3 3
+#define IMX8QM_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8QM_USDHC1_RESET_B 0
+#define IMX8QM_USDHC1_RESET_B_LSIO_GPIO4_IO07 IMX8QM_USDHC1_RESET_B 3
+#define IMX8QM_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8QM_USDHC1_VSELECT 0
+#define IMX8QM_USDHC1_VSELECT_LSIO_GPIO4_IO08 IMX8QM_USDHC1_VSELECT 3
+#define IMX8QM_USDHC2_RESET_B_CONN_USDHC2_RESET_B IMX8QM_USDHC2_RESET_B 0
+#define IMX8QM_USDHC2_RESET_B_LSIO_GPIO4_IO09 IMX8QM_USDHC2_RESET_B 3
+#define IMX8QM_USDHC2_VSELECT_CONN_USDHC2_VSELECT IMX8QM_USDHC2_VSELECT 0
+#define IMX8QM_USDHC2_VSELECT_LSIO_GPIO4_IO10 IMX8QM_USDHC2_VSELECT 3
+#define IMX8QM_USDHC2_WP_CONN_USDHC2_WP IMX8QM_USDHC2_WP 0
+#define IMX8QM_USDHC2_WP_LSIO_GPIO4_IO11 IMX8QM_USDHC2_WP 3
+#define IMX8QM_USDHC2_CD_B_CONN_USDHC2_CD_B IMX8QM_USDHC2_CD_B 0
+#define IMX8QM_USDHC2_CD_B_LSIO_GPIO4_IO12 IMX8QM_USDHC2_CD_B 3
+#define IMX8QM_ENET0_MDIO_CONN_ENET0_MDIO IMX8QM_ENET0_MDIO 0
+#define IMX8QM_ENET0_MDIO_DMA_I2C4_SDA IMX8QM_ENET0_MDIO 1
+#define IMX8QM_ENET0_MDIO_LSIO_GPIO4_IO13 IMX8QM_ENET0_MDIO 3
+#define IMX8QM_ENET0_MDC_CONN_ENET0_MDC IMX8QM_ENET0_MDC 0
+#define IMX8QM_ENET0_MDC_DMA_I2C4_SCL IMX8QM_ENET0_MDC 1
+#define IMX8QM_ENET0_MDC_LSIO_GPIO4_IO14 IMX8QM_ENET0_MDC 3
+#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8QM_ENET0_REFCLK_125M_25M 0
+#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8QM_ENET0_REFCLK_125M_25M 1
+#define IMX8QM_ENET0_REFCLK_125M_25M_LSIO_GPIO4_IO15 IMX8QM_ENET0_REFCLK_125M_25M 3
+#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_REFCLK_125M_25M IMX8QM_ENET1_REFCLK_125M_25M 0
+#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_PPS IMX8QM_ENET1_REFCLK_125M_25M 1
+#define IMX8QM_ENET1_REFCLK_125M_25M_LSIO_GPIO4_IO16 IMX8QM_ENET1_REFCLK_125M_25M 3
+#define IMX8QM_ENET1_MDIO_CONN_ENET1_MDIO IMX8QM_ENET1_MDIO 0
+#define IMX8QM_ENET1_MDIO_DMA_I2C4_SDA IMX8QM_ENET1_MDIO 1
+#define IMX8QM_ENET1_MDIO_LSIO_GPIO4_IO17 IMX8QM_ENET1_MDIO 3
+#define IMX8QM_ENET1_MDC_CONN_ENET1_MDC IMX8QM_ENET1_MDC 0
+#define IMX8QM_ENET1_MDC_DMA_I2C4_SCL IMX8QM_ENET1_MDC 1
+#define IMX8QM_ENET1_MDC_LSIO_GPIO4_IO18 IMX8QM_ENET1_MDC 3
+#define IMX8QM_QSPI1A_SS0_B_LSIO_QSPI1A_SS0_B IMX8QM_QSPI1A_SS0_B 0
+#define IMX8QM_QSPI1A_SS0_B_LSIO_GPIO4_IO19 IMX8QM_QSPI1A_SS0_B 3
+#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SS1_B IMX8QM_QSPI1A_SS1_B 0
+#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SCLK2 IMX8QM_QSPI1A_SS1_B 1
+#define IMX8QM_QSPI1A_SS1_B_LSIO_GPIO4_IO20 IMX8QM_QSPI1A_SS1_B 3
+#define IMX8QM_QSPI1A_SCLK_LSIO_QSPI1A_SCLK IMX8QM_QSPI1A_SCLK 0
+#define IMX8QM_QSPI1A_SCLK_LSIO_GPIO4_IO21 IMX8QM_QSPI1A_SCLK 3
+#define IMX8QM_QSPI1A_DQS_LSIO_QSPI1A_DQS IMX8QM_QSPI1A_DQS 0
+#define IMX8QM_QSPI1A_DQS_LSIO_GPIO4_IO22 IMX8QM_QSPI1A_DQS 3
+#define IMX8QM_QSPI1A_DATA3_LSIO_QSPI1A_DATA3 IMX8QM_QSPI1A_DATA3 0
+#define IMX8QM_QSPI1A_DATA3_DMA_I2C1_SDA IMX8QM_QSPI1A_DATA3 1
+#define IMX8QM_QSPI1A_DATA3_CONN_USB_OTG1_OC IMX8QM_QSPI1A_DATA3 2
+#define IMX8QM_QSPI1A_DATA3_LSIO_GPIO4_IO23 IMX8QM_QSPI1A_DATA3 3
+#define IMX8QM_QSPI1A_DATA2_LSIO_QSPI1A_DATA2 IMX8QM_QSPI1A_DATA2 0
+#define IMX8QM_QSPI1A_DATA2_DMA_I2C1_SCL IMX8QM_QSPI1A_DATA2 1
+#define IMX8QM_QSPI1A_DATA2_CONN_USB_OTG2_PWR IMX8QM_QSPI1A_DATA2 2
+#define IMX8QM_QSPI1A_DATA2_LSIO_GPIO4_IO24 IMX8QM_QSPI1A_DATA2 3
+#define IMX8QM_QSPI1A_DATA1_LSIO_QSPI1A_DATA1 IMX8QM_QSPI1A_DATA1 0
+#define IMX8QM_QSPI1A_DATA1_DMA_I2C1_SDA IMX8QM_QSPI1A_DATA1 1
+#define IMX8QM_QSPI1A_DATA1_CONN_USB_OTG2_OC IMX8QM_QSPI1A_DATA1 2
+#define IMX8QM_QSPI1A_DATA1_LSIO_GPIO4_IO25 IMX8QM_QSPI1A_DATA1 3
+#define IMX8QM_QSPI1A_DATA0_LSIO_QSPI1A_DATA0 IMX8QM_QSPI1A_DATA0 0
+#define IMX8QM_QSPI1A_DATA0_LSIO_GPIO4_IO26 IMX8QM_QSPI1A_DATA0 3
+#define IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8QM_QSPI0A_DATA0 0
+#define IMX8QM_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8QM_QSPI0A_DATA1 0
+#define IMX8QM_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8QM_QSPI0A_DATA2 0
+#define IMX8QM_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8QM_QSPI0A_DATA3 0
+#define IMX8QM_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8QM_QSPI0A_DQS 0
+#define IMX8QM_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8QM_QSPI0A_SS0_B 0
+#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B IMX8QM_QSPI0A_SS1_B 0
+#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SCLK2 IMX8QM_QSPI0A_SS1_B 1
+#define IMX8QM_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8QM_QSPI0A_SCLK 0
+#define IMX8QM_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8QM_QSPI0B_SCLK 0
+#define IMX8QM_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8QM_QSPI0B_DATA0 0
+#define IMX8QM_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8QM_QSPI0B_DATA1 0
+#define IMX8QM_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8QM_QSPI0B_DATA2 0
+#define IMX8QM_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8QM_QSPI0B_DATA3 0
+#define IMX8QM_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8QM_QSPI0B_DQS 0
+#define IMX8QM_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8QM_QSPI0B_SS0_B 0
+#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B IMX8QM_QSPI0B_SS1_B 0
+#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SCLK2 IMX8QM_QSPI0B_SS1_B 1
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8QM_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8QM_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO27 IMX8QM_PCIE_CTRL0_CLKREQ_B 3
+#define IMX8QM_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8QM_PCIE_CTRL0_WAKE_B 0
+#define IMX8QM_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO28 IMX8QM_PCIE_CTRL0_WAKE_B 3
+#define IMX8QM_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8QM_PCIE_CTRL0_PERST_B 0
+#define IMX8QM_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO29 IMX8QM_PCIE_CTRL0_PERST_B 3
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_HSIO_PCIE1_CLKREQ_B IMX8QM_PCIE_CTRL1_CLKREQ_B 0
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_DMA_I2C1_SDA IMX8QM_PCIE_CTRL1_CLKREQ_B 1
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_CONN_USB_OTG2_OC IMX8QM_PCIE_CTRL1_CLKREQ_B 2
+#define IMX8QM_PCIE_CTRL1_CLKREQ_B_LSIO_GPIO4_IO30 IMX8QM_PCIE_CTRL1_CLKREQ_B 3
+#define IMX8QM_PCIE_CTRL1_WAKE_B_HSIO_PCIE1_WAKE_B IMX8QM_PCIE_CTRL1_WAKE_B 0
+#define IMX8QM_PCIE_CTRL1_WAKE_B_DMA_I2C1_SCL IMX8QM_PCIE_CTRL1_WAKE_B 1
+#define IMX8QM_PCIE_CTRL1_WAKE_B_CONN_USB_OTG2_PWR IMX8QM_PCIE_CTRL1_WAKE_B 2
+#define IMX8QM_PCIE_CTRL1_WAKE_B_LSIO_GPIO4_IO31 IMX8QM_PCIE_CTRL1_WAKE_B 3
+#define IMX8QM_PCIE_CTRL1_PERST_B_HSIO_PCIE1_PERST_B IMX8QM_PCIE_CTRL1_PERST_B 0
+#define IMX8QM_PCIE_CTRL1_PERST_B_DMA_I2C1_SCL IMX8QM_PCIE_CTRL1_PERST_B 1
+#define IMX8QM_PCIE_CTRL1_PERST_B_CONN_USB_OTG1_PWR IMX8QM_PCIE_CTRL1_PERST_B 2
+#define IMX8QM_PCIE_CTRL1_PERST_B_LSIO_GPIO5_IO00 IMX8QM_PCIE_CTRL1_PERST_B 3
+#define IMX8QM_USB_HSIC0_DATA_CONN_USB_HSIC0_DATA IMX8QM_USB_HSIC0_DATA 0
+#define IMX8QM_USB_HSIC0_DATA_DMA_I2C1_SDA IMX8QM_USB_HSIC0_DATA 1
+#define IMX8QM_USB_HSIC0_DATA_LSIO_GPIO5_IO01 IMX8QM_USB_HSIC0_DATA 3
+#define IMX8QM_USB_HSIC0_STROBE_CONN_USB_HSIC0_STROBE IMX8QM_USB_HSIC0_STROBE 0
+#define IMX8QM_USB_HSIC0_STROBE_DMA_I2C1_SCL IMX8QM_USB_HSIC0_STROBE 1
+#define IMX8QM_USB_HSIC0_STROBE_LSIO_GPIO5_IO02 IMX8QM_USB_HSIC0_STROBE 3
+#define IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK IMX8QM_EMMC0_CLK 0
+#define IMX8QM_EMMC0_CLK_CONN_NAND_READY_B IMX8QM_EMMC0_CLK 1
+#define IMX8QM_EMMC0_CMD_CONN_EMMC0_CMD IMX8QM_EMMC0_CMD 0
+#define IMX8QM_EMMC0_CMD_CONN_NAND_DQS IMX8QM_EMMC0_CMD 1
+#define IMX8QM_EMMC0_CMD_AUD_MQS_R IMX8QM_EMMC0_CMD 2
+#define IMX8QM_EMMC0_CMD_LSIO_GPIO5_IO03 IMX8QM_EMMC0_CMD 3
+#define IMX8QM_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8QM_EMMC0_DATA0 0
+#define IMX8QM_EMMC0_DATA0_CONN_NAND_DATA00 IMX8QM_EMMC0_DATA0 1
+#define IMX8QM_EMMC0_DATA0_LSIO_GPIO5_IO04 IMX8QM_EMMC0_DATA0 3
+#define IMX8QM_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8QM_EMMC0_DATA1 0
+#define IMX8QM_EMMC0_DATA1_CONN_NAND_DATA01 IMX8QM_EMMC0_DATA1 1
+#define IMX8QM_EMMC0_DATA1_LSIO_GPIO5_IO05 IMX8QM_EMMC0_DATA1 3
+#define IMX8QM_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8QM_EMMC0_DATA2 0
+#define IMX8QM_EMMC0_DATA2_CONN_NAND_DATA02 IMX8QM_EMMC0_DATA2 1
+#define IMX8QM_EMMC0_DATA2_LSIO_GPIO5_IO06 IMX8QM_EMMC0_DATA2 3
+#define IMX8QM_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8QM_EMMC0_DATA3 0
+#define IMX8QM_EMMC0_DATA3_CONN_NAND_DATA03 IMX8QM_EMMC0_DATA3 1
+#define IMX8QM_EMMC0_DATA3_LSIO_GPIO5_IO07 IMX8QM_EMMC0_DATA3 3
+#define IMX8QM_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8QM_EMMC0_DATA4 0
+#define IMX8QM_EMMC0_DATA4_CONN_NAND_DATA04 IMX8QM_EMMC0_DATA4 1
+#define IMX8QM_EMMC0_DATA4_LSIO_GPIO5_IO08 IMX8QM_EMMC0_DATA4 3
+#define IMX8QM_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8QM_EMMC0_DATA5 0
+#define IMX8QM_EMMC0_DATA5_CONN_NAND_DATA05 IMX8QM_EMMC0_DATA5 1
+#define IMX8QM_EMMC0_DATA5_LSIO_GPIO5_IO09 IMX8QM_EMMC0_DATA5 3
+#define IMX8QM_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8QM_EMMC0_DATA6 0
+#define IMX8QM_EMMC0_DATA6_CONN_NAND_DATA06 IMX8QM_EMMC0_DATA6 1
+#define IMX8QM_EMMC0_DATA6_LSIO_GPIO5_IO10 IMX8QM_EMMC0_DATA6 3
+#define IMX8QM_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8QM_EMMC0_DATA7 0
+#define IMX8QM_EMMC0_DATA7_CONN_NAND_DATA07 IMX8QM_EMMC0_DATA7 1
+#define IMX8QM_EMMC0_DATA7_LSIO_GPIO5_IO11 IMX8QM_EMMC0_DATA7 3
+#define IMX8QM_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8QM_EMMC0_STROBE 0
+#define IMX8QM_EMMC0_STROBE_CONN_NAND_CLE IMX8QM_EMMC0_STROBE 1
+#define IMX8QM_EMMC0_STROBE_LSIO_GPIO5_IO12 IMX8QM_EMMC0_STROBE 3
+#define IMX8QM_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8QM_EMMC0_RESET_B 0
+#define IMX8QM_EMMC0_RESET_B_CONN_NAND_WP_B IMX8QM_EMMC0_RESET_B 1
+#define IMX8QM_EMMC0_RESET_B_CONN_USDHC1_VSELECT IMX8QM_EMMC0_RESET_B 2
+#define IMX8QM_EMMC0_RESET_B_LSIO_GPIO5_IO13 IMX8QM_EMMC0_RESET_B 3
+#define IMX8QM_USDHC1_CLK_CONN_USDHC1_CLK IMX8QM_USDHC1_CLK 0
+#define IMX8QM_USDHC1_CLK_AUD_MQS_R IMX8QM_USDHC1_CLK 1
+#define IMX8QM_USDHC1_CMD_CONN_USDHC1_CMD IMX8QM_USDHC1_CMD 0
+#define IMX8QM_USDHC1_CMD_AUD_MQS_L IMX8QM_USDHC1_CMD 1
+#define IMX8QM_USDHC1_CMD_LSIO_GPIO5_IO14 IMX8QM_USDHC1_CMD 3
+#define IMX8QM_USDHC1_DATA0_CONN_USDHC1_DATA0 IMX8QM_USDHC1_DATA0 0
+#define IMX8QM_USDHC1_DATA0_CONN_NAND_RE_N IMX8QM_USDHC1_DATA0 1
+#define IMX8QM_USDHC1_DATA0_LSIO_GPIO5_IO15 IMX8QM_USDHC1_DATA0 3
+#define IMX8QM_USDHC1_DATA1_CONN_USDHC1_DATA1 IMX8QM_USDHC1_DATA1 0
+#define IMX8QM_USDHC1_DATA1_CONN_NAND_RE_P IMX8QM_USDHC1_DATA1 1
+#define IMX8QM_USDHC1_DATA1_LSIO_GPIO5_IO16 IMX8QM_USDHC1_DATA1 3
+#define IMX8QM_USDHC1_DATA2_CONN_USDHC1_DATA2 IMX8QM_USDHC1_DATA2 0
+#define IMX8QM_USDHC1_DATA2_CONN_NAND_DQS_N IMX8QM_USDHC1_DATA2 1
+#define IMX8QM_USDHC1_DATA2_LSIO_GPIO5_IO17 IMX8QM_USDHC1_DATA2 3
+#define IMX8QM_USDHC1_DATA3_CONN_USDHC1_DATA3 IMX8QM_USDHC1_DATA3 0
+#define IMX8QM_USDHC1_DATA3_CONN_NAND_DQS_P IMX8QM_USDHC1_DATA3 1
+#define IMX8QM_USDHC1_DATA3_LSIO_GPIO5_IO18 IMX8QM_USDHC1_DATA3 3
+#define IMX8QM_USDHC1_DATA4_CONN_USDHC1_DATA4 IMX8QM_USDHC1_DATA4 0
+#define IMX8QM_USDHC1_DATA4_CONN_NAND_CE0_B IMX8QM_USDHC1_DATA4 1
+#define IMX8QM_USDHC1_DATA4_AUD_MQS_R IMX8QM_USDHC1_DATA4 2
+#define IMX8QM_USDHC1_DATA4_LSIO_GPIO5_IO19 IMX8QM_USDHC1_DATA4 3
+#define IMX8QM_USDHC1_DATA5_CONN_USDHC1_DATA5 IMX8QM_USDHC1_DATA5 0
+#define IMX8QM_USDHC1_DATA5_CONN_NAND_RE_B IMX8QM_USDHC1_DATA5 1
+#define IMX8QM_USDHC1_DATA5_AUD_MQS_L IMX8QM_USDHC1_DATA5 2
+#define IMX8QM_USDHC1_DATA5_LSIO_GPIO5_IO20 IMX8QM_USDHC1_DATA5 3
+#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_DATA6 IMX8QM_USDHC1_DATA6 0
+#define IMX8QM_USDHC1_DATA6_CONN_NAND_WE_B IMX8QM_USDHC1_DATA6 1
+#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_WP IMX8QM_USDHC1_DATA6 2
+#define IMX8QM_USDHC1_DATA6_LSIO_GPIO5_IO21 IMX8QM_USDHC1_DATA6 3
+#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_DATA7 IMX8QM_USDHC1_DATA7 0
+#define IMX8QM_USDHC1_DATA7_CONN_NAND_ALE IMX8QM_USDHC1_DATA7 1
+#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_CD_B IMX8QM_USDHC1_DATA7 2
+#define IMX8QM_USDHC1_DATA7_LSIO_GPIO5_IO22 IMX8QM_USDHC1_DATA7 3
+#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_STROBE IMX8QM_USDHC1_STROBE 0
+#define IMX8QM_USDHC1_STROBE_CONN_NAND_CE1_B IMX8QM_USDHC1_STROBE 1
+#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_RESET_B IMX8QM_USDHC1_STROBE 2
+#define IMX8QM_USDHC1_STROBE_LSIO_GPIO5_IO23 IMX8QM_USDHC1_STROBE 3
+#define IMX8QM_USDHC2_CLK_CONN_USDHC2_CLK IMX8QM_USDHC2_CLK 0
+#define IMX8QM_USDHC2_CLK_AUD_MQS_R IMX8QM_USDHC2_CLK 1
+#define IMX8QM_USDHC2_CLK_LSIO_GPIO5_IO24 IMX8QM_USDHC2_CLK 3
+#define IMX8QM_USDHC2_CMD_CONN_USDHC2_CMD IMX8QM_USDHC2_CMD 0
+#define IMX8QM_USDHC2_CMD_AUD_MQS_L IMX8QM_USDHC2_CMD 1
+#define IMX8QM_USDHC2_CMD_LSIO_GPIO5_IO25 IMX8QM_USDHC2_CMD 3
+#define IMX8QM_USDHC2_DATA0_CONN_USDHC2_DATA0 IMX8QM_USDHC2_DATA0 0
+#define IMX8QM_USDHC2_DATA0_DMA_UART4_RX IMX8QM_USDHC2_DATA0 1
+#define IMX8QM_USDHC2_DATA0_LSIO_GPIO5_IO26 IMX8QM_USDHC2_DATA0 3
+#define IMX8QM_USDHC2_DATA1_CONN_USDHC2_DATA1 IMX8QM_USDHC2_DATA1 0
+#define IMX8QM_USDHC2_DATA1_DMA_UART4_TX IMX8QM_USDHC2_DATA1 1
+#define IMX8QM_USDHC2_DATA1_LSIO_GPIO5_IO27 IMX8QM_USDHC2_DATA1 3
+#define IMX8QM_USDHC2_DATA2_CONN_USDHC2_DATA2 IMX8QM_USDHC2_DATA2 0
+#define IMX8QM_USDHC2_DATA2_DMA_UART4_CTS_B IMX8QM_USDHC2_DATA2 1
+#define IMX8QM_USDHC2_DATA2_LSIO_GPIO5_IO28 IMX8QM_USDHC2_DATA2 3
+#define IMX8QM_USDHC2_DATA3_CONN_USDHC2_DATA3 IMX8QM_USDHC2_DATA3 0
+#define IMX8QM_USDHC2_DATA3_DMA_UART4_RTS_B IMX8QM_USDHC2_DATA3 1
+#define IMX8QM_USDHC2_DATA3_LSIO_GPIO5_IO29 IMX8QM_USDHC2_DATA3 3
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8QM_ENET0_RGMII_TXC 0
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8QM_ENET0_RGMII_TXC 1
+#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8QM_ENET0_RGMII_TXC 2
+#define IMX8QM_ENET0_RGMII_TXC_LSIO_GPIO5_IO30 IMX8QM_ENET0_RGMII_TXC 3
+#define IMX8QM_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8QM_ENET0_RGMII_TX_CTL 0
+#define IMX8QM_ENET0_RGMII_TX_CTL_LSIO_GPIO5_IO31 IMX8QM_ENET0_RGMII_TX_CTL 3
+#define IMX8QM_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8QM_ENET0_RGMII_TXD0 0
+#define IMX8QM_ENET0_RGMII_TXD0_LSIO_GPIO6_IO00 IMX8QM_ENET0_RGMII_TXD0 3
+#define IMX8QM_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8QM_ENET0_RGMII_TXD1 0
+#define IMX8QM_ENET0_RGMII_TXD1_LSIO_GPIO6_IO01 IMX8QM_ENET0_RGMII_TXD1 3
+#define IMX8QM_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8QM_ENET0_RGMII_TXD2 0
+#define IMX8QM_ENET0_RGMII_TXD2_DMA_UART3_TX IMX8QM_ENET0_RGMII_TXD2 1
+#define IMX8QM_ENET0_RGMII_TXD2_VPU_TSI_S1_VID IMX8QM_ENET0_RGMII_TXD2 2
+#define IMX8QM_ENET0_RGMII_TXD2_LSIO_GPIO6_IO02 IMX8QM_ENET0_RGMII_TXD2 3
+#define IMX8QM_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8QM_ENET0_RGMII_TXD3 0
+#define IMX8QM_ENET0_RGMII_TXD3_DMA_UART3_RTS_B IMX8QM_ENET0_RGMII_TXD3 1
+#define IMX8QM_ENET0_RGMII_TXD3_VPU_TSI_S1_SYNC IMX8QM_ENET0_RGMII_TXD3 2
+#define IMX8QM_ENET0_RGMII_TXD3_LSIO_GPIO6_IO03 IMX8QM_ENET0_RGMII_TXD3 3
+#define IMX8QM_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8QM_ENET0_RGMII_RXC 0
+#define IMX8QM_ENET0_RGMII_RXC_DMA_UART3_CTS_B IMX8QM_ENET0_RGMII_RXC 1
+#define IMX8QM_ENET0_RGMII_RXC_VPU_TSI_S1_DATA IMX8QM_ENET0_RGMII_RXC 2
+#define IMX8QM_ENET0_RGMII_RXC_LSIO_GPIO6_IO04 IMX8QM_ENET0_RGMII_RXC 3
+#define IMX8QM_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8QM_ENET0_RGMII_RX_CTL 0
+#define IMX8QM_ENET0_RGMII_RX_CTL_VPU_TSI_S0_VID IMX8QM_ENET0_RGMII_RX_CTL 2
+#define IMX8QM_ENET0_RGMII_RX_CTL_LSIO_GPIO6_IO05 IMX8QM_ENET0_RGMII_RX_CTL 3
+#define IMX8QM_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8QM_ENET0_RGMII_RXD0 0
+#define IMX8QM_ENET0_RGMII_RXD0_VPU_TSI_S0_SYNC IMX8QM_ENET0_RGMII_RXD0 2
+#define IMX8QM_ENET0_RGMII_RXD0_LSIO_GPIO6_IO06 IMX8QM_ENET0_RGMII_RXD0 3
+#define IMX8QM_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8QM_ENET0_RGMII_RXD1 0
+#define IMX8QM_ENET0_RGMII_RXD1_VPU_TSI_S0_DATA IMX8QM_ENET0_RGMII_RXD1 2
+#define IMX8QM_ENET0_RGMII_RXD1_LSIO_GPIO6_IO07 IMX8QM_ENET0_RGMII_RXD1 3
+#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8QM_ENET0_RGMII_RXD2 0
+#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8QM_ENET0_RGMII_RXD2 1
+#define IMX8QM_ENET0_RGMII_RXD2_VPU_TSI_S0_CLK IMX8QM_ENET0_RGMII_RXD2 2
+#define IMX8QM_ENET0_RGMII_RXD2_LSIO_GPIO6_IO08 IMX8QM_ENET0_RGMII_RXD2 3
+#define IMX8QM_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8QM_ENET0_RGMII_RXD3 0
+#define IMX8QM_ENET0_RGMII_RXD3_DMA_UART3_RX IMX8QM_ENET0_RGMII_RXD3 1
+#define IMX8QM_ENET0_RGMII_RXD3_VPU_TSI_S1_CLK IMX8QM_ENET0_RGMII_RXD3 2
+#define IMX8QM_ENET0_RGMII_RXD3_LSIO_GPIO6_IO09 IMX8QM_ENET0_RGMII_RXD3 3
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RGMII_TXC IMX8QM_ENET1_RGMII_TXC 0
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_OUT IMX8QM_ENET1_RGMII_TXC 1
+#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_IN IMX8QM_ENET1_RGMII_TXC 2
+#define IMX8QM_ENET1_RGMII_TXC_LSIO_GPIO6_IO10 IMX8QM_ENET1_RGMII_TXC 3
+#define IMX8QM_ENET1_RGMII_TX_CTL_CONN_ENET1_RGMII_TX_CTL IMX8QM_ENET1_RGMII_TX_CTL 0
+#define IMX8QM_ENET1_RGMII_TX_CTL_LSIO_GPIO6_IO11 IMX8QM_ENET1_RGMII_TX_CTL 3
+#define IMX8QM_ENET1_RGMII_TXD0_CONN_ENET1_RGMII_TXD0 IMX8QM_ENET1_RGMII_TXD0 0
+#define IMX8QM_ENET1_RGMII_TXD0_LSIO_GPIO6_IO12 IMX8QM_ENET1_RGMII_TXD0 3
+#define IMX8QM_ENET1_RGMII_TXD1_CONN_ENET1_RGMII_TXD1 IMX8QM_ENET1_RGMII_TXD1 0
+#define IMX8QM_ENET1_RGMII_TXD1_LSIO_GPIO6_IO13 IMX8QM_ENET1_RGMII_TXD1 3
+#define IMX8QM_ENET1_RGMII_TXD2_CONN_ENET1_RGMII_TXD2 IMX8QM_ENET1_RGMII_TXD2 0
+#define IMX8QM_ENET1_RGMII_TXD2_DMA_UART3_TX IMX8QM_ENET1_RGMII_TXD2 1
+#define IMX8QM_ENET1_RGMII_TXD2_VPU_TSI_S1_VID IMX8QM_ENET1_RGMII_TXD2 2
+#define IMX8QM_ENET1_RGMII_TXD2_LSIO_GPIO6_IO14 IMX8QM_ENET1_RGMII_TXD2 3
+#define IMX8QM_ENET1_RGMII_TXD3_CONN_ENET1_RGMII_TXD3 IMX8QM_ENET1_RGMII_TXD3 0
+#define IMX8QM_ENET1_RGMII_TXD3_DMA_UART3_RTS_B IMX8QM_ENET1_RGMII_TXD3 1
+#define IMX8QM_ENET1_RGMII_TXD3_VPU_TSI_S1_SYNC IMX8QM_ENET1_RGMII_TXD3 2
+#define IMX8QM_ENET1_RGMII_TXD3_LSIO_GPIO6_IO15 IMX8QM_ENET1_RGMII_TXD3 3
+#define IMX8QM_ENET1_RGMII_RXC_CONN_ENET1_RGMII_RXC IMX8QM_ENET1_RGMII_RXC 0
+#define IMX8QM_ENET1_RGMII_RXC_DMA_UART3_CTS_B IMX8QM_ENET1_RGMII_RXC 1
+#define IMX8QM_ENET1_RGMII_RXC_VPU_TSI_S1_DATA IMX8QM_ENET1_RGMII_RXC 2
+#define IMX8QM_ENET1_RGMII_RXC_LSIO_GPIO6_IO16 IMX8QM_ENET1_RGMII_RXC 3
+#define IMX8QM_ENET1_RGMII_RX_CTL_CONN_ENET1_RGMII_RX_CTL IMX8QM_ENET1_RGMII_RX_CTL 0
+#define IMX8QM_ENET1_RGMII_RX_CTL_VPU_TSI_S0_VID IMX8QM_ENET1_RGMII_RX_CTL 2
+#define IMX8QM_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO17 IMX8QM_ENET1_RGMII_RX_CTL 3
+#define IMX8QM_ENET1_RGMII_RXD0_CONN_ENET1_RGMII_RXD0 IMX8QM_ENET1_RGMII_RXD0 0
+#define IMX8QM_ENET1_RGMII_RXD0_VPU_TSI_S0_SYNC IMX8QM_ENET1_RGMII_RXD0 2
+#define IMX8QM_ENET1_RGMII_RXD0_LSIO_GPIO6_IO18 IMX8QM_ENET1_RGMII_RXD0 3
+#define IMX8QM_ENET1_RGMII_RXD1_CONN_ENET1_RGMII_RXD1 IMX8QM_ENET1_RGMII_RXD1 0
+#define IMX8QM_ENET1_RGMII_RXD1_VPU_TSI_S0_DATA IMX8QM_ENET1_RGMII_RXD1 2
+#define IMX8QM_ENET1_RGMII_RXD1_LSIO_GPIO6_IO19 IMX8QM_ENET1_RGMII_RXD1 3
+#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RGMII_RXD2 IMX8QM_ENET1_RGMII_RXD2 0
+#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RMII_RX_ER IMX8QM_ENET1_RGMII_RXD2 1
+#define IMX8QM_ENET1_RGMII_RXD2_VPU_TSI_S0_CLK IMX8QM_ENET1_RGMII_RXD2 2
+#define IMX8QM_ENET1_RGMII_RXD2_LSIO_GPIO6_IO20 IMX8QM_ENET1_RGMII_RXD2 3
+#define IMX8QM_ENET1_RGMII_RXD3_CONN_ENET1_RGMII_RXD3 IMX8QM_ENET1_RGMII_RXD3 0
+#define IMX8QM_ENET1_RGMII_RXD3_DMA_UART3_RX IMX8QM_ENET1_RGMII_RXD3 1
+#define IMX8QM_ENET1_RGMII_RXD3_VPU_TSI_S1_CLK IMX8QM_ENET1_RGMII_RXD3 2
+#define IMX8QM_ENET1_RGMII_RXD3_LSIO_GPIO6_IO21 IMX8QM_ENET1_RGMII_RXD3 3
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB_PAD IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB 0
+#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA_PAD IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA 0
+
+#endif /* _IMX8QM_PADS_H */
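
[Editor's note, not part of the patch] The "<pin_id mux_mode>" macros defined above are intended to be expanded inside an fsl,pins property of a board DTS pinctrl group: each macro contributes the pin id and mux mode cells, and the board adds a third cell with the pad configuration. Below is a minimal, illustrative sketch only; the group name and the 0x06000020 pad-config value are assumptions for illustration, not taken from this patch.

	/* hypothetical board DTS fragment using the IMX8QM_* pad macros */
	pinctrl_lpuart0: lpuart0grp {
		fsl,pins = <
			/* each macro expands to <pin_id mux_mode>; the trailing cell is the pad config */
			IMX8QM_UART0_RX_DMA_UART0_RX	0x06000020	/* pad-config value is illustrative */
			IMX8QM_UART0_TX_DMA_UART0_TX	0x06000020
		>;
	};
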
diff --git a/include/dt-bindings/pinctrl/pads-imx8qxp.h b/include/dt-bindings/pinctrl/pads-imx8qxp.h
new file mode 100644
index 000000000000..fbfee7ecf844
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pads-imx8qxp.h
@@ -0,0 +1,751 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ */
+
+#ifndef _IMX8QXP_PADS_H
+#define _IMX8QXP_PADS_H
+
+/* pin id */
+#define IMX8QXP_PCIE_CTRL0_PERST_B 0
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B 1
+#define IMX8QXP_PCIE_CTRL0_WAKE_B 2
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_PCIESEP 3
+#define IMX8QXP_USB_SS3_TC0 4
+#define IMX8QXP_USB_SS3_TC1 5
+#define IMX8QXP_USB_SS3_TC2 6
+#define IMX8QXP_USB_SS3_TC3 7
+#define IMX8QXP_COMP_CTL_GPIO_3V3_USB3IO 8
+#define IMX8QXP_EMMC0_CLK 9
+#define IMX8QXP_EMMC0_CMD 10
+#define IMX8QXP_EMMC0_DATA0 11
+#define IMX8QXP_EMMC0_DATA1 12
+#define IMX8QXP_EMMC0_DATA2 13
+#define IMX8QXP_EMMC0_DATA3 14
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 15
+#define IMX8QXP_EMMC0_DATA4 16
+#define IMX8QXP_EMMC0_DATA5 17
+#define IMX8QXP_EMMC0_DATA6 18
+#define IMX8QXP_EMMC0_DATA7 19
+#define IMX8QXP_EMMC0_STROBE 20
+#define IMX8QXP_EMMC0_RESET_B 21
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX1 22
+#define IMX8QXP_USDHC1_RESET_B 23
+#define IMX8QXP_USDHC1_VSELECT 24
+#define IMX8QXP_CTL_NAND_RE_P_N 25
+#define IMX8QXP_USDHC1_WP 26
+#define IMX8QXP_USDHC1_CD_B 27
+#define IMX8QXP_CTL_NAND_DQS_P_N 28
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSELSEP 29
+#define IMX8QXP_USDHC1_CLK 30
+#define IMX8QXP_USDHC1_CMD 31
+#define IMX8QXP_USDHC1_DATA0 32
+#define IMX8QXP_USDHC1_DATA1 33
+#define IMX8QXP_USDHC1_DATA2 34
+#define IMX8QXP_USDHC1_DATA3 35
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSEL3 36
+#define IMX8QXP_ENET0_RGMII_TXC 37
+#define IMX8QXP_ENET0_RGMII_TX_CTL 38
+#define IMX8QXP_ENET0_RGMII_TXD0 39
+#define IMX8QXP_ENET0_RGMII_TXD1 40
+#define IMX8QXP_ENET0_RGMII_TXD2 41
+#define IMX8QXP_ENET0_RGMII_TXD3 42
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 43
+#define IMX8QXP_ENET0_RGMII_RXC 44
+#define IMX8QXP_ENET0_RGMII_RX_CTL 45
+#define IMX8QXP_ENET0_RGMII_RXD0 46
+#define IMX8QXP_ENET0_RGMII_RXD1 47
+#define IMX8QXP_ENET0_RGMII_RXD2 48
+#define IMX8QXP_ENET0_RGMII_RXD3 49
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 50
+#define IMX8QXP_ENET0_REFCLK_125M_25M 51
+#define IMX8QXP_ENET0_MDIO 52
+#define IMX8QXP_ENET0_MDC 53
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOCT 54
+#define IMX8QXP_ESAI0_FSR 55
+#define IMX8QXP_ESAI0_FST 56
+#define IMX8QXP_ESAI0_SCKR 57
+#define IMX8QXP_ESAI0_SCKT 58
+#define IMX8QXP_ESAI0_TX0 59
+#define IMX8QXP_ESAI0_TX1 60
+#define IMX8QXP_ESAI0_TX2_RX3 61
+#define IMX8QXP_ESAI0_TX3_RX2 62
+#define IMX8QXP_ESAI0_TX4_RX1 63
+#define IMX8QXP_ESAI0_TX5_RX0 64
+#define IMX8QXP_SPDIF0_RX 65
+#define IMX8QXP_SPDIF0_TX 66
+#define IMX8QXP_SPDIF0_EXT_CLK 67
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHB 68
+#define IMX8QXP_SPI3_SCK 69
+#define IMX8QXP_SPI3_SDO 70
+#define IMX8QXP_SPI3_SDI 71
+#define IMX8QXP_SPI3_CS0 72
+#define IMX8QXP_SPI3_CS1 73
+#define IMX8QXP_MCLK_IN1 74
+#define IMX8QXP_MCLK_IN0 75
+#define IMX8QXP_MCLK_OUT0 76
+#define IMX8QXP_UART1_TX 77
+#define IMX8QXP_UART1_RX 78
+#define IMX8QXP_UART1_RTS_B 79
+#define IMX8QXP_UART1_CTS_B 80
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHK 81
+#define IMX8QXP_SAI0_TXD 82
+#define IMX8QXP_SAI0_TXC 83
+#define IMX8QXP_SAI0_RXD 84
+#define IMX8QXP_SAI0_TXFS 85
+#define IMX8QXP_SAI1_RXD 86
+#define IMX8QXP_SAI1_RXC 87
+#define IMX8QXP_SAI1_RXFS 88
+#define IMX8QXP_SPI2_CS0 89
+#define IMX8QXP_SPI2_SDO 90
+#define IMX8QXP_SPI2_SDI 91
+#define IMX8QXP_SPI2_SCK 92
+#define IMX8QXP_SPI0_SCK 93
+#define IMX8QXP_SPI0_SDI 94
+#define IMX8QXP_SPI0_SDO 95
+#define IMX8QXP_SPI0_CS1 96
+#define IMX8QXP_SPI0_CS0 97
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHT 98
+#define IMX8QXP_ADC_IN1 99
+#define IMX8QXP_ADC_IN0 100
+#define IMX8QXP_ADC_IN3 101
+#define IMX8QXP_ADC_IN2 102
+#define IMX8QXP_ADC_IN5 103
+#define IMX8QXP_ADC_IN4 104
+#define IMX8QXP_FLEXCAN0_RX 105
+#define IMX8QXP_FLEXCAN0_TX 106
+#define IMX8QXP_FLEXCAN1_RX 107
+#define IMX8QXP_FLEXCAN1_TX 108
+#define IMX8QXP_FLEXCAN2_RX 109
+#define IMX8QXP_FLEXCAN2_TX 110
+#define IMX8QXP_UART0_RX 111
+#define IMX8QXP_UART0_TX 112
+#define IMX8QXP_UART2_TX 113
+#define IMX8QXP_UART2_RX 114
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOLH 115
+#define IMX8QXP_MIPI_DSI0_I2C0_SCL 116
+#define IMX8QXP_MIPI_DSI0_I2C0_SDA 117
+#define IMX8QXP_MIPI_DSI0_GPIO0_00 118
+#define IMX8QXP_MIPI_DSI0_GPIO0_01 119
+#define IMX8QXP_MIPI_DSI1_I2C0_SCL 120
+#define IMX8QXP_MIPI_DSI1_I2C0_SDA 121
+#define IMX8QXP_MIPI_DSI1_GPIO0_00 122
+#define IMX8QXP_MIPI_DSI1_GPIO0_01 123
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO 124
+#define IMX8QXP_JTAG_TRST_B 125
+#define IMX8QXP_PMIC_I2C_SCL 126
+#define IMX8QXP_PMIC_I2C_SDA 127
+#define IMX8QXP_PMIC_INT_B 128
+#define IMX8QXP_SCU_GPIO0_00 129
+#define IMX8QXP_SCU_GPIO0_01 130
+#define IMX8QXP_SCU_PMIC_STANDBY 131
+#define IMX8QXP_SCU_BOOT_MODE0 132
+#define IMX8QXP_SCU_BOOT_MODE1 133
+#define IMX8QXP_SCU_BOOT_MODE2 134
+#define IMX8QXP_SCU_BOOT_MODE3 135
+#define IMX8QXP_CSI_D00 136
+#define IMX8QXP_CSI_D01 137
+#define IMX8QXP_CSI_D02 138
+#define IMX8QXP_CSI_D03 139
+#define IMX8QXP_CSI_D04 140
+#define IMX8QXP_CSI_D05 141
+#define IMX8QXP_CSI_D06 142
+#define IMX8QXP_CSI_D07 143
+#define IMX8QXP_CSI_HSYNC 144
+#define IMX8QXP_CSI_VSYNC 145
+#define IMX8QXP_CSI_PCLK 146
+#define IMX8QXP_CSI_MCLK 147
+#define IMX8QXP_CSI_EN 148
+#define IMX8QXP_CSI_RESET 149
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHD 150
+#define IMX8QXP_MIPI_CSI0_MCLK_OUT 151
+#define IMX8QXP_MIPI_CSI0_I2C0_SCL 152
+#define IMX8QXP_MIPI_CSI0_I2C0_SDA 153
+#define IMX8QXP_MIPI_CSI0_GPIO0_01 154
+#define IMX8QXP_MIPI_CSI0_GPIO0_00 155
+#define IMX8QXP_QSPI0A_DATA0 156
+#define IMX8QXP_QSPI0A_DATA1 157
+#define IMX8QXP_QSPI0A_DATA2 158
+#define IMX8QXP_QSPI0A_DATA3 159
+#define IMX8QXP_QSPI0A_DQS 160
+#define IMX8QXP_QSPI0A_SS0_B 161
+#define IMX8QXP_QSPI0A_SS1_B 162
+#define IMX8QXP_QSPI0A_SCLK 163
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0A 164
+#define IMX8QXP_QSPI0B_SCLK 165
+#define IMX8QXP_QSPI0B_DATA0 166
+#define IMX8QXP_QSPI0B_DATA1 167
+#define IMX8QXP_QSPI0B_DATA2 168
+#define IMX8QXP_QSPI0B_DATA3 169
+#define IMX8QXP_QSPI0B_DQS 170
+#define IMX8QXP_QSPI0B_SS0_B 171
+#define IMX8QXP_QSPI0B_SS1_B 172
+#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0B 173
+
+/*
+ * format: <pin_id mux_mode>
+ */
+#define IMX8QXP_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8QXP_PCIE_CTRL0_PERST_B 0
+#define IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 IMX8QXP_PCIE_CTRL0_PERST_B 4
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8QXP_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 IMX8QXP_PCIE_CTRL0_CLKREQ_B 4
+#define IMX8QXP_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8QXP_PCIE_CTRL0_WAKE_B 0
+#define IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 IMX8QXP_PCIE_CTRL0_WAKE_B 4
+#define IMX8QXP_USB_SS3_TC0_ADMA_I2C1_SCL IMX8QXP_USB_SS3_TC0 0
+#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8QXP_USB_SS3_TC0 1
+#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG2_PWR IMX8QXP_USB_SS3_TC0 2
+#define IMX8QXP_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8QXP_USB_SS3_TC0 4
+#define IMX8QXP_USB_SS3_TC1_ADMA_I2C1_SCL IMX8QXP_USB_SS3_TC1 0
+#define IMX8QXP_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8QXP_USB_SS3_TC1 1
+#define IMX8QXP_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8QXP_USB_SS3_TC1 4
+#define IMX8QXP_USB_SS3_TC2_ADMA_I2C1_SDA IMX8QXP_USB_SS3_TC2 0
+#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8QXP_USB_SS3_TC2 1
+#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG2_OC IMX8QXP_USB_SS3_TC2 2
+#define IMX8QXP_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8QXP_USB_SS3_TC2 4
+#define IMX8QXP_USB_SS3_TC3_ADMA_I2C1_SDA IMX8QXP_USB_SS3_TC3 0
+#define IMX8QXP_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8QXP_USB_SS3_TC3 1
+#define IMX8QXP_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8QXP_USB_SS3_TC3 4
+#define IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK IMX8QXP_EMMC0_CLK 0
+#define IMX8QXP_EMMC0_CLK_CONN_NAND_READY_B IMX8QXP_EMMC0_CLK 1
+#define IMX8QXP_EMMC0_CLK_LSIO_GPIO4_IO07 IMX8QXP_EMMC0_CLK 4
+#define IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD IMX8QXP_EMMC0_CMD 0
+#define IMX8QXP_EMMC0_CMD_CONN_NAND_DQS IMX8QXP_EMMC0_CMD 1
+#define IMX8QXP_EMMC0_CMD_LSIO_GPIO4_IO08 IMX8QXP_EMMC0_CMD 4
+#define IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8QXP_EMMC0_DATA0 0
+#define IMX8QXP_EMMC0_DATA0_CONN_NAND_DATA00 IMX8QXP_EMMC0_DATA0 1
+#define IMX8QXP_EMMC0_DATA0_LSIO_GPIO4_IO09 IMX8QXP_EMMC0_DATA0 4
+#define IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8QXP_EMMC0_DATA1 0
+#define IMX8QXP_EMMC0_DATA1_CONN_NAND_DATA01 IMX8QXP_EMMC0_DATA1 1
+#define IMX8QXP_EMMC0_DATA1_LSIO_GPIO4_IO10 IMX8QXP_EMMC0_DATA1 4
+#define IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8QXP_EMMC0_DATA2 0
+#define IMX8QXP_EMMC0_DATA2_CONN_NAND_DATA02 IMX8QXP_EMMC0_DATA2 1
+#define IMX8QXP_EMMC0_DATA2_LSIO_GPIO4_IO11 IMX8QXP_EMMC0_DATA2 4
+#define IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8QXP_EMMC0_DATA3 0
+#define IMX8QXP_EMMC0_DATA3_CONN_NAND_DATA03 IMX8QXP_EMMC0_DATA3 1
+#define IMX8QXP_EMMC0_DATA3_LSIO_GPIO4_IO12 IMX8QXP_EMMC0_DATA3 4
+#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8QXP_EMMC0_DATA4 0
+#define IMX8QXP_EMMC0_DATA4_CONN_NAND_DATA04 IMX8QXP_EMMC0_DATA4 1
+#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_WP IMX8QXP_EMMC0_DATA4 3
+#define IMX8QXP_EMMC0_DATA4_LSIO_GPIO4_IO13 IMX8QXP_EMMC0_DATA4 4
+#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8QXP_EMMC0_DATA5 0
+#define IMX8QXP_EMMC0_DATA5_CONN_NAND_DATA05 IMX8QXP_EMMC0_DATA5 1
+#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_VSELECT IMX8QXP_EMMC0_DATA5 3
+#define IMX8QXP_EMMC0_DATA5_LSIO_GPIO4_IO14 IMX8QXP_EMMC0_DATA5 4
+#define IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8QXP_EMMC0_DATA6 0
+#define IMX8QXP_EMMC0_DATA6_CONN_NAND_DATA06 IMX8QXP_EMMC0_DATA6 1
+#define IMX8QXP_EMMC0_DATA6_CONN_MLB_CLK IMX8QXP_EMMC0_DATA6 3
+#define IMX8QXP_EMMC0_DATA6_LSIO_GPIO4_IO15 IMX8QXP_EMMC0_DATA6 4
+#define IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8QXP_EMMC0_DATA7 0
+#define IMX8QXP_EMMC0_DATA7_CONN_NAND_DATA07 IMX8QXP_EMMC0_DATA7 1
+#define IMX8QXP_EMMC0_DATA7_CONN_MLB_SIG IMX8QXP_EMMC0_DATA7 3
+#define IMX8QXP_EMMC0_DATA7_LSIO_GPIO4_IO16 IMX8QXP_EMMC0_DATA7 4
+#define IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8QXP_EMMC0_STROBE 0
+#define IMX8QXP_EMMC0_STROBE_CONN_NAND_CLE IMX8QXP_EMMC0_STROBE 1
+#define IMX8QXP_EMMC0_STROBE_CONN_MLB_DATA IMX8QXP_EMMC0_STROBE 3
+#define IMX8QXP_EMMC0_STROBE_LSIO_GPIO4_IO17 IMX8QXP_EMMC0_STROBE 4
+#define IMX8QXP_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8QXP_EMMC0_RESET_B 0
+#define IMX8QXP_EMMC0_RESET_B_CONN_NAND_WP_B IMX8QXP_EMMC0_RESET_B 1
+#define IMX8QXP_EMMC0_RESET_B_LSIO_GPIO4_IO18 IMX8QXP_EMMC0_RESET_B 4
+#define IMX8QXP_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8QXP_USDHC1_RESET_B 0
+#define IMX8QXP_USDHC1_RESET_B_CONN_NAND_RE_N IMX8QXP_USDHC1_RESET_B 1
+#define IMX8QXP_USDHC1_RESET_B_ADMA_SPI2_SCK IMX8QXP_USDHC1_RESET_B 2
+#define IMX8QXP_USDHC1_RESET_B_LSIO_GPIO4_IO19 IMX8QXP_USDHC1_RESET_B 4
+#define IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8QXP_USDHC1_VSELECT 0
+#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_P IMX8QXP_USDHC1_VSELECT 1
+#define IMX8QXP_USDHC1_VSELECT_ADMA_SPI2_SDO IMX8QXP_USDHC1_VSELECT 2
+#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_B IMX8QXP_USDHC1_VSELECT 3
+#define IMX8QXP_USDHC1_VSELECT_LSIO_GPIO4_IO20 IMX8QXP_USDHC1_VSELECT 4
+#define IMX8QXP_USDHC1_WP_CONN_USDHC1_WP IMX8QXP_USDHC1_WP 0
+#define IMX8QXP_USDHC1_WP_CONN_NAND_DQS_N IMX8QXP_USDHC1_WP 1
+#define IMX8QXP_USDHC1_WP_ADMA_SPI2_SDI IMX8QXP_USDHC1_WP 2
+#define IMX8QXP_USDHC1_WP_LSIO_GPIO4_IO21 IMX8QXP_USDHC1_WP 4
+#define IMX8QXP_USDHC1_CD_B_CONN_USDHC1_CD_B IMX8QXP_USDHC1_CD_B 0
+#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS_P IMX8QXP_USDHC1_CD_B 1
+#define IMX8QXP_USDHC1_CD_B_ADMA_SPI2_CS0 IMX8QXP_USDHC1_CD_B 2
+#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS IMX8QXP_USDHC1_CD_B 3
+#define IMX8QXP_USDHC1_CD_B_LSIO_GPIO4_IO22 IMX8QXP_USDHC1_CD_B 4
+#define IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK IMX8QXP_USDHC1_CLK 0
+#define IMX8QXP_USDHC1_CLK_ADMA_UART3_RX IMX8QXP_USDHC1_CLK 2
+#define IMX8QXP_USDHC1_CLK_LSIO_GPIO4_IO23 IMX8QXP_USDHC1_CLK 4
+#define IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD IMX8QXP_USDHC1_CMD 0
+#define IMX8QXP_USDHC1_CMD_CONN_NAND_CE0_B IMX8QXP_USDHC1_CMD 1
+#define IMX8QXP_USDHC1_CMD_ADMA_MQS_R IMX8QXP_USDHC1_CMD 2
+#define IMX8QXP_USDHC1_CMD_LSIO_GPIO4_IO24 IMX8QXP_USDHC1_CMD 4
+#define IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 IMX8QXP_USDHC1_DATA0 0
+#define IMX8QXP_USDHC1_DATA0_CONN_NAND_CE1_B IMX8QXP_USDHC1_DATA0 1
+#define IMX8QXP_USDHC1_DATA0_ADMA_MQS_L IMX8QXP_USDHC1_DATA0 2
+#define IMX8QXP_USDHC1_DATA0_LSIO_GPIO4_IO25 IMX8QXP_USDHC1_DATA0 4
+#define IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 IMX8QXP_USDHC1_DATA1 0
+#define IMX8QXP_USDHC1_DATA1_CONN_NAND_RE_B IMX8QXP_USDHC1_DATA1 1
+#define IMX8QXP_USDHC1_DATA1_ADMA_UART3_TX IMX8QXP_USDHC1_DATA1 2
+#define IMX8QXP_USDHC1_DATA1_LSIO_GPIO4_IO26 IMX8QXP_USDHC1_DATA1 4
+#define IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 IMX8QXP_USDHC1_DATA2 0
+#define IMX8QXP_USDHC1_DATA2_CONN_NAND_WE_B IMX8QXP_USDHC1_DATA2 1
+#define IMX8QXP_USDHC1_DATA2_ADMA_UART3_CTS_B IMX8QXP_USDHC1_DATA2 2
+#define IMX8QXP_USDHC1_DATA2_LSIO_GPIO4_IO27 IMX8QXP_USDHC1_DATA2 4
+#define IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 IMX8QXP_USDHC1_DATA3 0
+#define IMX8QXP_USDHC1_DATA3_CONN_NAND_ALE IMX8QXP_USDHC1_DATA3 1
+#define IMX8QXP_USDHC1_DATA3_ADMA_UART3_RTS_B IMX8QXP_USDHC1_DATA3 2
+#define IMX8QXP_USDHC1_DATA3_LSIO_GPIO4_IO28 IMX8QXP_USDHC1_DATA3 4
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8QXP_ENET0_RGMII_TXC 0
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8QXP_ENET0_RGMII_TXC 1
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8QXP_ENET0_RGMII_TXC 2
+#define IMX8QXP_ENET0_RGMII_TXC_CONN_NAND_CE1_B IMX8QXP_ENET0_RGMII_TXC 3
+#define IMX8QXP_ENET0_RGMII_TXC_LSIO_GPIO4_IO29 IMX8QXP_ENET0_RGMII_TXC 4
+#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8QXP_ENET0_RGMII_TX_CTL 0
+#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B IMX8QXP_ENET0_RGMII_TX_CTL 3
+#define IMX8QXP_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30 IMX8QXP_ENET0_RGMII_TX_CTL 4
+#define IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8QXP_ENET0_RGMII_TXD0 0
+#define IMX8QXP_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT IMX8QXP_ENET0_RGMII_TXD0 3
+#define IMX8QXP_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31 IMX8QXP_ENET0_RGMII_TXD0 4
+#define IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8QXP_ENET0_RGMII_TXD1 0
+#define IMX8QXP_ENET0_RGMII_TXD1_CONN_USDHC1_WP IMX8QXP_ENET0_RGMII_TXD1 3
+#define IMX8QXP_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00 IMX8QXP_ENET0_RGMII_TXD1 4
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8QXP_ENET0_RGMII_TXD2 0
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_MLB_CLK IMX8QXP_ENET0_RGMII_TXD2 1
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_NAND_CE0_B IMX8QXP_ENET0_RGMII_TXD2 2
+#define IMX8QXP_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B IMX8QXP_ENET0_RGMII_TXD2 3
+#define IMX8QXP_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01 IMX8QXP_ENET0_RGMII_TXD2 4
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8QXP_ENET0_RGMII_TXD3 0
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_MLB_SIG IMX8QXP_ENET0_RGMII_TXD3 1
+#define IMX8QXP_ENET0_RGMII_TXD3_CONN_NAND_RE_B IMX8QXP_ENET0_RGMII_TXD3 2
+#define IMX8QXP_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02 IMX8QXP_ENET0_RGMII_TXD3 4
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8QXP_ENET0_RGMII_RXC 0
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_MLB_DATA IMX8QXP_ENET0_RGMII_RXC 1
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_NAND_WE_B IMX8QXP_ENET0_RGMII_RXC 2
+#define IMX8QXP_ENET0_RGMII_RXC_CONN_USDHC1_CLK IMX8QXP_ENET0_RGMII_RXC 3
+#define IMX8QXP_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 IMX8QXP_ENET0_RGMII_RXC 4
+#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8QXP_ENET0_RGMII_RX_CTL 0
+#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD IMX8QXP_ENET0_RGMII_RX_CTL 3
+#define IMX8QXP_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04 IMX8QXP_ENET0_RGMII_RX_CTL 4
+#define IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8QXP_ENET0_RGMII_RXD0 0
+#define IMX8QXP_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0 IMX8QXP_ENET0_RGMII_RXD0 3
+#define IMX8QXP_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05 IMX8QXP_ENET0_RGMII_RXD0 4
+#define IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8QXP_ENET0_RGMII_RXD1 0
+#define IMX8QXP_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1 IMX8QXP_ENET0_RGMII_RXD1 3
+#define IMX8QXP_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06 IMX8QXP_ENET0_RGMII_RXD1 4
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8QXP_ENET0_RGMII_RXD2 0
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8QXP_ENET0_RGMII_RXD2 1
+#define IMX8QXP_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2 IMX8QXP_ENET0_RGMII_RXD2 3
+#define IMX8QXP_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07 IMX8QXP_ENET0_RGMII_RXD2 4
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8QXP_ENET0_RGMII_RXD3 0
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_NAND_ALE IMX8QXP_ENET0_RGMII_RXD3 2
+#define IMX8QXP_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3 IMX8QXP_ENET0_RGMII_RXD3 3
+#define IMX8QXP_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08 IMX8QXP_ENET0_RGMII_RXD3 4
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8QXP_ENET0_REFCLK_125M_25M 0
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8QXP_ENET0_REFCLK_125M_25M 1
+#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET1_PPS IMX8QXP_ENET0_REFCLK_125M_25M 2
+#define IMX8QXP_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09 IMX8QXP_ENET0_REFCLK_125M_25M 4
+#define IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO IMX8QXP_ENET0_MDIO 0
+#define IMX8QXP_ENET0_MDIO_ADMA_I2C3_SDA IMX8QXP_ENET0_MDIO 1
+#define IMX8QXP_ENET0_MDIO_CONN_ENET1_MDIO IMX8QXP_ENET0_MDIO 2
+#define IMX8QXP_ENET0_MDIO_LSIO_GPIO5_IO10 IMX8QXP_ENET0_MDIO 4
+#define IMX8QXP_ENET0_MDC_CONN_ENET0_MDC IMX8QXP_ENET0_MDC 0
+#define IMX8QXP_ENET0_MDC_ADMA_I2C3_SCL IMX8QXP_ENET0_MDC 1
+#define IMX8QXP_ENET0_MDC_CONN_ENET1_MDC IMX8QXP_ENET0_MDC 2
+#define IMX8QXP_ENET0_MDC_LSIO_GPIO5_IO11 IMX8QXP_ENET0_MDC 4
+#define IMX8QXP_ESAI0_FSR_ADMA_ESAI0_FSR IMX8QXP_ESAI0_FSR 0
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_OUT IMX8QXP_ESAI0_FSR 1
+#define IMX8QXP_ESAI0_FSR_ADMA_LCDIF_D00 IMX8QXP_ESAI0_FSR 2
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RGMII_TXC IMX8QXP_ESAI0_FSR 3
+#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_IN IMX8QXP_ESAI0_FSR 4
+#define IMX8QXP_ESAI0_FST_ADMA_ESAI0_FST IMX8QXP_ESAI0_FST 0
+#define IMX8QXP_ESAI0_FST_CONN_MLB_CLK IMX8QXP_ESAI0_FST 1
+#define IMX8QXP_ESAI0_FST_ADMA_LCDIF_D01 IMX8QXP_ESAI0_FST 2
+#define IMX8QXP_ESAI0_FST_CONN_ENET1_RGMII_TXD2 IMX8QXP_ESAI0_FST 3
+#define IMX8QXP_ESAI0_FST_LSIO_GPIO0_IO01 IMX8QXP_ESAI0_FST 4
+#define IMX8QXP_ESAI0_SCKR_ADMA_ESAI0_SCKR IMX8QXP_ESAI0_SCKR 0
+#define IMX8QXP_ESAI0_SCKR_ADMA_LCDIF_D02 IMX8QXP_ESAI0_SCKR 2
+#define IMX8QXP_ESAI0_SCKR_CONN_ENET1_RGMII_TX_CTL IMX8QXP_ESAI0_SCKR 3
+#define IMX8QXP_ESAI0_SCKR_LSIO_GPIO0_IO02 IMX8QXP_ESAI0_SCKR 4
+#define IMX8QXP_ESAI0_SCKT_ADMA_ESAI0_SCKT IMX8QXP_ESAI0_SCKT 0
+#define IMX8QXP_ESAI0_SCKT_CONN_MLB_SIG IMX8QXP_ESAI0_SCKT 1
+#define IMX8QXP_ESAI0_SCKT_ADMA_LCDIF_D03 IMX8QXP_ESAI0_SCKT 2
+#define IMX8QXP_ESAI0_SCKT_CONN_ENET1_RGMII_TXD3 IMX8QXP_ESAI0_SCKT 3
+#define IMX8QXP_ESAI0_SCKT_LSIO_GPIO0_IO03 IMX8QXP_ESAI0_SCKT 4
+#define IMX8QXP_ESAI0_TX0_ADMA_ESAI0_TX0 IMX8QXP_ESAI0_TX0 0
+#define IMX8QXP_ESAI0_TX0_CONN_MLB_DATA IMX8QXP_ESAI0_TX0 1
+#define IMX8QXP_ESAI0_TX0_ADMA_LCDIF_D04 IMX8QXP_ESAI0_TX0 2
+#define IMX8QXP_ESAI0_TX0_CONN_ENET1_RGMII_RXC IMX8QXP_ESAI0_TX0 3
+#define IMX8QXP_ESAI0_TX0_LSIO_GPIO0_IO04 IMX8QXP_ESAI0_TX0 4
+#define IMX8QXP_ESAI0_TX1_ADMA_ESAI0_TX1 IMX8QXP_ESAI0_TX1 0
+#define IMX8QXP_ESAI0_TX1_ADMA_LCDIF_D05 IMX8QXP_ESAI0_TX1 2
+#define IMX8QXP_ESAI0_TX1_CONN_ENET1_RGMII_RXD3 IMX8QXP_ESAI0_TX1 3
+#define IMX8QXP_ESAI0_TX1_LSIO_GPIO0_IO05 IMX8QXP_ESAI0_TX1 4
+#define IMX8QXP_ESAI0_TX2_RX3_ADMA_ESAI0_TX2_RX3 IMX8QXP_ESAI0_TX2_RX3 0
+#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RMII_RX_ER IMX8QXP_ESAI0_TX2_RX3 1
+#define IMX8QXP_ESAI0_TX2_RX3_ADMA_LCDIF_D06 IMX8QXP_ESAI0_TX2_RX3 2
+#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RGMII_RXD2 IMX8QXP_ESAI0_TX2_RX3 3
+#define IMX8QXP_ESAI0_TX2_RX3_LSIO_GPIO0_IO06 IMX8QXP_ESAI0_TX2_RX3 4
+#define IMX8QXP_ESAI0_TX3_RX2_ADMA_ESAI0_TX3_RX2 IMX8QXP_ESAI0_TX3_RX2 0
+#define IMX8QXP_ESAI0_TX3_RX2_ADMA_LCDIF_D07 IMX8QXP_ESAI0_TX3_RX2 2
+#define IMX8QXP_ESAI0_TX3_RX2_CONN_ENET1_RGMII_RXD1 IMX8QXP_ESAI0_TX3_RX2 3
+#define IMX8QXP_ESAI0_TX3_RX2_LSIO_GPIO0_IO07 IMX8QXP_ESAI0_TX3_RX2 4
+#define IMX8QXP_ESAI0_TX4_RX1_ADMA_ESAI0_TX4_RX1 IMX8QXP_ESAI0_TX4_RX1 0
+#define IMX8QXP_ESAI0_TX4_RX1_ADMA_LCDIF_D08 IMX8QXP_ESAI0_TX4_RX1 2
+#define IMX8QXP_ESAI0_TX4_RX1_CONN_ENET1_RGMII_TXD0 IMX8QXP_ESAI0_TX4_RX1 3
+#define IMX8QXP_ESAI0_TX4_RX1_LSIO_GPIO0_IO08 IMX8QXP_ESAI0_TX4_RX1 4
+#define IMX8QXP_ESAI0_TX5_RX0_ADMA_ESAI0_TX5_RX0 IMX8QXP_ESAI0_TX5_RX0 0
+#define IMX8QXP_ESAI0_TX5_RX0_ADMA_LCDIF_D09 IMX8QXP_ESAI0_TX5_RX0 2
+#define IMX8QXP_ESAI0_TX5_RX0_CONN_ENET1_RGMII_TXD1 IMX8QXP_ESAI0_TX5_RX0 3
+#define IMX8QXP_ESAI0_TX5_RX0_LSIO_GPIO0_IO09 IMX8QXP_ESAI0_TX5_RX0 4
+#define IMX8QXP_SPDIF0_RX_ADMA_SPDIF0_RX IMX8QXP_SPDIF0_RX 0
+#define IMX8QXP_SPDIF0_RX_ADMA_MQS_R IMX8QXP_SPDIF0_RX 1
+#define IMX8QXP_SPDIF0_RX_ADMA_LCDIF_D10 IMX8QXP_SPDIF0_RX 2
+#define IMX8QXP_SPDIF0_RX_CONN_ENET1_RGMII_RXD0 IMX8QXP_SPDIF0_RX 3
+#define IMX8QXP_SPDIF0_RX_LSIO_GPIO0_IO10 IMX8QXP_SPDIF0_RX 4
+#define IMX8QXP_SPDIF0_TX_ADMA_SPDIF0_TX IMX8QXP_SPDIF0_TX 0
+#define IMX8QXP_SPDIF0_TX_ADMA_MQS_L IMX8QXP_SPDIF0_TX 1
+#define IMX8QXP_SPDIF0_TX_ADMA_LCDIF_D11 IMX8QXP_SPDIF0_TX 2
+#define IMX8QXP_SPDIF0_TX_CONN_ENET1_RGMII_RX_CTL IMX8QXP_SPDIF0_TX 3
+#define IMX8QXP_SPDIF0_TX_LSIO_GPIO0_IO11 IMX8QXP_SPDIF0_TX 4
+#define IMX8QXP_SPDIF0_EXT_CLK_ADMA_SPDIF0_EXT_CLK IMX8QXP_SPDIF0_EXT_CLK 0
+#define IMX8QXP_SPDIF0_EXT_CLK_ADMA_LCDIF_D12 IMX8QXP_SPDIF0_EXT_CLK 2
+#define IMX8QXP_SPDIF0_EXT_CLK_CONN_ENET1_REFCLK_125M_25M IMX8QXP_SPDIF0_EXT_CLK 3
+#define IMX8QXP_SPDIF0_EXT_CLK_LSIO_GPIO0_IO12 IMX8QXP_SPDIF0_EXT_CLK 4
+#define IMX8QXP_SPI3_SCK_ADMA_SPI3_SCK IMX8QXP_SPI3_SCK 0
+#define IMX8QXP_SPI3_SCK_ADMA_LCDIF_D13 IMX8QXP_SPI3_SCK 2
+#define IMX8QXP_SPI3_SCK_LSIO_GPIO0_IO13 IMX8QXP_SPI3_SCK 4
+#define IMX8QXP_SPI3_SDO_ADMA_SPI3_SDO IMX8QXP_SPI3_SDO 0
+#define IMX8QXP_SPI3_SDO_ADMA_LCDIF_D14 IMX8QXP_SPI3_SDO 2
+#define IMX8QXP_SPI3_SDO_LSIO_GPIO0_IO14 IMX8QXP_SPI3_SDO 4
+#define IMX8QXP_SPI3_SDI_ADMA_SPI3_SDI IMX8QXP_SPI3_SDI 0
+#define IMX8QXP_SPI3_SDI_ADMA_LCDIF_D15 IMX8QXP_SPI3_SDI 2
+#define IMX8QXP_SPI3_SDI_LSIO_GPIO0_IO15 IMX8QXP_SPI3_SDI 4
+#define IMX8QXP_SPI3_CS0_ADMA_SPI3_CS0 IMX8QXP_SPI3_CS0 0
+#define IMX8QXP_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8QXP_SPI3_CS0 1
+#define IMX8QXP_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8QXP_SPI3_CS0 2
+#define IMX8QXP_SPI3_CS0_LSIO_GPIO0_IO16 IMX8QXP_SPI3_CS0 4
+#define IMX8QXP_SPI3_CS1_ADMA_SPI3_CS1 IMX8QXP_SPI3_CS1 0
+#define IMX8QXP_SPI3_CS1_ADMA_I2C3_SCL IMX8QXP_SPI3_CS1 1
+#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_RESET IMX8QXP_SPI3_CS1 2
+#define IMX8QXP_SPI3_CS1_ADMA_SPI2_CS0 IMX8QXP_SPI3_CS1 3
+#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_D16 IMX8QXP_SPI3_CS1 4
+#define IMX8QXP_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8QXP_MCLK_IN1 0
+#define IMX8QXP_MCLK_IN1_ADMA_I2C3_SDA IMX8QXP_MCLK_IN1 1
+#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_EN IMX8QXP_MCLK_IN1 2
+#define IMX8QXP_MCLK_IN1_ADMA_SPI2_SCK IMX8QXP_MCLK_IN1 3
+#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_D17 IMX8QXP_MCLK_IN1 4
+#define IMX8QXP_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8QXP_MCLK_IN0 0
+#define IMX8QXP_MCLK_IN0_ADMA_ESAI0_RX_HF_CLK IMX8QXP_MCLK_IN0 1
+#define IMX8QXP_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8QXP_MCLK_IN0 2
+#define IMX8QXP_MCLK_IN0_ADMA_SPI2_SDI IMX8QXP_MCLK_IN0 3
+#define IMX8QXP_MCLK_IN0_LSIO_GPIO0_IO19 IMX8QXP_MCLK_IN0 4
+#define IMX8QXP_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8QXP_MCLK_OUT0 0
+#define IMX8QXP_MCLK_OUT0_ADMA_ESAI0_TX_HF_CLK IMX8QXP_MCLK_OUT0 1
+#define IMX8QXP_MCLK_OUT0_ADMA_LCDIF_CLK IMX8QXP_MCLK_OUT0 2
+#define IMX8QXP_MCLK_OUT0_ADMA_SPI2_SDO IMX8QXP_MCLK_OUT0 3
+#define IMX8QXP_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8QXP_MCLK_OUT0 4
+#define IMX8QXP_UART1_TX_ADMA_UART1_TX IMX8QXP_UART1_TX 0
+#define IMX8QXP_UART1_TX_LSIO_PWM0_OUT IMX8QXP_UART1_TX 1
+#define IMX8QXP_UART1_TX_LSIO_GPT0_CAPTURE IMX8QXP_UART1_TX 2
+#define IMX8QXP_UART1_TX_LSIO_GPIO0_IO21 IMX8QXP_UART1_TX 4
+#define IMX8QXP_UART1_RX_ADMA_UART1_RX IMX8QXP_UART1_RX 0
+#define IMX8QXP_UART1_RX_LSIO_PWM1_OUT IMX8QXP_UART1_RX 1
+#define IMX8QXP_UART1_RX_LSIO_GPT0_COMPARE IMX8QXP_UART1_RX 2
+#define IMX8QXP_UART1_RX_LSIO_GPT1_CLK IMX8QXP_UART1_RX 3
+#define IMX8QXP_UART1_RX_LSIO_GPIO0_IO22 IMX8QXP_UART1_RX 4
+#define IMX8QXP_UART1_RTS_B_ADMA_UART1_RTS_B IMX8QXP_UART1_RTS_B 0
+#define IMX8QXP_UART1_RTS_B_LSIO_PWM2_OUT IMX8QXP_UART1_RTS_B 1
+#define IMX8QXP_UART1_RTS_B_ADMA_LCDIF_D16 IMX8QXP_UART1_RTS_B 2
+#define IMX8QXP_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8QXP_UART1_RTS_B 3
+#define IMX8QXP_UART1_RTS_B_LSIO_GPT0_CLK IMX8QXP_UART1_RTS_B 4
+#define IMX8QXP_UART1_CTS_B_ADMA_UART1_CTS_B IMX8QXP_UART1_CTS_B 0
+#define IMX8QXP_UART1_CTS_B_LSIO_PWM3_OUT IMX8QXP_UART1_CTS_B 1
+#define IMX8QXP_UART1_CTS_B_ADMA_LCDIF_D17 IMX8QXP_UART1_CTS_B 2
+#define IMX8QXP_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8QXP_UART1_CTS_B 3
+#define IMX8QXP_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8QXP_UART1_CTS_B 4
+#define IMX8QXP_SAI0_TXD_ADMA_SAI0_TXD IMX8QXP_SAI0_TXD 0
+#define IMX8QXP_SAI0_TXD_ADMA_SAI1_RXC IMX8QXP_SAI0_TXD 1
+#define IMX8QXP_SAI0_TXD_ADMA_SPI1_SDO IMX8QXP_SAI0_TXD 2
+#define IMX8QXP_SAI0_TXD_ADMA_LCDIF_D18 IMX8QXP_SAI0_TXD 3
+#define IMX8QXP_SAI0_TXD_LSIO_GPIO0_IO25 IMX8QXP_SAI0_TXD 4
+#define IMX8QXP_SAI0_TXC_ADMA_SAI0_TXC IMX8QXP_SAI0_TXC 0
+#define IMX8QXP_SAI0_TXC_ADMA_SAI1_TXD IMX8QXP_SAI0_TXC 1
+#define IMX8QXP_SAI0_TXC_ADMA_SPI1_SDI IMX8QXP_SAI0_TXC 2
+#define IMX8QXP_SAI0_TXC_ADMA_LCDIF_D19 IMX8QXP_SAI0_TXC 3
+#define IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26 IMX8QXP_SAI0_TXC 4
+#define IMX8QXP_SAI0_RXD_ADMA_SAI0_RXD IMX8QXP_SAI0_RXD 0
+#define IMX8QXP_SAI0_RXD_ADMA_SAI1_RXFS IMX8QXP_SAI0_RXD 1
+#define IMX8QXP_SAI0_RXD_ADMA_SPI1_CS0 IMX8QXP_SAI0_RXD 2
+#define IMX8QXP_SAI0_RXD_ADMA_LCDIF_D20 IMX8QXP_SAI0_RXD 3
+#define IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27 IMX8QXP_SAI0_RXD 4
+#define IMX8QXP_SAI0_TXFS_ADMA_SAI0_TXFS IMX8QXP_SAI0_TXFS 0
+#define IMX8QXP_SAI0_TXFS_ADMA_SPI2_CS1 IMX8QXP_SAI0_TXFS 1
+#define IMX8QXP_SAI0_TXFS_ADMA_SPI1_SCK IMX8QXP_SAI0_TXFS 2
+#define IMX8QXP_SAI0_TXFS_LSIO_GPIO0_IO28 IMX8QXP_SAI0_TXFS 4
+#define IMX8QXP_SAI1_RXD_ADMA_SAI1_RXD IMX8QXP_SAI1_RXD 0
+#define IMX8QXP_SAI1_RXD_ADMA_SAI0_RXFS IMX8QXP_SAI1_RXD 1
+#define IMX8QXP_SAI1_RXD_ADMA_SPI1_CS1 IMX8QXP_SAI1_RXD 2
+#define IMX8QXP_SAI1_RXD_ADMA_LCDIF_D21 IMX8QXP_SAI1_RXD 3
+#define IMX8QXP_SAI1_RXD_LSIO_GPIO0_IO29 IMX8QXP_SAI1_RXD 4
+#define IMX8QXP_SAI1_RXC_ADMA_SAI1_RXC IMX8QXP_SAI1_RXC 0
+#define IMX8QXP_SAI1_RXC_ADMA_SAI1_TXC IMX8QXP_SAI1_RXC 1
+#define IMX8QXP_SAI1_RXC_ADMA_LCDIF_D22 IMX8QXP_SAI1_RXC 3
+#define IMX8QXP_SAI1_RXC_LSIO_GPIO0_IO30 IMX8QXP_SAI1_RXC 4
+#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_RXFS IMX8QXP_SAI1_RXFS 0
+#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_TXFS IMX8QXP_SAI1_RXFS 1
+#define IMX8QXP_SAI1_RXFS_ADMA_LCDIF_D23 IMX8QXP_SAI1_RXFS 3
+#define IMX8QXP_SAI1_RXFS_LSIO_GPIO0_IO31 IMX8QXP_SAI1_RXFS 4
+#define IMX8QXP_SPI2_CS0_ADMA_SPI2_CS0 IMX8QXP_SPI2_CS0 0
+#define IMX8QXP_SPI2_CS0_LSIO_GPIO1_IO00 IMX8QXP_SPI2_CS0 4
+#define IMX8QXP_SPI2_SDO_ADMA_SPI2_SDO IMX8QXP_SPI2_SDO 0
+#define IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 IMX8QXP_SPI2_SDO 4
+#define IMX8QXP_SPI2_SDI_ADMA_SPI2_SDI IMX8QXP_SPI2_SDI 0
+#define IMX8QXP_SPI2_SDI_LSIO_GPIO1_IO02 IMX8QXP_SPI2_SDI 4
+#define IMX8QXP_SPI2_SCK_ADMA_SPI2_SCK IMX8QXP_SPI2_SCK 0
+#define IMX8QXP_SPI2_SCK_LSIO_GPIO1_IO03 IMX8QXP_SPI2_SCK 4
+#define IMX8QXP_SPI0_SCK_ADMA_SPI0_SCK IMX8QXP_SPI0_SCK 0
+#define IMX8QXP_SPI0_SCK_ADMA_SAI0_TXC IMX8QXP_SPI0_SCK 1
+#define IMX8QXP_SPI0_SCK_M40_I2C0_SCL IMX8QXP_SPI0_SCK 2
+#define IMX8QXP_SPI0_SCK_M40_GPIO0_IO00 IMX8QXP_SPI0_SCK 3
+#define IMX8QXP_SPI0_SCK_LSIO_GPIO1_IO04 IMX8QXP_SPI0_SCK 4
+#define IMX8QXP_SPI0_SDI_ADMA_SPI0_SDI IMX8QXP_SPI0_SDI 0
+#define IMX8QXP_SPI0_SDI_ADMA_SAI0_TXD IMX8QXP_SPI0_SDI 1
+#define IMX8QXP_SPI0_SDI_M40_TPM0_CH0 IMX8QXP_SPI0_SDI 2
+#define IMX8QXP_SPI0_SDI_M40_GPIO0_IO02 IMX8QXP_SPI0_SDI 3
+#define IMX8QXP_SPI0_SDI_LSIO_GPIO1_IO05 IMX8QXP_SPI0_SDI 4
+#define IMX8QXP_SPI0_SDO_ADMA_SPI0_SDO IMX8QXP_SPI0_SDO 0
+#define IMX8QXP_SPI0_SDO_ADMA_SAI0_TXFS IMX8QXP_SPI0_SDO 1
+#define IMX8QXP_SPI0_SDO_M40_I2C0_SDA IMX8QXP_SPI0_SDO 2
+#define IMX8QXP_SPI0_SDO_M40_GPIO0_IO01 IMX8QXP_SPI0_SDO 3
+#define IMX8QXP_SPI0_SDO_LSIO_GPIO1_IO06 IMX8QXP_SPI0_SDO 4
+#define IMX8QXP_SPI0_CS1_ADMA_SPI0_CS1 IMX8QXP_SPI0_CS1 0
+#define IMX8QXP_SPI0_CS1_ADMA_SAI0_RXC IMX8QXP_SPI0_CS1 1
+#define IMX8QXP_SPI0_CS1_ADMA_SAI1_TXD IMX8QXP_SPI0_CS1 2
+#define IMX8QXP_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8QXP_SPI0_CS1 3
+#define IMX8QXP_SPI0_CS1_LSIO_GPIO1_IO07 IMX8QXP_SPI0_CS1 4
+#define IMX8QXP_SPI0_CS0_ADMA_SPI0_CS0 IMX8QXP_SPI0_CS0 0
+#define IMX8QXP_SPI0_CS0_ADMA_SAI0_RXD IMX8QXP_SPI0_CS0 1
+#define IMX8QXP_SPI0_CS0_M40_TPM0_CH1 IMX8QXP_SPI0_CS0 2
+#define IMX8QXP_SPI0_CS0_M40_GPIO0_IO03 IMX8QXP_SPI0_CS0 3
+#define IMX8QXP_SPI0_CS0_LSIO_GPIO1_IO08 IMX8QXP_SPI0_CS0 4
+#define IMX8QXP_ADC_IN1_ADMA_ADC_IN1 IMX8QXP_ADC_IN1 0
+#define IMX8QXP_ADC_IN1_M40_I2C0_SDA IMX8QXP_ADC_IN1 1
+#define IMX8QXP_ADC_IN1_M40_GPIO0_IO01 IMX8QXP_ADC_IN1 2
+#define IMX8QXP_ADC_IN1_LSIO_GPIO1_IO09 IMX8QXP_ADC_IN1 4
+#define IMX8QXP_ADC_IN0_ADMA_ADC_IN0 IMX8QXP_ADC_IN0 0
+#define IMX8QXP_ADC_IN0_M40_I2C0_SCL IMX8QXP_ADC_IN0 1
+#define IMX8QXP_ADC_IN0_M40_GPIO0_IO00 IMX8QXP_ADC_IN0 2
+#define IMX8QXP_ADC_IN0_LSIO_GPIO1_IO10 IMX8QXP_ADC_IN0 4
+#define IMX8QXP_ADC_IN3_ADMA_ADC_IN3 IMX8QXP_ADC_IN3 0
+#define IMX8QXP_ADC_IN3_M40_UART0_TX IMX8QXP_ADC_IN3 1
+#define IMX8QXP_ADC_IN3_M40_GPIO0_IO03 IMX8QXP_ADC_IN3 2
+#define IMX8QXP_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8QXP_ADC_IN3 3
+#define IMX8QXP_ADC_IN3_LSIO_GPIO1_IO11 IMX8QXP_ADC_IN3 4
+#define IMX8QXP_ADC_IN2_ADMA_ADC_IN2 IMX8QXP_ADC_IN2 0
+#define IMX8QXP_ADC_IN2_M40_UART0_RX IMX8QXP_ADC_IN2 1
+#define IMX8QXP_ADC_IN2_M40_GPIO0_IO02 IMX8QXP_ADC_IN2 2
+#define IMX8QXP_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8QXP_ADC_IN2 3
+#define IMX8QXP_ADC_IN2_LSIO_GPIO1_IO12 IMX8QXP_ADC_IN2 4
+#define IMX8QXP_ADC_IN5_ADMA_ADC_IN5 IMX8QXP_ADC_IN5 0
+#define IMX8QXP_ADC_IN5_M40_TPM0_CH1 IMX8QXP_ADC_IN5 1
+#define IMX8QXP_ADC_IN5_M40_GPIO0_IO05 IMX8QXP_ADC_IN5 2
+#define IMX8QXP_ADC_IN5_LSIO_GPIO1_IO13 IMX8QXP_ADC_IN5 4
+#define IMX8QXP_ADC_IN4_ADMA_ADC_IN4 IMX8QXP_ADC_IN4 0
+#define IMX8QXP_ADC_IN4_M40_TPM0_CH0 IMX8QXP_ADC_IN4 1
+#define IMX8QXP_ADC_IN4_M40_GPIO0_IO04 IMX8QXP_ADC_IN4 2
+#define IMX8QXP_ADC_IN4_LSIO_GPIO1_IO14 IMX8QXP_ADC_IN4 4
+#define IMX8QXP_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8QXP_FLEXCAN0_RX 0
+#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8QXP_FLEXCAN0_RX 1
+#define IMX8QXP_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8QXP_FLEXCAN0_RX 2
+#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8QXP_FLEXCAN0_RX 3
+#define IMX8QXP_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8QXP_FLEXCAN0_RX 4
+#define IMX8QXP_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8QXP_FLEXCAN0_TX 0
+#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8QXP_FLEXCAN0_TX 1
+#define IMX8QXP_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8QXP_FLEXCAN0_TX 2
+#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8QXP_FLEXCAN0_TX 3
+#define IMX8QXP_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8QXP_FLEXCAN0_TX 4
+#define IMX8QXP_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8QXP_FLEXCAN1_RX 0
+#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8QXP_FLEXCAN1_RX 1
+#define IMX8QXP_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8QXP_FLEXCAN1_RX 2
+#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8QXP_FLEXCAN1_RX 3
+#define IMX8QXP_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8QXP_FLEXCAN1_RX 4
+#define IMX8QXP_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8QXP_FLEXCAN1_TX 0
+#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8QXP_FLEXCAN1_TX 1
+#define IMX8QXP_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8QXP_FLEXCAN1_TX 2
+#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8QXP_FLEXCAN1_TX 3
+#define IMX8QXP_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8QXP_FLEXCAN1_TX 4
+#define IMX8QXP_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8QXP_FLEXCAN2_RX 0
+#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8QXP_FLEXCAN2_RX 1
+#define IMX8QXP_FLEXCAN2_RX_ADMA_UART3_RX IMX8QXP_FLEXCAN2_RX 2
+#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8QXP_FLEXCAN2_RX 3
+#define IMX8QXP_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8QXP_FLEXCAN2_RX 4
+#define IMX8QXP_FLEXCAN2_TX_ADMA_FLEXCAN2_TX IMX8QXP_FLEXCAN2_TX 0
+#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8QXP_FLEXCAN2_TX 1
+#define IMX8QXP_FLEXCAN2_TX_ADMA_UART3_TX IMX8QXP_FLEXCAN2_TX 2
+#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8QXP_FLEXCAN2_TX 3
+#define IMX8QXP_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8QXP_FLEXCAN2_TX 4
+#define IMX8QXP_UART0_RX_ADMA_UART0_RX IMX8QXP_UART0_RX 0
+#define IMX8QXP_UART0_RX_ADMA_MQS_R IMX8QXP_UART0_RX 1
+#define IMX8QXP_UART0_RX_ADMA_FLEXCAN0_RX IMX8QXP_UART0_RX 2
+#define IMX8QXP_UART0_RX_LSIO_GPIO1_IO21 IMX8QXP_UART0_RX 4
+#define IMX8QXP_UART0_TX_ADMA_UART0_TX IMX8QXP_UART0_TX 0
+#define IMX8QXP_UART0_TX_ADMA_MQS_L IMX8QXP_UART0_TX 1
+#define IMX8QXP_UART0_TX_ADMA_FLEXCAN0_TX IMX8QXP_UART0_TX 2
+#define IMX8QXP_UART0_TX_LSIO_GPIO1_IO22 IMX8QXP_UART0_TX 4
+#define IMX8QXP_UART2_TX_ADMA_UART2_TX IMX8QXP_UART2_TX 0
+#define IMX8QXP_UART2_TX_ADMA_FTM_CH1 IMX8QXP_UART2_TX 1
+#define IMX8QXP_UART2_TX_ADMA_FLEXCAN1_TX IMX8QXP_UART2_TX 2
+#define IMX8QXP_UART2_TX_LSIO_GPIO1_IO23 IMX8QXP_UART2_TX 4
+#define IMX8QXP_UART2_RX_ADMA_UART2_RX IMX8QXP_UART2_RX 0
+#define IMX8QXP_UART2_RX_ADMA_FTM_CH0 IMX8QXP_UART2_RX 1
+#define IMX8QXP_UART2_RX_ADMA_FLEXCAN1_RX IMX8QXP_UART2_RX 2
+#define IMX8QXP_UART2_RX_LSIO_GPIO1_IO24 IMX8QXP_UART2_RX 4
+#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL IMX8QXP_MIPI_DSI0_I2C0_SCL 0
+#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI1_GPIO0_IO02 IMX8QXP_MIPI_DSI0_I2C0_SCL 1
+#define IMX8QXP_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO25 IMX8QXP_MIPI_DSI0_I2C0_SCL 4
+#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA IMX8QXP_MIPI_DSI0_I2C0_SDA 0
+#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI1_GPIO0_IO03 IMX8QXP_MIPI_DSI0_I2C0_SDA 1
+#define IMX8QXP_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO26 IMX8QXP_MIPI_DSI0_I2C0_SDA 4
+#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00 IMX8QXP_MIPI_DSI0_GPIO0_00 0
+#define IMX8QXP_MIPI_DSI0_GPIO0_00_ADMA_I2C1_SCL IMX8QXP_MIPI_DSI0_GPIO0_00 1
+#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT IMX8QXP_MIPI_DSI0_GPIO0_00 2
+#define IMX8QXP_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO27 IMX8QXP_MIPI_DSI0_GPIO0_00 4
+#define IMX8QXP_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01 IMX8QXP_MIPI_DSI0_GPIO0_01 0
+#define IMX8QXP_MIPI_DSI0_GPIO0_01_ADMA_I2C1_SDA IMX8QXP_MIPI_DSI0_GPIO0_01 1
+#define IMX8QXP_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO28 IMX8QXP_MIPI_DSI0_GPIO0_01 4
+#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL IMX8QXP_MIPI_DSI1_I2C0_SCL 0
+#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI0_GPIO0_IO02 IMX8QXP_MIPI_DSI1_I2C0_SCL 1
+#define IMX8QXP_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO29 IMX8QXP_MIPI_DSI1_I2C0_SCL 4
+#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA IMX8QXP_MIPI_DSI1_I2C0_SDA 0
+#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI0_GPIO0_IO03 IMX8QXP_MIPI_DSI1_I2C0_SDA 1
+#define IMX8QXP_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO30 IMX8QXP_MIPI_DSI1_I2C0_SDA 4
+#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00 IMX8QXP_MIPI_DSI1_GPIO0_00 0
+#define IMX8QXP_MIPI_DSI1_GPIO0_00_ADMA_I2C2_SCL IMX8QXP_MIPI_DSI1_GPIO0_00 1
+#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT IMX8QXP_MIPI_DSI1_GPIO0_00 2
+#define IMX8QXP_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO31 IMX8QXP_MIPI_DSI1_GPIO0_00 4
+#define IMX8QXP_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01 IMX8QXP_MIPI_DSI1_GPIO0_01 0
+#define IMX8QXP_MIPI_DSI1_GPIO0_01_ADMA_I2C2_SDA IMX8QXP_MIPI_DSI1_GPIO0_01 1
+#define IMX8QXP_MIPI_DSI1_GPIO0_01_LSIO_GPIO2_IO00 IMX8QXP_MIPI_DSI1_GPIO0_01 4
+#define IMX8QXP_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8QXP_JTAG_TRST_B 0
+#define IMX8QXP_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8QXP_JTAG_TRST_B 1
+#define IMX8QXP_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8QXP_PMIC_I2C_SCL 0
+#define IMX8QXP_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8QXP_PMIC_I2C_SCL 1
+#define IMX8QXP_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8QXP_PMIC_I2C_SCL 4
+#define IMX8QXP_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8QXP_PMIC_I2C_SDA 0
+#define IMX8QXP_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8QXP_PMIC_I2C_SDA 1
+#define IMX8QXP_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8QXP_PMIC_I2C_SDA 4
+#define IMX8QXP_PMIC_INT_B_SCU_DIMX8QXPMIC_INT_B IMX8QXP_PMIC_INT_B 0
+#define IMX8QXP_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8QXP_SCU_GPIO0_00 0
+#define IMX8QXP_SCU_GPIO0_00_SCU_UART0_RX IMX8QXP_SCU_GPIO0_00 1
+#define IMX8QXP_SCU_GPIO0_00_M40_UART0_RX IMX8QXP_SCU_GPIO0_00 2
+#define IMX8QXP_SCU_GPIO0_00_ADMA_UART3_RX IMX8QXP_SCU_GPIO0_00 3
+#define IMX8QXP_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8QXP_SCU_GPIO0_00 4
+#define IMX8QXP_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8QXP_SCU_GPIO0_01 0
+#define IMX8QXP_SCU_GPIO0_01_SCU_UART0_TX IMX8QXP_SCU_GPIO0_01 1
+#define IMX8QXP_SCU_GPIO0_01_M40_UART0_TX IMX8QXP_SCU_GPIO0_01 2
+#define IMX8QXP_SCU_GPIO0_01_ADMA_UART3_TX IMX8QXP_SCU_GPIO0_01 3
+#define IMX8QXP_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8QXP_SCU_GPIO0_01 4
+#define IMX8QXP_SCU_PMIC_STANDBY_SCU_DIMX8QXPMIC_STANDBY IMX8QXP_SCU_PMIC_STANDBY 0
+#define IMX8QXP_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8QXP_SCU_BOOT_MODE0 0
+#define IMX8QXP_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8QXP_SCU_BOOT_MODE1 0
+#define IMX8QXP_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8QXP_SCU_BOOT_MODE2 0
+#define IMX8QXP_SCU_BOOT_MODE2_SCU_PMIC_I2C_SDA IMX8QXP_SCU_BOOT_MODE2 1
+#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3 IMX8QXP_SCU_BOOT_MODE3 0
+#define IMX8QXP_SCU_BOOT_MODE3_SCU_PMIC_I2C_SCL IMX8QXP_SCU_BOOT_MODE3 1
+#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8QXP_SCU_BOOT_MODE3 3
+#define IMX8QXP_CSI_D00_CI_PI_D02 IMX8QXP_CSI_D00 0
+#define IMX8QXP_CSI_D00_ADMA_SAI0_RXC IMX8QXP_CSI_D00 2
+#define IMX8QXP_CSI_D01_CI_PI_D03 IMX8QXP_CSI_D01 0
+#define IMX8QXP_CSI_D01_ADMA_SAI0_RXD IMX8QXP_CSI_D01 2
+#define IMX8QXP_CSI_D02_CI_PI_D04 IMX8QXP_CSI_D02 0
+#define IMX8QXP_CSI_D02_ADMA_SAI0_RXFS IMX8QXP_CSI_D02 2
+#define IMX8QXP_CSI_D03_CI_PI_D05 IMX8QXP_CSI_D03 0
+#define IMX8QXP_CSI_D03_ADMA_SAI2_RXC IMX8QXP_CSI_D03 2
+#define IMX8QXP_CSI_D04_CI_PI_D06 IMX8QXP_CSI_D04 0
+#define IMX8QXP_CSI_D04_ADMA_SAI2_RXD IMX8QXP_CSI_D04 2
+#define IMX8QXP_CSI_D05_CI_PI_D07 IMX8QXP_CSI_D05 0
+#define IMX8QXP_CSI_D05_ADMA_SAI2_RXFS IMX8QXP_CSI_D05 2
+#define IMX8QXP_CSI_D06_CI_PI_D08 IMX8QXP_CSI_D06 0
+#define IMX8QXP_CSI_D06_ADMA_SAI3_RXC IMX8QXP_CSI_D06 2
+#define IMX8QXP_CSI_D07_CI_PI_D09 IMX8QXP_CSI_D07 0
+#define IMX8QXP_CSI_D07_ADMA_SAI3_RXD IMX8QXP_CSI_D07 2
+#define IMX8QXP_CSI_HSYNC_CI_PI_HSYNC IMX8QXP_CSI_HSYNC 0
+#define IMX8QXP_CSI_HSYNC_CI_PI_D00 IMX8QXP_CSI_HSYNC 1
+#define IMX8QXP_CSI_HSYNC_ADMA_SAI3_RXFS IMX8QXP_CSI_HSYNC 2
+#define IMX8QXP_CSI_VSYNC_CI_PI_VSYNC IMX8QXP_CSI_VSYNC 0
+#define IMX8QXP_CSI_VSYNC_CI_PI_D01 IMX8QXP_CSI_VSYNC 1
+#define IMX8QXP_CSI_PCLK_CI_PI_PCLK IMX8QXP_CSI_PCLK 0
+#define IMX8QXP_CSI_PCLK_MIPI_CSI0_I2C0_SCL IMX8QXP_CSI_PCLK 1
+#define IMX8QXP_CSI_PCLK_ADMA_SPI1_SCK IMX8QXP_CSI_PCLK 3
+#define IMX8QXP_CSI_PCLK_LSIO_GPIO3_IO00 IMX8QXP_CSI_PCLK 4
+#define IMX8QXP_CSI_MCLK_CI_PI_MCLK IMX8QXP_CSI_MCLK 0
+#define IMX8QXP_CSI_MCLK_MIPI_CSI0_I2C0_SDA IMX8QXP_CSI_MCLK 1
+#define IMX8QXP_CSI_MCLK_ADMA_SPI1_SDO IMX8QXP_CSI_MCLK 3
+#define IMX8QXP_CSI_MCLK_LSIO_GPIO3_IO01 IMX8QXP_CSI_MCLK 4
+#define IMX8QXP_CSI_EN_CI_PI_EN IMX8QXP_CSI_EN 0
+#define IMX8QXP_CSI_EN_CI_PI_I2C_SCL IMX8QXP_CSI_EN 1
+#define IMX8QXP_CSI_EN_ADMA_I2C3_SCL IMX8QXP_CSI_EN 2
+#define IMX8QXP_CSI_EN_ADMA_SPI1_SDI IMX8QXP_CSI_EN 3
+#define IMX8QXP_CSI_EN_LSIO_GPIO3_IO02 IMX8QXP_CSI_EN 4
+#define IMX8QXP_CSI_RESET_CI_PI_RESET IMX8QXP_CSI_RESET 0
+#define IMX8QXP_CSI_RESET_CI_PI_I2C_SDA IMX8QXP_CSI_RESET 1
+#define IMX8QXP_CSI_RESET_ADMA_I2C3_SDA IMX8QXP_CSI_RESET 2
+#define IMX8QXP_CSI_RESET_ADMA_SPI1_CS0 IMX8QXP_CSI_RESET 3
+#define IMX8QXP_CSI_RESET_LSIO_GPIO3_IO03 IMX8QXP_CSI_RESET 4
+#define IMX8QXP_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT IMX8QXP_MIPI_CSI0_MCLK_OUT 0
+#define IMX8QXP_MIPI_CSI0_MCLK_OUT_LSIO_GPIO3_IO04 IMX8QXP_MIPI_CSI0_MCLK_OUT 4
+#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL IMX8QXP_MIPI_CSI0_I2C0_SCL 0
+#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_GPIO0_IO02 IMX8QXP_MIPI_CSI0_I2C0_SCL 1
+#define IMX8QXP_MIPI_CSI0_I2C0_SCL_LSIO_GPIO3_IO05 IMX8QXP_MIPI_CSI0_I2C0_SCL 4
+#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA IMX8QXP_MIPI_CSI0_I2C0_SDA 0
+#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_GPIO0_IO03 IMX8QXP_MIPI_CSI0_I2C0_SDA 1
+#define IMX8QXP_MIPI_CSI0_I2C0_SDA_LSIO_GPIO3_IO06 IMX8QXP_MIPI_CSI0_I2C0_SDA 4
+#define IMX8QXP_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01 IMX8QXP_MIPI_CSI0_GPIO0_01 0
+#define IMX8QXP_MIPI_CSI0_GPIO0_01_ADMA_I2C0_SDA IMX8QXP_MIPI_CSI0_GPIO0_01 1
+#define IMX8QXP_MIPI_CSI0_GPIO0_01_LSIO_GPIO3_IO07 IMX8QXP_MIPI_CSI0_GPIO0_01 4
+#define IMX8QXP_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00 IMX8QXP_MIPI_CSI0_GPIO0_00 0
+#define IMX8QXP_MIPI_CSI0_GPIO0_00_ADMA_I2C0_SCL IMX8QXP_MIPI_CSI0_GPIO0_00 1
+#define IMX8QXP_MIPI_CSI0_GPIO0_00_LSIO_GPIO3_IO08 IMX8QXP_MIPI_CSI0_GPIO0_00 4
+#define IMX8QXP_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8QXP_QSPI0A_DATA0 0
+#define IMX8QXP_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8QXP_QSPI0A_DATA0 4
+#define IMX8QXP_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8QXP_QSPI0A_DATA1 0
+#define IMX8QXP_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8QXP_QSPI0A_DATA1 4
+#define IMX8QXP_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8QXP_QSPI0A_DATA2 0
+#define IMX8QXP_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8QXP_QSPI0A_DATA2 4
+#define IMX8QXP_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8QXP_QSPI0A_DATA3 0
+#define IMX8QXP_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8QXP_QSPI0A_DATA3 4
+#define IMX8QXP_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8QXP_QSPI0A_DQS 0
+#define IMX8QXP_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8QXP_QSPI0A_DQS 4
+#define IMX8QXP_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8QXP_QSPI0A_SS0_B 0
+#define IMX8QXP_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8QXP_QSPI0A_SS0_B 4
+#define IMX8QXP_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B IMX8QXP_QSPI0A_SS1_B 0
+#define IMX8QXP_QSPI0A_SS1_B_LSIO_GPIO3_IO15 IMX8QXP_QSPI0A_SS1_B 4
+#define IMX8QXP_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8QXP_QSPI0A_SCLK 0
+#define IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8QXP_QSPI0A_SCLK 4
+#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8QXP_QSPI0B_SCLK 0
+#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI1A_SCLK IMX8QXP_QSPI0B_SCLK 1
+#define IMX8QXP_QSPI0B_SCLK_LSIO_KPP0_COL0 IMX8QXP_QSPI0B_SCLK 2
+#define IMX8QXP_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8QXP_QSPI0B_SCLK 4
+#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8QXP_QSPI0B_DATA0 0
+#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI1A_DATA0 IMX8QXP_QSPI0B_DATA0 1
+#define IMX8QXP_QSPI0B_DATA0_LSIO_KPP0_COL1 IMX8QXP_QSPI0B_DATA0 2
+#define IMX8QXP_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8QXP_QSPI0B_DATA0 4
+#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8QXP_QSPI0B_DATA1 0
+#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI1A_DATA1 IMX8QXP_QSPI0B_DATA1 1
+#define IMX8QXP_QSPI0B_DATA1_LSIO_KPP0_COL2 IMX8QXP_QSPI0B_DATA1 2
+#define IMX8QXP_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8QXP_QSPI0B_DATA1 4
+#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8QXP_QSPI0B_DATA2 0
+#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI1A_DATA2 IMX8QXP_QSPI0B_DATA2 1
+#define IMX8QXP_QSPI0B_DATA2_LSIO_KPP0_COL3 IMX8QXP_QSPI0B_DATA2 2
+#define IMX8QXP_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8QXP_QSPI0B_DATA2 4
+#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8QXP_QSPI0B_DATA3 0
+#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI1A_DATA3 IMX8QXP_QSPI0B_DATA3 1
+#define IMX8QXP_QSPI0B_DATA3_LSIO_KPP0_ROW0 IMX8QXP_QSPI0B_DATA3 2
+#define IMX8QXP_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8QXP_QSPI0B_DATA3 4
+#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8QXP_QSPI0B_DQS 0
+#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI1A_DQS IMX8QXP_QSPI0B_DQS 1
+#define IMX8QXP_QSPI0B_DQS_LSIO_KPP0_ROW1 IMX8QXP_QSPI0B_DQS 2
+#define IMX8QXP_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8QXP_QSPI0B_DQS 4
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8QXP_QSPI0B_SS0_B 0
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI1A_SS0_B IMX8QXP_QSPI0B_SS0_B 1
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_KPP0_ROW2 IMX8QXP_QSPI0B_SS0_B 2
+#define IMX8QXP_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8QXP_QSPI0B_SS0_B 4
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B IMX8QXP_QSPI0B_SS1_B 0
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI1A_SS1_B IMX8QXP_QSPI0B_SS1_B 1
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_KPP0_ROW3 IMX8QXP_QSPI0B_SS1_B 2
+#define IMX8QXP_QSPI0B_SS1_B_LSIO_GPIO3_IO24 IMX8QXP_QSPI0B_SS1_B 4
+
+#endif /* _IMX8QXP_PADS_H */
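Each of the pad/function macros above expands to the <pin_id mux_mode> pair described in the format comment, and a board device tree adds a third cell with the pad configuration word. A minimal consumer sketch follows; the group name and the 0x06000020 configuration value are illustrative assumptions, not part of this header:

	&iomuxc {
		pinctrl_adma_uart0: uart0grp {
			fsl,pins = <
				IMX8QXP_UART0_RX_ADMA_UART0_RX	0x06000020
				IMX8QXP_UART0_TX_ADMA_UART0_TX	0x06000020
			>;
		};
	};

Since every macro already carries two cells, each fsl,pins entry is effectively the three-cell tuple <pin_id mux_mode config> that the i.MX8 SCU-based pinctrl driver parses.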
diff --git a/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h b/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h
new file mode 100644
index 000000000000..20f43404cac0
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-tegra-io-pad.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * pinctrl-tegra-io-pad.h: Tegra I/O pad source voltage configuration constants
+ * for pinctrl bindings.
+ *
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Aapo Vienamo <avienamo@nvidia.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_TEGRA_IO_PAD_H
+#define _DT_BINDINGS_PINCTRL_TEGRA_IO_PAD_H
+
+/* Voltage levels of the I/O pad's source rail */
+#define TEGRA_IO_PAD_VOLTAGE_1V8 0
+#define TEGRA_IO_PAD_VOLTAGE_3V3 1
+
+#endif
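These two constants are intended to be passed to the Tegra PMC pad-control nodes to declare which rail powers a given I/O pad group. The sketch below assumes the generic power-source pinctrl property and an illustrative pad name; both are assumptions about the usual Tegra PMC pad binding rather than something this header defines:

	&pmc {
		sdmmc1_3v3: sdmmc1-3v3 {
			pins = "sdmmc1-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
		};
	};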
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
new file mode 100644
index 000000000000..2d0c23e5d3a7
--- /dev/null
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Defines macros and constants for Renesas RZ/A2 pin controller pin
+ * muxing functions.
+ */
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H
+#define __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H
+
+#define RZA2_PINS_PER_PORT 8
+
+/* Port names as labeled in the Hardware Manual */
+#define PORT0 0
+#define PORT1 1
+#define PORT2 2
+#define PORT3 3
+#define PORT4 4
+#define PORT5 5
+#define PORT6 6
+#define PORT7 7
+#define PORT8 8
+#define PORT9 9
+#define PORTA 10
+#define PORTB 11
+#define PORTC 12
+#define PORTD 13
+#define PORTE 14
+#define PORTF 15
+#define PORTG 16
+#define PORTH 17
+/* No I */
+#define PORTJ 18
+#define PORTK 19
+#define PORTL 20
+#define PORTM 21 /* Pins PM_0/1 are labeled JP_0/1 in HW manual */
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZA2_PINMUX(b, p, f) ((b) * RZA2_PINS_PER_PORT + (p) | (f << 16))
+
+/*
+ * Convert a port and pin label to its global pin index
+ */
+#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
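RZA2_PINMUX() packs the linear pin index (port * RZA2_PINS_PER_PORT + pin) into the low bits and the alternate-function number into bits 16 and up; RZA2_PINMUX(PORTA, 8, 1), for instance, encodes pin index 88 with function 1. A consumer-node sketch, with the port, pin and function numbers chosen purely for illustration:

	scif4_pins: serial4 {
		pinmux = <RZA2_PINMUX(PORTA, 8, 1)>,	/* TxD */
			 <RZA2_PINMUX(PORTA, 9, 1)>;	/* RxD */
	};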
diff --git a/include/dt-bindings/pinctrl/rzn1-pinctrl.h b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
new file mode 100644
index 000000000000..21d6cc4d59f5
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzn1-pinctrl.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Defines macros and constants for Renesas RZ/N1 pin controller pin
+ * muxing functions.
+ */
+#ifndef __DT_BINDINGS_RZN1_PINCTRL_H
+#define __DT_BINDINGS_RZN1_PINCTRL_H
+
+#define RZN1_PINMUX(_gpio, _func) \
+ (((_func) << 8) | (_gpio))
+
+/*
+ * Given the different levels of muxing on the SoC, it was decided to
+ * 'linearize' them into one numerical space. So mux level 1, 2 and the MDIO
+ * muxes are all represented by one single value.
+ *
+ * You can derive the hardware value pretty easily too, as
+ * 0...9 are Level 1
+ * 10...71 are Level 2. The Level 2 mux will be set to this
+ * value - RZN1_FUNC_L2_OFFSET, and the Level 1 mux will be
+ * set accordingly.
+ * 72...103 are for the 2 MDIO muxes.
+ */
+#define RZN1_FUNC_HIGHZ 0
+#define RZN1_FUNC_0L 1
+#define RZN1_FUNC_CLK_ETH_MII_RGMII_RMII 2
+#define RZN1_FUNC_CLK_ETH_NAND 3
+#define RZN1_FUNC_QSPI 4
+#define RZN1_FUNC_SDIO 5
+#define RZN1_FUNC_LCD 6
+#define RZN1_FUNC_LCD_E 7
+#define RZN1_FUNC_MSEBIM 8
+#define RZN1_FUNC_MSEBIS 9
+#define RZN1_FUNC_L2_OFFSET 10 /* I'm Special */
+
+#define RZN1_FUNC_HIGHZ1 (RZN1_FUNC_L2_OFFSET + 0)
+#define RZN1_FUNC_ETHERCAT (RZN1_FUNC_L2_OFFSET + 1)
+#define RZN1_FUNC_SERCOS3 (RZN1_FUNC_L2_OFFSET + 2)
+#define RZN1_FUNC_SDIO_E (RZN1_FUNC_L2_OFFSET + 3)
+#define RZN1_FUNC_ETH_MDIO (RZN1_FUNC_L2_OFFSET + 4)
+#define RZN1_FUNC_ETH_MDIO_E1 (RZN1_FUNC_L2_OFFSET + 5)
+#define RZN1_FUNC_USB (RZN1_FUNC_L2_OFFSET + 6)
+#define RZN1_FUNC_MSEBIM_E (RZN1_FUNC_L2_OFFSET + 7)
+#define RZN1_FUNC_MSEBIS_E (RZN1_FUNC_L2_OFFSET + 8)
+#define RZN1_FUNC_RSV (RZN1_FUNC_L2_OFFSET + 9)
+#define RZN1_FUNC_RSV_E (RZN1_FUNC_L2_OFFSET + 10)
+#define RZN1_FUNC_RSV_E1 (RZN1_FUNC_L2_OFFSET + 11)
+#define RZN1_FUNC_UART0_I (RZN1_FUNC_L2_OFFSET + 12)
+#define RZN1_FUNC_UART0_I_E (RZN1_FUNC_L2_OFFSET + 13)
+#define RZN1_FUNC_UART1_I (RZN1_FUNC_L2_OFFSET + 14)
+#define RZN1_FUNC_UART1_I_E (RZN1_FUNC_L2_OFFSET + 15)
+#define RZN1_FUNC_UART2_I (RZN1_FUNC_L2_OFFSET + 16)
+#define RZN1_FUNC_UART2_I_E (RZN1_FUNC_L2_OFFSET + 17)
+#define RZN1_FUNC_UART0 (RZN1_FUNC_L2_OFFSET + 18)
+#define RZN1_FUNC_UART0_E (RZN1_FUNC_L2_OFFSET + 19)
+#define RZN1_FUNC_UART1 (RZN1_FUNC_L2_OFFSET + 20)
+#define RZN1_FUNC_UART1_E (RZN1_FUNC_L2_OFFSET + 21)
+#define RZN1_FUNC_UART2 (RZN1_FUNC_L2_OFFSET + 22)
+#define RZN1_FUNC_UART2_E (RZN1_FUNC_L2_OFFSET + 23)
+#define RZN1_FUNC_UART3 (RZN1_FUNC_L2_OFFSET + 24)
+#define RZN1_FUNC_UART3_E (RZN1_FUNC_L2_OFFSET + 25)
+#define RZN1_FUNC_UART4 (RZN1_FUNC_L2_OFFSET + 26)
+#define RZN1_FUNC_UART4_E (RZN1_FUNC_L2_OFFSET + 27)
+#define RZN1_FUNC_UART5 (RZN1_FUNC_L2_OFFSET + 28)
+#define RZN1_FUNC_UART5_E (RZN1_FUNC_L2_OFFSET + 29)
+#define RZN1_FUNC_UART6 (RZN1_FUNC_L2_OFFSET + 30)
+#define RZN1_FUNC_UART6_E (RZN1_FUNC_L2_OFFSET + 31)
+#define RZN1_FUNC_UART7 (RZN1_FUNC_L2_OFFSET + 32)
+#define RZN1_FUNC_UART7_E (RZN1_FUNC_L2_OFFSET + 33)
+#define RZN1_FUNC_SPI0_M (RZN1_FUNC_L2_OFFSET + 34)
+#define RZN1_FUNC_SPI0_M_E (RZN1_FUNC_L2_OFFSET + 35)
+#define RZN1_FUNC_SPI1_M (RZN1_FUNC_L2_OFFSET + 36)
+#define RZN1_FUNC_SPI1_M_E (RZN1_FUNC_L2_OFFSET + 37)
+#define RZN1_FUNC_SPI2_M (RZN1_FUNC_L2_OFFSET + 38)
+#define RZN1_FUNC_SPI2_M_E (RZN1_FUNC_L2_OFFSET + 39)
+#define RZN1_FUNC_SPI3_M (RZN1_FUNC_L2_OFFSET + 40)
+#define RZN1_FUNC_SPI3_M_E (RZN1_FUNC_L2_OFFSET + 41)
+#define RZN1_FUNC_SPI4_S (RZN1_FUNC_L2_OFFSET + 42)
+#define RZN1_FUNC_SPI4_S_E (RZN1_FUNC_L2_OFFSET + 43)
+#define RZN1_FUNC_SPI5_S (RZN1_FUNC_L2_OFFSET + 44)
+#define RZN1_FUNC_SPI5_S_E (RZN1_FUNC_L2_OFFSET + 45)
+#define RZN1_FUNC_SGPIO0_M (RZN1_FUNC_L2_OFFSET + 46)
+#define RZN1_FUNC_SGPIO1_M (RZN1_FUNC_L2_OFFSET + 47)
+#define RZN1_FUNC_GPIO (RZN1_FUNC_L2_OFFSET + 48)
+#define RZN1_FUNC_CAN (RZN1_FUNC_L2_OFFSET + 49)
+#define RZN1_FUNC_I2C (RZN1_FUNC_L2_OFFSET + 50)
+#define RZN1_FUNC_SAFE (RZN1_FUNC_L2_OFFSET + 51)
+#define RZN1_FUNC_PTO_PWM (RZN1_FUNC_L2_OFFSET + 52)
+#define RZN1_FUNC_PTO_PWM1 (RZN1_FUNC_L2_OFFSET + 53)
+#define RZN1_FUNC_PTO_PWM2 (RZN1_FUNC_L2_OFFSET + 54)
+#define RZN1_FUNC_PTO_PWM3 (RZN1_FUNC_L2_OFFSET + 55)
+#define RZN1_FUNC_PTO_PWM4 (RZN1_FUNC_L2_OFFSET + 56)
+#define RZN1_FUNC_DELTA_SIGMA (RZN1_FUNC_L2_OFFSET + 57)
+#define RZN1_FUNC_SGPIO2_M (RZN1_FUNC_L2_OFFSET + 58)
+#define RZN1_FUNC_SGPIO3_M (RZN1_FUNC_L2_OFFSET + 59)
+#define RZN1_FUNC_SGPIO4_S (RZN1_FUNC_L2_OFFSET + 60)
+#define RZN1_FUNC_MAC_MTIP_SWITCH (RZN1_FUNC_L2_OFFSET + 61)
+
+#define RZN1_FUNC_MDIO_OFFSET (RZN1_FUNC_L2_OFFSET + 62)
+
+/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO function */
+#define RZN1_FUNC_MDIO0_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 0)
+#define RZN1_FUNC_MDIO0_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 1)
+#define RZN1_FUNC_MDIO0_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 2)
+#define RZN1_FUNC_MDIO0_ECAT (RZN1_FUNC_MDIO_OFFSET + 3)
+#define RZN1_FUNC_MDIO0_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 4)
+#define RZN1_FUNC_MDIO0_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 5)
+#define RZN1_FUNC_MDIO0_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 6)
+#define RZN1_FUNC_MDIO0_SWITCH (RZN1_FUNC_MDIO_OFFSET + 7)
+/* These are MDIO0 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */
+#define RZN1_FUNC_MDIO0_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 8)
+#define RZN1_FUNC_MDIO0_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 9)
+#define RZN1_FUNC_MDIO0_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 10)
+#define RZN1_FUNC_MDIO0_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 11)
+#define RZN1_FUNC_MDIO0_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 12)
+#define RZN1_FUNC_MDIO0_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 13)
+#define RZN1_FUNC_MDIO0_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 14)
+#define RZN1_FUNC_MDIO0_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 15)
+
+/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO function */
+#define RZN1_FUNC_MDIO1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 16)
+#define RZN1_FUNC_MDIO1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 17)
+#define RZN1_FUNC_MDIO1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 18)
+#define RZN1_FUNC_MDIO1_ECAT (RZN1_FUNC_MDIO_OFFSET + 19)
+#define RZN1_FUNC_MDIO1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 20)
+#define RZN1_FUNC_MDIO1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 21)
+#define RZN1_FUNC_MDIO1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 22)
+#define RZN1_FUNC_MDIO1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 23)
+/* These are MDIO1 peripherals for the RZN1_FUNC_ETH_MDIO_E1 function */
+#define RZN1_FUNC_MDIO1_E1_HIGHZ (RZN1_FUNC_MDIO_OFFSET + 24)
+#define RZN1_FUNC_MDIO1_E1_GMAC0 (RZN1_FUNC_MDIO_OFFSET + 25)
+#define RZN1_FUNC_MDIO1_E1_GMAC1 (RZN1_FUNC_MDIO_OFFSET + 26)
+#define RZN1_FUNC_MDIO1_E1_ECAT (RZN1_FUNC_MDIO_OFFSET + 27)
+#define RZN1_FUNC_MDIO1_E1_S3_MDIO0 (RZN1_FUNC_MDIO_OFFSET + 28)
+#define RZN1_FUNC_MDIO1_E1_S3_MDIO1 (RZN1_FUNC_MDIO_OFFSET + 29)
+#define RZN1_FUNC_MDIO1_E1_HWRTOS (RZN1_FUNC_MDIO_OFFSET + 30)
+#define RZN1_FUNC_MDIO1_E1_SWITCH (RZN1_FUNC_MDIO_OFFSET + 31)
+
+#define RZN1_FUNC_MAX (RZN1_FUNC_MDIO_OFFSET + 32)
+
+#endif /* __DT_BINDINGS_RZN1_PINCTRL_H */
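A consumer node combines one of the linearized function values with a GPIO number through RZN1_PINMUX(). For a Level 2 function such as RZN1_FUNC_UART0_I (value 22), the comment above implies the hardware Level 2 mux is programmed with 22 - RZN1_FUNC_L2_OFFSET = 12 and the Level 1 mux is set accordingly. A sketch, with the GPIO numbers chosen only for illustration:

	pins_uart0: pins-uart0 {
		pinmux = <
			RZN1_PINMUX(103, RZN1_FUNC_UART0_I)	/* UART0 TX */
			RZN1_PINMUX(104, RZN1_FUNC_UART0_I)	/* UART0 RX */
		>;
	};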
diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h
new file mode 100644
index 000000000000..8a513bd9166e
--- /dev/null
+++ b/include/dt-bindings/power/imx8mq-power.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MQ_POWER_H__
+#define __DT_BINDINGS_IMX8MQ_POWER_H__
+
+#define IMX8M_POWER_DOMAIN_MIPI 0
+#define IMX8M_POWER_DOMAIN_PCIE1 1
+#define IMX8M_POWER_DOMAIN_USB_OTG1 2
+#define IMX8M_POWER_DOMAIN_USB_OTG2 3
+#define IMX8M_POWER_DOMAIN_DDR1 4
+#define IMX8M_POWER_DOMAIN_GPU 5
+#define IMX8M_POWER_DOMAIN_VPU 6
+#define IMX8M_POWER_DOMAIN_DISP 7
+#define IMX8M_POWER_DOMAIN_MIPI_CSI1 8
+#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9
+#define IMX8M_POWER_DOMAIN_PCIE2 10
+
+#endif
diff --git a/include/dt-bindings/power/owl-s900-powergate.h b/include/dt-bindings/power/owl-s900-powergate.h
new file mode 100644
index 000000000000..d939bd964657
--- /dev/null
+++ b/include/dt-bindings/power/owl-s900-powergate.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
+/*
+ * Actions Semi S900 SPS
+ *
+ * Copyright (c) 2018 Linaro Ltd.
+ */
+#ifndef DT_BINDINGS_POWER_OWL_S900_POWERGATE_H
+#define DT_BINDINGS_POWER_OWL_S900_POWERGATE_H
+
+#define S900_PD_GPU_B 0
+#define S900_PD_VCE 1
+#define S900_PD_SENSOR 2
+#define S900_PD_VDE 3
+#define S900_PD_HDE 4
+#define S900_PD_USB3 5
+#define S900_PD_DDR0 6
+#define S900_PD_DDR1 7
+#define S900_PD_DE 8
+#define S900_PD_NAND 9
+#define S900_PD_USB2_H0 10
+#define S900_PD_USB2_H1 11
+
+#endif
diff --git a/include/dt-bindings/power/r8a7744-sysc.h b/include/dt-bindings/power/r8a7744-sysc.h
new file mode 100644
index 000000000000..8b6529778f98
--- /dev/null
+++ b/include/dt-bindings/power/r8a7744-sysc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7744_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7744_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ *
+ * Note that RZ/G1N is identical to RZ/G1M w.r.t. power domains.
+ */
+
+#define R8A7744_PD_CA15_CPU0 0
+#define R8A7744_PD_CA15_CPU1 1
+#define R8A7744_PD_CA15_SCU 12
+#define R8A7744_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7744_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7744_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a774a1-sysc.h b/include/dt-bindings/power/r8a774a1-sysc.h
new file mode 100644
index 000000000000..580f431cd32e
--- /dev/null
+++ b/include/dt-bindings/power/r8a774a1-sysc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774A1_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774A1_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774A1_PD_CA57_CPU0 0
+#define R8A774A1_PD_CA57_CPU1 1
+#define R8A774A1_PD_CA53_CPU0 5
+#define R8A774A1_PD_CA53_CPU1 6
+#define R8A774A1_PD_CA53_CPU2 7
+#define R8A774A1_PD_CA53_CPU3 8
+#define R8A774A1_PD_CA57_SCU 12
+#define R8A774A1_PD_A3VC 14
+#define R8A774A1_PD_3DG_A 17
+#define R8A774A1_PD_3DG_B 18
+#define R8A774A1_PD_CA53_SCU 21
+#define R8A774A1_PD_A2VC0 25
+#define R8A774A1_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774A1_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774A1_SYSC_H__ */
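Device nodes refer to these power areas by passing the index as the cell of the SYSC power-domain controller, e.g. power-domains = <&sysc R8A774A1_PD_A3VC> for a device in the A3VC area. A sketch; the node name, unit address and omitted properties are placeholders:

	vpu: video-codec@fe900000 {
		/* compatible, reg, clocks, ... omitted from this sketch */
		power-domains = <&sysc R8A774A1_PD_A3VC>;
	};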
diff --git a/include/dt-bindings/power/r8a774c0-sysc.h b/include/dt-bindings/power/r8a774c0-sysc.h
new file mode 100644
index 000000000000..9922d4c6f87d
--- /dev/null
+++ b/include/dt-bindings/power/r8a774c0-sysc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774C0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774C0_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774C0_PD_CA53_CPU0 5
+#define R8A774C0_PD_CA53_CPU1 6
+#define R8A774C0_PD_A3VC 14
+#define R8A774C0_PD_3DG_A 17
+#define R8A774C0_PD_3DG_B 18
+#define R8A774C0_PD_CA53_SCU 21
+#define R8A774C0_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774C0_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774C0_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a77970-sysc.h b/include/dt-bindings/power/r8a77970-sysc.h
index bf54779d1625..85cc5f23cf9f 100644
--- a/include/dt-bindings/power/r8a77970-sysc.h
+++ b/include/dt-bindings/power/r8a77970-sysc.h
@@ -16,13 +16,12 @@
#define R8A77970_PD_CA53_CPU0 5
#define R8A77970_PD_CA53_CPU1 6
-#define R8A77970_PD_CR7 13
#define R8A77970_PD_CA53_SCU 21
#define R8A77970_PD_A2IR0 23
-#define R8A77970_PD_A3IR 24
+#define R8A77970_PD_A3IR 24
#define R8A77970_PD_A2IR1 27
-#define R8A77970_PD_A2IR2 28
-#define R8A77970_PD_A2IR3 29
+#define R8A77970_PD_A2DP 28
+#define R8A77970_PD_A2CN 29
#define R8A77970_PD_A2SC0 30
#define R8A77970_PD_A2SC1 31
diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h
index 2c90c1237725..e12c8587b87e 100644
--- a/include/dt-bindings/power/r8a77980-sysc.h
+++ b/include/dt-bindings/power/r8a77980-sysc.h
@@ -15,14 +15,14 @@
#define R8A77980_PD_A2SC2 0
#define R8A77980_PD_A2SC3 1
#define R8A77980_PD_A2SC4 2
-#define R8A77980_PD_A2PD0 3
-#define R8A77980_PD_A2PD1 4
+#define R8A77980_PD_A2DP0 3
+#define R8A77980_PD_A2DP1 4
#define R8A77980_PD_CA53_CPU0 5
#define R8A77980_PD_CA53_CPU1 6
#define R8A77980_PD_CA53_CPU2 7
#define R8A77980_PD_CA53_CPU3 8
#define R8A77980_PD_A2CN 10
-#define R8A77980_PD_A3VIP 11
+#define R8A77980_PD_A3VIP0 11
#define R8A77980_PD_A2IR5 12
#define R8A77980_PD_CR7 13
#define R8A77980_PD_A2IR4 15
diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h
index b3ff8e09a78f..3575f9f4b0bd 100644
--- a/include/dt-bindings/power/raspberrypi-power.h
+++ b/include/dt-bindings/power/raspberrypi-power.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2015 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H
diff --git a/include/dt-bindings/power/rk3066-power.h b/include/dt-bindings/power/rk3066-power.h
new file mode 100644
index 000000000000..acf9f310ac53
--- /dev/null
+++ b/include/dt-bindings/power/rk3066-power.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3066_POWER_H__
+#define __DT_BINDINGS_POWER_RK3066_POWER_H__
+
+/* VD_CORE */
+#define RK3066_PD_A9_0 0
+#define RK3066_PD_A9_1 1
+#define RK3066_PD_DBG 4
+#define RK3066_PD_SCU 5
+
+/* VD_LOGIC */
+#define RK3066_PD_VIDEO 6
+#define RK3066_PD_VIO 7
+#define RK3066_PD_GPU 8
+#define RK3066_PD_PERI 9
+#define RK3066_PD_CPU 10
+#define RK3066_PD_ALIVE 11
+
+/* VD_PMU */
+#define RK3066_PD_RTC 12
+
+#endif
diff --git a/include/dt-bindings/power/rk3188-power.h b/include/dt-bindings/power/rk3188-power.h
new file mode 100644
index 000000000000..93d23dfba33f
--- /dev/null
+++ b/include/dt-bindings/power/rk3188-power.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3188_POWER_H__
+#define __DT_BINDINGS_POWER_RK3188_POWER_H__
+
+/* VD_CORE */
+#define RK3188_PD_A9_0 0
+#define RK3188_PD_A9_1 1
+#define RK3188_PD_A9_2 2
+#define RK3188_PD_A9_3 3
+#define RK3188_PD_DBG 4
+#define RK3188_PD_SCU 5
+
+/* VD_LOGIC */
+#define RK3188_PD_VIDEO 6
+#define RK3188_PD_VIO 7
+#define RK3188_PD_GPU 8
+#define RK3188_PD_PERI 9
+#define RK3188_PD_CPU 10
+#define RK3188_PD_ALIVE 11
+
+/* VD_PMU */
+#define RK3188_PD_RTC 12
+
+#endif
diff --git a/include/dt-bindings/regulator/active-semi,8945a-regulator.h b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
new file mode 100644
index 000000000000..9bdba5e3141a
--- /dev/null
+++ b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Microchip Technology, Inc. All rights reserved.
+ *
+ * Device Tree binding constants for the ACT8945A PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8945A_H
+#define _DT_BINDINGS_REGULATOR_ACT8945A_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8945A regulators as follows:
+ * ACT8945A_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it
+ * specifies the usage of fixed-frequency
+ * PWM.
+ *
+ * ACT8945A_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it
+ * specifies the usage of normal mode.
+ *
+ * ACT8945A_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators, it specifies
+ * the usage of proprietary power-saving
+ * mode.
+ */
+
+#define ACT8945A_REGULATOR_MODE_FIXED 1
+#define ACT8945A_REGULATOR_MODE_NORMAL 2
+#define ACT8945A_REGULATOR_MODE_LOWPOWER 3
+
+#endif
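On the consumer side the constants plug into the generic regulator mode properties of an ACT8945A regulator node. A sketch, with the regulator name and voltages chosen only for illustration:

	REG_DCDC1 {
		regulator-name = "VDD_1V35";
		regulator-min-microvolt = <1350000>;
		regulator-max-microvolt = <1350000>;
		regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED
					   ACT8945A_REGULATOR_MODE_LOWPOWER>;
		regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
	};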
diff --git a/include/dt-bindings/reset/actions,s700-reset.h b/include/dt-bindings/reset/actions,s700-reset.h
new file mode 100644
index 000000000000..5e3b16b8ef53
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s700-reset.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+//
+// Device Tree binding constants for Actions Semi S700 Reset Management Unit
+//
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_ACTIONS_S700_RESET_H
+#define __DT_BINDINGS_ACTIONS_S700_RESET_H
+
+#define RESET_AUDIO 0
+#define RESET_CSI 1
+#define RESET_DE 2
+#define RESET_DSI 3
+#define RESET_GPIO 4
+#define RESET_I2C0 5
+#define RESET_I2C1 6
+#define RESET_I2C2 7
+#define RESET_I2C3 8
+#define RESET_KEY 9
+#define RESET_LCD0 10
+#define RESET_SI 11
+#define RESET_SPI0 12
+#define RESET_SPI1 13
+#define RESET_SPI2 14
+#define RESET_SPI3 15
+#define RESET_UART0 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_UART3 19
+#define RESET_UART4 20
+#define RESET_UART5 21
+#define RESET_UART6 22
+
+#endif /* __DT_BINDINGS_ACTIONS_S700_RESET_H */
diff --git a/include/dt-bindings/reset/actions,s900-reset.h b/include/dt-bindings/reset/actions,s900-reset.h
new file mode 100644
index 000000000000..42c19d02e43b
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s900-reset.h
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+//
+// Device Tree binding constants for Actions Semi S900 Reset Management Unit
+//
+// Copyright (c) 2018 Linaro Ltd.
+
+#ifndef __DT_BINDINGS_ACTIONS_S900_RESET_H
+#define __DT_BINDINGS_ACTIONS_S900_RESET_H
+
+#define RESET_CHIPID 0
+#define RESET_CPU_SCNT 1
+#define RESET_SRAMI 2
+#define RESET_DDR_CTL_PHY 3
+#define RESET_DMAC 4
+#define RESET_GPIO 5
+#define RESET_BISP_AXI 6
+#define RESET_CSI0 7
+#define RESET_CSI1 8
+#define RESET_DE 9
+#define RESET_DSI 10
+#define RESET_GPU3D_PA 11
+#define RESET_GPU3D_PB 12
+#define RESET_HDE 13
+#define RESET_I2C0 14
+#define RESET_I2C1 15
+#define RESET_I2C2 16
+#define RESET_I2C3 17
+#define RESET_I2C4 18
+#define RESET_I2C5 19
+#define RESET_IMX 20
+#define RESET_NANDC0 21
+#define RESET_NANDC1 22
+#define RESET_SD0 23
+#define RESET_SD1 24
+#define RESET_SD2 25
+#define RESET_SD3 26
+#define RESET_SPI0 27
+#define RESET_SPI1 28
+#define RESET_SPI2 29
+#define RESET_SPI3 30
+#define RESET_UART0 31
+#define RESET_UART1 32
+#define RESET_UART2 33
+#define RESET_UART3 34
+#define RESET_UART4 35
+#define RESET_UART5 36
+#define RESET_UART6 37
+#define RESET_HDMI 38
+#define RESET_LVDS 39
+#define RESET_EDP 40
+#define RESET_USB2HUB 41
+#define RESET_USB2HSIC 42
+#define RESET_USB3 43
+#define RESET_PCM1 44
+#define RESET_AUDIO 45
+#define RESET_PCM0 46
+#define RESET_SE 47
+#define RESET_GIC 48
+#define RESET_DDR_CTL_PHY_AXI 49
+#define RESET_CMU_DDR 50
+#define RESET_DMM 51
+#define RESET_HDCP2TX 52
+#define RESET_ETHERNET 53
+
+#endif /* __DT_BINDINGS_ACTIONS_S900_RESET_H */
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
new file mode 100644
index 000000000000..05c36367875c
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H
+#define _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H
+
+#define AXG_ARB_TODDR_A 0
+#define AXG_ARB_TODDR_B 1
+#define AXG_ARB_TODDR_C 2
+#define AXG_ARB_FRDDR_A 3
+#define AXG_ARB_FRDDR_B 4
+#define AXG_ARB_FRDDR_C 5
+
+#endif /* _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H */
diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h
index 63948170c7b2..31b3f87dde9a 100644
--- a/include/dt-bindings/reset/imx7-reset.h
+++ b/include/dt-bindings/reset/imx7-reset.h
@@ -56,7 +56,9 @@
#define IMX7_RESET_DDRC_PRST 23
#define IMX7_RESET_DDRC_CORE_RST 24
-#define IMX7_RESET_NUM 25
+#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF 25
+
+#define IMX7_RESET_NUM 26
#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-aoss.h b/include/dt-bindings/reset/qcom,sdm845-aoss.h
new file mode 100644
index 000000000000..476c5fc873b6
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sdm845-aoss.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_AOSS_SDM_845_H
+#define _DT_BINDINGS_RESET_AOSS_SDM_845_H
+
+#define AOSS_CC_MSS_RESTART 0
+#define AOSS_CC_CAMSS_RESTART 1
+#define AOSS_CC_VENUS_RESTART 2
+#define AOSS_CC_GPU_RESTART 3
+#define AOSS_CC_DISPSS_RESTART 4
+#define AOSS_CC_WCSS_RESTART 5
+#define AOSS_CC_LPASS_RESTART 6
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h
new file mode 100644
index 000000000000..53c37f9c319a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_PDC_SDM_845_H
+#define _DT_BINDINGS_RESET_PDC_SDM_845_H
+
+#define PDC_APPS_SYNC_RESET 0
+#define PDC_SP_SYNC_RESET 1
+#define PDC_AUDIO_SYNC_RESET 2
+#define PDC_SENSORS_SYNC_RESET 3
+#define PDC_AOP_SYNC_RESET 4
+#define PDC_DEBUG_SYNC_RESET 5
+#define PDC_GPU_SYNC_RESET 6
+#define PDC_DISPLAY_SYNC_RESET 7
+#define PDC_COMPUTE_SYNC_RESET 8
+#define PDC_MODEM_SYNC_RESET 9
+
+#endif
diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h
index 9526017432f0..1c36a6ac86d6 100644
--- a/include/dt-bindings/reset/sun8i-de2.h
+++ b/include/dt-bindings/reset/sun8i-de2.h
@@ -10,5 +10,6 @@
#define RST_MIXER0 0
#define RST_MIXER1 1
#define RST_WB 2
+#define RST_ROT 3
#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/reset/suniv-ccu-f1c100s.h b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..6a4b4385fe5a
--- /dev/null
+++ b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+
+#define RST_USB_PHY0 0
+#define RST_BUS_DMA 1
+#define RST_BUS_MMC0 2
+#define RST_BUS_MMC1 3
+#define RST_BUS_DRAM 4
+#define RST_BUS_SPI0 5
+#define RST_BUS_SPI1 6
+#define RST_BUS_OTG 7
+#define RST_BUS_VE 8
+#define RST_BUS_LCD 9
+#define RST_BUS_DEINTERLACE 10
+#define RST_BUS_CSI 11
+#define RST_BUS_TVD 12
+#define RST_BUS_TVE 13
+#define RST_BUS_DE_BE 14
+#define RST_BUS_DE_FE 15
+#define RST_BUS_CODEC 16
+#define RST_BUS_SPDIF 17
+#define RST_BUS_IR 18
+#define RST_BUS_RSB 19
+#define RST_BUS_I2S0 20
+#define RST_BUS_I2C0 21
+#define RST_BUS_I2C1 22
+#define RST_BUS_I2C2 23
+#define RST_BUS_UART0 24
+#define RST_BUS_UART1 25
+#define RST_BUS_UART2 26
+
+#endif /* _DT_BINDINGS_RST_SUNIV_F1C100S_H_ */
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index e2d3892240b8..1df06f8ad5c3 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -106,6 +106,7 @@
#define QUINARY_TDM_TX_6 101
#define QUINARY_TDM_RX_7 102
#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/dt-bindings/thermal/tegra194-bpmp-thermal.h b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h
new file mode 100644
index 000000000000..aa7fb08135ca
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h
@@ -0,0 +1,15 @@
+/*
+ * This header provides constants for binding nvidia,tegra194-bpmp-thermal.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H
+#define _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H
+
+#define TEGRA194_BPMP_THERMAL_ZONE_CPU 2
+#define TEGRA194_BPMP_THERMAL_ZONE_GPU 3
+#define TEGRA194_BPMP_THERMAL_ZONE_AUX 4
+#define TEGRA194_BPMP_THERMAL_ZONE_PLLX 5
+#define TEGRA194_BPMP_THERMAL_ZONE_AO 6
+#define TEGRA194_BPMP_THERMAL_ZONE_TJ_MAX 7
+
+#endif
diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h
index 0646500bca69..642e4e7f4084 100644
--- a/include/dt-bindings/thermal/thermal_exynos.h
+++ b/include/dt-bindings/thermal/thermal_exynos.h
@@ -1,19 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
*
* Copyright (C) 2014 Samsung Electronics
* Lukasz Majewski <l.majewski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _EXYNOS_THERMAL_TMU_DT_H
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index 7b7a92fefa0a..985f2bbd4d24 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -59,4 +59,30 @@
(PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+#define APDO_TYPE_PPS 0
+
+#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_MASK 0x3
+
+#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
+
+#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */
+#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */
+#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */
+
+#define PDO_PPS_APDO_VOLT_MASK 0xff
+#define PDO_PPS_APDO_CURR_MASK 0x7f
+
+#define PDO_PPS_APDO_MIN_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_CURR(ma) \
+ ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT)
+
+#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \
+ PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
+ PDO_PPS_APDO_MAX_CURR(max_ma))
+
#endif /* __DT_POWER_DELIVERY_H */
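For illustration only (not part of the patch), a hypothetical 3.3 V-11 V / 3 A sink PPS capability can be composed purely from the macros added above; the arithmetic is 3300/100 = 33 into bits 15:8, 11000/100 = 110 into bits 24:17 and 3000/50 = 60 into bits 6:0, with the APDO type field left at 0 (PPS). The example name is made up for this sketch:

#include <dt-bindings/usb/pd.h>

/* Hypothetical example value built only from the macros added above. */
#define EXAMPLE_SNK_PPS_APDO	PDO_PPS_APDO(3300, 11000, 3000)
/*
 * Expands to PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) |
 * (33 << PDO_PPS_APDO_MIN_VOLT_SHIFT) | (110 << PDO_PPS_APDO_MAX_VOLT_SHIFT) |
 * (60 << PDO_PPS_APDO_MAX_CURR_SHIFT).
 */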
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index e0a9c2368872..9ce2f0fae57e 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -17,6 +17,8 @@
#include <linux/seq_file.h>
#include <keys/asymmetric-type.h>
+struct kernel_pkey_query;
+struct kernel_pkey_params;
struct public_key_signature;
/*
@@ -34,6 +36,13 @@ struct asymmetric_key_subtype {
/* Destroy a key of this subtype */
void (*destroy)(void *payload_crypto, void *payload_auth);
+ int (*query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+
+ /* Encrypt/decrypt/sign data */
+ int (*eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+
/* Verify the signature on a key of this subtype (optional) */
int (*verify_signature)(const struct key *key,
const struct public_key_signature *sig);
diff --git a/include/keys/trusted.h b/include/keys/trusted.h
new file mode 100644
index 000000000000..adbcb6817826
--- /dev/null
+++ b/include/keys/trusted.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TRUSTED_KEY_H
+#define __TRUSTED_KEY_H
+
+/* implementation specific TPM constants */
+#define MAX_BUF_SIZE 1024
+#define TPM_GETRANDOM_SIZE 14
+#define TPM_OSAP_SIZE 36
+#define TPM_OIAP_SIZE 10
+#define TPM_SEAL_SIZE 87
+#define TPM_UNSEAL_SIZE 104
+#define TPM_SIZE_OFFSET 2
+#define TPM_RETURN_OFFSET 6
+#define TPM_DATA_OFFSET 10
+
+#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
+#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
+#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
+
+struct tpm_buf {
+ int len;
+ unsigned char data[MAX_BUF_SIZE];
+};
+
+#define INIT_BUF(tb) (tb->len = 0)
+
+struct osapsess {
+ uint32_t handle;
+ unsigned char secret[SHA1_DIGEST_SIZE];
+ unsigned char enonce[TPM_NONCE_SIZE];
+};
+
+/* discrete values, but have to store in uint16_t for TPM use */
+enum {
+ SEAL_keytype = 1,
+ SRK_keytype = 4
+};
+
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, unsigned char *h1,
+ unsigned char *h2, unsigned char h3, ...);
+int TSS_checkhmac1(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key,
+ unsigned int keylen, ...);
+
+int trusted_tpm_send(unsigned char *cmd, size_t buflen);
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+
+#define TPM_DEBUG 0
+
+#if TPM_DEBUG
+static inline void dump_options(struct trusted_key_options *o)
+{
+ pr_info("trusted_key: sealing key type %d\n", o->keytype);
+ pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
+ pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
+ pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+ print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
+ 16, 1, o->pcrinfo, o->pcrinfo_len, 0);
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+ pr_info("trusted_key: key_len %d\n", p->key_len);
+ print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+ 16, 1, p->key, p->key_len, 0);
+ pr_info("trusted_key: bloblen %d\n", p->blob_len);
+ print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+ 16, 1, p->blob, p->blob_len, 0);
+ pr_info("trusted_key: migratable %d\n", p->migratable);
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+ print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
+ 16, 1, &s->handle, 4, 0);
+ pr_info("trusted-key: secret:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+ 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
+ pr_info("trusted-key: enonce:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+ 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+ int len;
+
+ pr_info("\ntrusted-key: tpm buffer\n");
+ len = LOAD32(buf, TPM_SIZE_OFFSET);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
+}
+#else
+static inline void dump_options(struct trusted_key_options *o)
+{
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+ buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+ *(uint16_t *) & buf->data[buf->len] = htons(value);
+ buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+ *(uint32_t *) & buf->data[buf->len] = htonl(value);
+ buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+ const int len)
+{
+ memcpy(buf->data + buf->len, in, len);
+ buf->len += len;
+}
+#endif
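A hedged sketch of how the store helpers above are intended to be used, serializing a big-endian TPM 1.2 GetRandom request into a tpm_buf; the tag and ordinal constants are illustrative placeholders rather than values defined in this header:

static void example_build_getrandom(struct tpm_buf *tb)
{
	INIT_BUF(tb);
	store16(tb, 0x00c1);		 /* TPM_TAG_RQU_COMMAND (placeholder) */
	store32(tb, TPM_GETRANDOM_SIZE); /* total command length: 2+4+4+4 = 14 */
	store32(tb, 0x00000046);	 /* TPM_ORD_GetRandom (placeholder) */
	store32(tb, 4);			 /* number of random bytes requested */
}

The resulting tb->data buffer would then be handed to trusted_tpm_send(), declared above, for execution.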
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6502feb9524b..33771352dcd6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -21,7 +21,6 @@
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
-#include <linux/workqueue.h>
struct arch_timer_context {
/* Registers: control register, timer value */
@@ -52,9 +51,6 @@ struct arch_timer_cpu {
/* Background timer used when the guest is not running */
struct hrtimer bg_timer;
- /* Work queued with the above timer expires */
- struct work_struct expired;
-
/* Physical timer emulation */
struct hrtimer phys_timer;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index cfdd2484cc42..4f31f96bbfab 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -133,6 +133,7 @@ struct vgic_irq {
u8 source; /* GICv2 SGIs only */
u8 active_source; /* GICv2 SGIs only */
u8 priority;
+ u8 group; /* 0 == group 0, 1 == group 1 */
enum vgic_irq_config config; /* Level or edge */
/*
@@ -217,6 +218,12 @@ struct vgic_dist {
/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
u32 vgic_model;
+ /* Implementation revision as reported in the GICD_IIDR */
+ u32 implementation_rev;
+
+ /* Userspace can write to GICv2 IGROUPR */
+ bool v2_groups_user_writable;
+
/* Do injected MSIs require an additional device ID? */
bool msis_require_devid;
@@ -366,7 +373,7 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index de8d3d3fa651..87715f20b69a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -101,7 +101,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -340,7 +340,14 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+#ifdef CONFIG_PCI
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+#else
+static inline void acpi_penalize_sci_irq(int irq, int trigger,
+ int polarity)
+{
+}
+#endif
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -831,8 +838,6 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
-static inline void acpi_dma_deconfigure(struct device *dev) { }
-
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -1056,6 +1061,17 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
#endif
+#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C)
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+#else
+static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ return false;
+}
+#endif
+
/* Device properties */
#ifdef CONFIG_ACPI
@@ -1074,6 +1090,15 @@ static inline int acpi_node_get_property_reference(
NR_FWNODE_REFERENCE_ARGS, args);
}
+static inline bool acpi_dev_has_props(const struct acpi_device *adev)
+{
+ return !list_empty(&adev->data.properties);
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties);
+
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
int acpi_dev_prop_read_single(struct acpi_device *adev,
@@ -1306,4 +1331,14 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
}
#endif
+#ifdef CONFIG_ACPI
+extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+#else
+static inline int
+acpi_platform_notify(struct device *dev, enum kobject_action action)
+{
+ return 0;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef _LINUX_ADXL_H
+#define _LINUX_ADXL_H
+
+const char * const *adxl_get_component_names(void);
+int adxl_decode(u64 addr, u64 component_values[]);
+
+#endif /* _LINUX_ADXL_H */
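A hedged usage sketch, assuming the array returned by adxl_get_component_names() is NULL-terminated and that values[] is sized to cover every component; both assumptions, like the function name below, are illustrative rather than taken from the patch:

#include <linux/adxl.h>
#include <linux/printk.h>

static void example_report(u64 fault_addr)
{
	const char * const *names = adxl_get_component_names();
	static u64 values[16];	/* illustrative size */
	int i;

	if (adxl_decode(fault_addr, values))
		return;		/* translation unavailable or failed */

	for (i = 0; names[i]; i++)
		pr_info("adxl: %s = 0x%llx\n", names[i], values[i]);
}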
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 1b0a17b22cd3..eaedca5fe6fc 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -30,7 +30,7 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
struct ahci_host_priv *ahci_platform_get_resources(
- struct platform_device *pdev);
+ struct platform_device *pdev, unsigned int flags);
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
@@ -43,4 +43,6 @@ int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
+#define AHCI_PLATFORM_GET_RESETS 0x01
+
#endif /* _AHCI_PLATFORM_H */
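Because the prototype change is easy to miss when porting platform drivers, here is a minimal hypothetical probe fragment showing the new second argument; AHCI_PLATFORM_GET_RESETS asks the library to also claim the controller resets, and passing 0 keeps the previous behaviour:

static int example_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	/* ... port info setup and ahci_platform_init_host() follow ... */
	return 0;
}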
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
new file mode 100644
index 000000000000..da973e8a2da8
--- /dev/null
+++ b/include/linux/alcor_pci.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * Driver for Alcor Micro AU6601 and AU6621 controllers
+ */
+
+#ifndef __ALCOR_PCI_H
+#define __ALCOR_PCI_H
+
+#define ALCOR_SD_CARD 0
+#define ALCOR_MS_CARD 1
+
+#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc"
+#define DRV_NAME_ALCOR_PCI_MS "alcor_ms"
+
+#define PCI_ID_ALCOR_MICRO 0x1AEA
+#define PCI_ID_AU6601 0x6601
+#define PCI_ID_AU6621 0x6621
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+#define AU6601_BASE_CLOCK 31000000
+#define AU6601_MIN_CLOCK 150000
+#define AU6601_MAX_CLOCK 208000000
+#define AU6601_MAX_DMA_SEGMENTS 1
+#define AU6601_MAX_PIO_SEGMENTS 1
+#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000
+#define AU6601_MAX_PIO_BLOCK_SIZE 0x200
+#define AU6601_MAX_DMA_BLOCKS 1
+#define AU6601_DMA_LOCAL_SEGMENTS 1
+
+/* registers spotted by reverse engineering but still
+ * with unknown functionality:
+ * 0x10 - ADMA phy address. AU6621 only?
+ * 0x51 - LED ctrl?
+ * 0x52 - unknown
+ * 0x61 - LED related? Always toggled BIT0
+ * 0x63 - Same as 0x61?
+ * 0x77 - unknown
+ */
+
+/* SDMA phy address. Higher than 0x0800.0000?
+ * The au6601 and au6621 have different DMA engines with different issues.
+ * For example, the au6621 engine is triggered by an address change; no other
+ * interaction is needed. This means that if we get two buffers with the same
+ * address, the engine will stall.
+ */
+#define AU6601_REG_SDMA_ADDR 0x00
+#define AU6601_SDMA_MASK 0xffffffff
+
+#define AU6601_DMA_BOUNDARY 0x05
+#define AU6621_DMA_PAGE_CNT 0x05
+/* PIO */
+#define AU6601_REG_BUFFER 0x08
+/* ADMA ctrl? AU6621 only. */
+#define AU6621_DMA_CTRL 0x0c
+#define AU6621_DMA_ENABLE BIT(0)
+/* CMD index */
+#define AU6601_REG_CMD_OPCODE 0x23
+/* CMD parameter */
+#define AU6601_REG_CMD_ARG 0x24
+/* CMD response 4x4 Bytes */
+#define AU6601_REG_CMD_RSP0 0x30
+#define AU6601_REG_CMD_RSP1 0x34
+#define AU6601_REG_CMD_RSP2 0x38
+#define AU6601_REG_CMD_RSP3 0x3C
+/* default timeout set to 125: 125 * 40ms = 5 sec;
+ * it is unclear how exactly this value is calculated.
+ */
+#define AU6601_TIME_OUT_CTRL 0x69
+/* Block size for SDMA or PIO */
+#define AU6601_REG_BLOCK_SIZE 0x6c
+/* Some power related reg, used together with AU6601_OUTPUT_ENABLE */
+#define AU6601_POWER_CONTROL 0x70
+
+/* PLL ctrl */
+#define AU6601_CLK_SELECT 0x72
+#define AU6601_CLK_OVER_CLK 0x80
+#define AU6601_CLK_384_MHZ 0x30
+#define AU6601_CLK_125_MHZ 0x20
+#define AU6601_CLK_48_MHZ 0x10
+#define AU6601_CLK_EXT_PLL 0x04
+#define AU6601_CLK_X2_MODE 0x02
+#define AU6601_CLK_ENABLE 0x01
+#define AU6601_CLK_31_25_MHZ 0x00
+
+#define AU6601_CLK_DIVIDER 0x73
+
+#define AU6601_INTERFACE_MODE_CTRL 0x74
+#define AU6601_DLINK_MODE 0x80
+#define AU6601_INTERRUPT_DELAY_TIME 0x40
+#define AU6601_SIGNAL_REQ_CTRL 0x30
+#define AU6601_MS_CARD_WP BIT(3)
+#define AU6601_SD_CARD_WP BIT(0)
+
+/* same register values are used for:
+ * - AU6601_OUTPUT_ENABLE
+ * - AU6601_POWER_CONTROL
+ */
+#define AU6601_ACTIVE_CTRL 0x75
+#define AU6601_XD_CARD BIT(4)
+/* AU6601_MS_CARD_ACTIVE - will it activate the MS card section? */
+#define AU6601_MS_CARD BIT(3)
+#define AU6601_SD_CARD BIT(0)
+
+/* card slot state. It should automatically detect the type of
+ * the card
+ */
+#define AU6601_DETECT_STATUS 0x76
+#define AU6601_DETECT_EN BIT(7)
+#define AU6601_MS_DETECTED BIT(3)
+#define AU6601_SD_DETECTED BIT(0)
+#define AU6601_DETECT_STATUS_M 0xf
+
+#define AU6601_REG_SW_RESET 0x79
+#define AU6601_BUF_CTRL_RESET BIT(7)
+#define AU6601_RESET_DATA BIT(3)
+#define AU6601_RESET_CMD BIT(0)
+
+#define AU6601_OUTPUT_ENABLE 0x7a
+
+#define AU6601_PAD_DRIVE0 0x7b
+#define AU6601_PAD_DRIVE1 0x7c
+#define AU6601_PAD_DRIVE2 0x7d
+/* read EEPROM? */
+#define AU6601_FUNCTION 0x7f
+
+#define AU6601_CMD_XFER_CTRL 0x81
+#define AU6601_CMD_17_BYTE_CRC 0xc0
+#define AU6601_CMD_6_BYTE_WO_CRC 0x80
+#define AU6601_CMD_6_BYTE_CRC 0x40
+#define AU6601_CMD_START_XFER 0x20
+#define AU6601_CMD_STOP_WAIT_RDY 0x10
+#define AU6601_CMD_NO_RESP 0x00
+
+#define AU6601_REG_BUS_CTRL 0x82
+#define AU6601_BUS_WIDTH_4BIT 0x20
+#define AU6601_BUS_WIDTH_8BIT 0x10
+#define AU6601_BUS_WIDTH_1BIT 0x00
+
+#define AU6601_DATA_XFER_CTRL 0x83
+#define AU6601_DATA_WRITE BIT(7)
+#define AU6601_DATA_DMA_MODE BIT(6)
+#define AU6601_DATA_START_XFER BIT(0)
+
+#define AU6601_DATA_PIN_STATE 0x84
+#define AU6601_BUS_STAT_CMD BIT(15)
+/* BIT(4) - BIT(7) are permanently 1.
+ * They may be reserved, or DAT4-DAT7 may simply not be attached.
+ */
+#define AU6601_BUS_STAT_DAT3 BIT(3)
+#define AU6601_BUS_STAT_DAT2 BIT(2)
+#define AU6601_BUS_STAT_DAT1 BIT(1)
+#define AU6601_BUS_STAT_DAT0 BIT(0)
+#define AU6601_BUS_STAT_DAT_MASK 0xf
+
+#define AU6601_OPT 0x85
+#define AU6601_OPT_CMD_LINE_LEVEL 0x80
+#define AU6601_OPT_NCRC_16_CLK BIT(4)
+#define AU6601_OPT_CMD_NWT BIT(3)
+#define AU6601_OPT_STOP_CLK BIT(2)
+#define AU6601_OPT_DDR_MODE BIT(1)
+#define AU6601_OPT_SD_18V BIT(0)
+
+#define AU6601_CLK_DELAY 0x86
+#define AU6601_CLK_DATA_POSITIVE_EDGE 0x80
+#define AU6601_CLK_CMD_POSITIVE_EDGE 0x40
+#define AU6601_CLK_POSITIVE_EDGE_ALL (AU6601_CLK_CMD_POSITIVE_EDGE \
+ | AU6601_CLK_DATA_POSITIVE_EDGE)
+
+
+#define AU6601_REG_INT_STATUS 0x90
+#define AU6601_REG_INT_ENABLE 0x94
+#define AU6601_INT_DATA_END_BIT_ERR BIT(22)
+#define AU6601_INT_DATA_CRC_ERR BIT(21)
+#define AU6601_INT_DATA_TIMEOUT_ERR BIT(20)
+#define AU6601_INT_CMD_INDEX_ERR BIT(19)
+#define AU6601_INT_CMD_END_BIT_ERR BIT(18)
+#define AU6601_INT_CMD_CRC_ERR BIT(17)
+#define AU6601_INT_CMD_TIMEOUT_ERR BIT(16)
+#define AU6601_INT_ERROR BIT(15)
+#define AU6601_INT_OVER_CURRENT_ERR BIT(8)
+#define AU6601_INT_CARD_INSERT BIT(7)
+#define AU6601_INT_CARD_REMOVE BIT(6)
+#define AU6601_INT_READ_BUF_RDY BIT(5)
+#define AU6601_INT_WRITE_BUF_RDY BIT(4)
+#define AU6601_INT_DMA_END BIT(3)
+#define AU6601_INT_DATA_END BIT(1)
+#define AU6601_INT_CMD_END BIT(0)
+
+#define AU6601_INT_NORMAL_MASK 0x00007FFF
+#define AU6601_INT_ERROR_MASK 0xFFFF8000
+
+#define AU6601_INT_CMD_MASK (AU6601_INT_CMD_END | \
+ AU6601_INT_CMD_TIMEOUT_ERR | AU6601_INT_CMD_CRC_ERR | \
+ AU6601_INT_CMD_END_BIT_ERR | AU6601_INT_CMD_INDEX_ERR)
+#define AU6601_INT_DATA_MASK (AU6601_INT_DATA_END | AU6601_INT_DMA_END | \
+ AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY | \
+ AU6601_INT_DATA_TIMEOUT_ERR | AU6601_INT_DATA_CRC_ERR | \
+ AU6601_INT_DATA_END_BIT_ERR)
+#define AU6601_INT_ALL_MASK ((u32)-1)
+
+/* MS_CARD mode registers */
+
+#define AU6601_MS_STATUS 0xa0
+
+#define AU6601_MS_BUS_MODE_CTRL 0xa1
+#define AU6601_MS_BUS_8BIT_MODE 0x03
+#define AU6601_MS_BUS_4BIT_MODE 0x01
+#define AU6601_MS_BUS_1BIT_MODE 0x00
+
+#define AU6601_MS_TPC_CMD 0xa2
+#define AU6601_MS_TPC_READ_PAGE_DATA 0x02
+#define AU6601_MS_TPC_READ_REG 0x04
+#define AU6601_MS_TPC_GET_INT 0x07
+#define AU6601_MS_TPC_WRITE_PAGE_DATA 0x0D
+#define AU6601_MS_TPC_WRITE_REG 0x0B
+#define AU6601_MS_TPC_SET_RW_REG_ADRS 0x08
+#define AU6601_MS_TPC_SET_CMD 0x0E
+#define AU6601_MS_TPC_EX_SET_CMD 0x09
+#define AU6601_MS_TPC_READ_SHORT_DATA 0x03
+#define AU6601_MS_TPC_WRITE_SHORT_DATA 0x0C
+
+#define AU6601_MS_TRANSFER_MODE 0xa3
+#define AU6601_MS_XFER_INT_TIMEOUT_CHK BIT(2)
+#define AU6601_MS_XFER_DMA_ENABLE BIT(1)
+#define AU6601_MS_XFER_START BIT(0)
+
+#define AU6601_MS_DATA_PIN_STATE 0xa4
+
+#define AU6601_MS_INT_STATUS 0xb0
+#define AU6601_MS_INT_ENABLE 0xb4
+#define AU6601_MS_INT_OVER_CURRENT_ERROR BIT(23)
+#define AU6601_MS_INT_DATA_CRC_ERROR BIT(21)
+#define AU6601_MS_INT_INT_TIMEOUT BIT(20)
+#define AU6601_MS_INT_INT_RESP_ERROR BIT(19)
+#define AU6601_MS_INT_CED_ERROR BIT(18)
+#define AU6601_MS_INT_TPC_TIMEOUT BIT(16)
+#define AU6601_MS_INT_ERROR BIT(15)
+#define AU6601_MS_INT_CARD_INSERT BIT(7)
+#define AU6601_MS_INT_CARD_REMOVE BIT(6)
+#define AU6601_MS_INT_BUF_READ_RDY BIT(5)
+#define AU6601_MS_INT_BUF_WRITE_RDY BIT(4)
+#define AU6601_MS_INT_DMA_END BIT(3)
+#define AU6601_MS_INT_TPC_END BIT(1)
+
+#define AU6601_MS_INT_DATA_MASK 0x00000038
+#define AU6601_MS_INT_TPC_MASK 0x003d8002
+#define AU6601_MS_INT_TPC_ERROR 0x003d0000
+
+#define ALCOR_PCIE_LINK_CTRL_OFFSET 0x10
+#define ALCOR_PCIE_LINK_CAP_OFFSET 0x0c
+#define ALCOR_CAP_START_OFFSET 0x34
+
+struct alcor_dev_cfg {
+ u8 dma;
+};
+
+struct alcor_pci_priv {
+ struct pci_dev *pdev;
+ struct pci_dev *parent_pdev;
+ struct device *dev;
+ void __iomem *iobase;
+ unsigned int irq;
+
+ unsigned long id; /* idr id */
+
+ struct alcor_dev_cfg *cfg;
+
+ /* PCI ASPM related vars */
+ int pdev_cap_off;
+ u8 pdev_aspm_cap;
+ int parent_cap_off;
+ u8 parent_aspm_cap;
+ u8 ext_config_dev_aspm;
+};
+
+void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr);
+void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr);
+void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr);
+u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr);
+u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr);
+#endif
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index da8357ba11bc..c92ebc39fc1f 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -18,20 +18,13 @@
* mask into a value to be binary (or set some other custom bits
* in MMCIPWR) or:ed and written into the MMCIPWR register of the
* block. May also control external power based on the power_mode.
- * @status: if no GPIO read function was given to the block in
- * gpio_wp (below) this function will be called to determine
- * whether a card is present in the MMC slot or not
- * @gpio_wp: read this GPIO pin to see if the card is write protected
- * @gpio_cd: read this GPIO pin to detect card insertion
- * @cd_invert: true if the gpio_cd pin value is active low
+ * @status: if no GPIO line was given to the block, this function will be
+ * called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
- int gpio_wp;
- int gpio_cd;
- bool cd_invert;
};
#endif
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 202a77dbe46d..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS 4 /* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
- char *name; /* description of data type */
- int sects; /* sectors per track */
-#ifdef __STDC__
- int (*read_fkt)(int);
- void (*write_fkt)(int);
-#else
- int (*read_fkt)(); /* read whole track */
- void (*write_fkt)(); /* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
- unsigned long code; /* code returned from drive */
- char *name; /* description of drive */
- unsigned int tracks; /* number of tracks */
- unsigned int heads; /* number of heads */
- unsigned int read_size; /* raw read size for one track */
- unsigned int write_size; /* raw write size for one track */
- unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
- unsigned int precomp1; /* start track for precomp 1 */
- unsigned int precomp2; /* start track for precomp 2 */
- unsigned int step_delay; /* time (in ms) for delay after step */
- unsigned int settle_time; /* time to settle after dir change */
- unsigned int side_time; /* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
- struct fd_drive_type *type; /* type of floppy for this unit */
- struct fd_data_type *dtype; /* type of floppy for this unit */
- int track; /* current track (-1 == unknown) */
- unsigned char *trackbuf; /* current track (kmaloc()'d */
-
- int blocks; /* total # blocks on disk */
-
- int changed; /* true when not known */
- int disk; /* disk in drive (-1 == unknown) */
- int motor; /* true when motor is at speed */
- int busy; /* true when drive is active */
- int dirty; /* true when trackbuf is not on disk */
- int status; /* current error code for unit */
- struct gendisk *gendisk;
-};
-#endif
-
-#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 9b514d05ec70..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY (0x1<<5) /* disk ready when low */
-#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
-#define DSKPROT (0x1<<3) /* disk protected when low */
-#define DSKCHANGE (0x1<<2) /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR (0x1<<7) /* motor on when low */
-#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
-#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
-#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
-#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
-#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP (0x1) /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT (1<<15) /* register contains valid byte when set */
-#define DMAON (1<<14) /* disk DMA enabled */
-#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR (1<<15) /* control bit */
-#endif
-#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
-#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN (1<<15)
-#define DSKLEN_WRITE (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC 0x4489 /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE 0x07 /* move to track 0 */
-#define FD_SEEK 0x0F /* seek track */
-#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
-#define FD_WRITE 0xC5 /* write with MT, MFM */
-#define FD_SENSEI 0x08 /* Sense Interrupt Status */
-#define FD_SPECIFY 0x03 /* specify HUT etc */
-#define FD_FORMAT 0x4D /* format one track */
-#define FD_VERSION 0x10 /* get version code */
-#define FD_CONFIGURE 0x13 /* configure FIFO operation */
-#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2b709416de05..d9bdc1a7f4e7 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -9,6 +9,7 @@
#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
+int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index ca1d2cc2cdfa..18863d56273c 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define __declare_arg_0(a0, res) \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_1(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_2(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
register unsigned long r3 asm("r3")
#define __declare_arg_3(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
- register typeof(a3) r3 asm("r3") = a3
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
+ register unsigned long r3 asm("r3") = __a3
#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
__declare_arg_3(a0, a1, a2, a3, res); \
- register typeof(a4) r4 asm("r4") = a4
+ register unsigned long r4 asm("r4") = __a4
#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
__declare_arg_4(a0, a1, a2, a3, a4, res); \
- register typeof(a5) r5 asm("r5") = a5
+ register unsigned long r5 asm("r5") = __a5
#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register typeof(a6) r6 asm("r6") = a6
+ register unsigned long r6 asm("r6") = __a6
#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register typeof(a7) r7 asm("r7") = a7
+ register unsigned long r7 asm("r7") = __a7
#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9334fbef7bae..a625c29a2ea2 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
struct filename;
-extern void audit_log_session_info(struct audit_buffer *ab);
-
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
@@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
extern int audit_update_lsm_rules(void);
@@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
-static inline void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 212b3822d180..7605b5919c3a 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -62,13 +62,19 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
- VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
- VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
@@ -165,7 +171,7 @@ struct virtchnl_msg {
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -252,6 +258,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -334,6 +342,8 @@ struct virtchnl_vsi_queue_config_info {
struct virtchnl_queue_pair_info qpair[1];
};
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
@@ -349,8 +359,6 @@ struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
-
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
@@ -573,7 +581,7 @@ struct virtchnl_filter {
enum virtchnl_flow_type flow_type;
enum virtchnl_action action;
u32 action_meta;
- __u8 field_flags;
+ u8 field_flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
@@ -596,10 +604,23 @@ enum virtchnl_event_codes {
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event else use link_event_adv to
+ * get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+ * in virtchnl_vf_resource struct and can be used to determine
+ * which link event struct to use below.
+ */
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
} link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
} event_data;
int severity;
@@ -798,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += vti->num_tc *
- sizeof(struct virtchnl_channel_info);
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
if (vti->num_tc == 0)
err_msg_format = true;
}
@@ -816,7 +837,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
- return VIRTCHNL_ERR_PARAM;
+ return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)
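To make the comment about the two link event layouts concrete, here is a hedged VF-side sketch (the function name and print strings are illustrative) of choosing the union member from the advertised capability flag:

static void example_handle_link_event(const struct virtchnl_vf_resource *res,
				      const struct virtchnl_pf_event *ev)
{
	if (res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		pr_info("link %s, %u Mbps\n",
			ev->event_data.link_event_adv.link_status ? "up" : "down",
			ev->event_data.link_event_adv.link_speed);
	else
		pr_info("link %s, legacy speed enum %d\n",
			ev->event_data.link_event.link_status ? "up" : "down",
			ev->event_data.link_event.link_speed);
}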
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 24251762c20c..c31157135598 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -12,6 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
struct page;
struct device;
@@ -75,7 +76,7 @@ enum wb_reason {
*/
struct bdi_writeback_congested {
unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+ refcount_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
@@ -257,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb)
*/
static inline void wb_put(struct bdi_writeback *wb)
{
+ if (WARN_ON_ONCE(!wb->bdi)) {
+ /*
+ * A driver bug might cause a file to be removed before bdi was
+ * initialized.
+ */
+ return;
+ }
+
if (wb != &wb->bdi->wb)
percpu_ref_put(&wb->refcnt);
}
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 72ca0f3d39f3..c28a47cbe355 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -404,13 +404,13 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
- atomic_inc(&bdi->wb_congested->refcnt);
+ refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
}
static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
- if (atomic_dec_and_test(&congested->refcnt))
+ if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
}
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index c05f24fac4f6..688ab0de7810 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -25,6 +25,7 @@ struct linux_binprm {
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
+ unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/*
* True after the bprm_set_creds hook has been called once
@@ -78,7 +79,7 @@ struct linux_binprm {
/* Function parameter for binfmt->coredump */
struct coredump_params {
- const siginfo_t *siginfo;
+ const kernel_siginfo_t *siginfo;
struct pt_regs *regs;
struct file *file;
unsigned long limit;
@@ -138,7 +139,6 @@ extern int transfer_args_to_stack(struct linux_binprm *bprm,
extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
-extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 51371740d2a8..7380b094dcca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -21,12 +21,8 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
-#include <linux/bug.h>
#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
@@ -133,32 +129,6 @@ static inline bool bio_full(struct bio *bio)
}
/*
- * will die
- */
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
- (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
- iter->bi_done += bytes;
- } else {
+ else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
- }
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned int bytes)
-{
- iter->bi_sector -= bytes >> 9;
-
- if (bio_no_advance_iter(bio)) {
- iter->bi_size += bytes;
- iter->bi_done -= bytes;
- return true;
- }
-
- return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ struct bvec_iter bio_iter; /* for rewinding parent bio */
+
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
@@ -535,35 +491,40 @@ do { \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
+ bio_associate_blkg(bio); \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
(dst)->bi_disk = (src)->bi_disk; \
(dst)->bi_partno = (src)->bi_partno; \
+ bio_clone_blkg_association(dst, src); \
} while (0)
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline void bio_associate_blkg_from_page(struct bio *bio,
+ struct page *page) { }
#endif
#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_disassociate_blkg(struct bio *bio);
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_disassociate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index acf5e8df3504..f58e97446abc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -28,8 +28,8 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
- * Note that nbits should be always a compile time evaluable constant.
- * Otherwise many inlines will generate horrible code.
+ * The generated code is more efficient when nbits is known at
+ * compile-time and at most BITS_PER_LONG.
*
* ::
*
@@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = 0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = ~0UL;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
- if (small_const_nbits(nbits))
- *dst = *src;
- else {
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
- }
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
}
/*
@@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index af419012d77d..705f7c442691 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,7 +4,8 @@
#include <asm/types.h>
#include <linux/bits.h>
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -235,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
#ifdef __KERNEL__
#ifndef set_mask_bits
-#define set_mask_bits(ptr, _mask, _bits) \
+#define set_mask_bits(ptr, mask, bits) \
({ \
- const typeof(*ptr) mask = (_mask), bits = (_bits); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
+ typeof(*(ptr)) old__, new__; \
\
do { \
- old = READ_ONCE(*ptr); \
- new = (old & ~mask) | bits; \
- } while (cmpxchg(ptr, old, new) != old); \
+ old__ = READ_ONCE(*(ptr)); \
+ new__ = (old__ & ~mask__) | bits__; \
+ } while (cmpxchg(ptr, old__, new__) != old__); \
\
- new; \
+ new__; \
})
#endif
#ifndef bit_clear_unless
-#define bit_clear_unless(ptr, _clear, _test) \
+#define bit_clear_unless(ptr, clear, test) \
({ \
- const typeof(*ptr) clear = (_clear), test = (_test); \
- typeof(*ptr) old, new; \
+ const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+ typeof(*(ptr)) old__, new__; \
\
do { \
- old = READ_ONCE(*ptr); \
- new = old & ~clear; \
- } while (!(old & test) && \
- cmpxchg(ptr, old, new) != old); \
+ old__ = READ_ONCE(*(ptr)); \
+ new__ = old__ & ~clear__; \
+ } while (!(old__ & test__) && \
+ cmpxchg(ptr, old__, new__) != old__); \
\
- !(old & test); \
+ !(old__ & test__); \
})
#endif
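The behaviour of set_mask_bits() is unchanged by the rename of its internal temporaries; the double-underscore suffixes only avoid shadowing caller variables that happen to be named mask, bits, old or new. A small hedged usage sketch:

/* Atomically replace the low nibble of *state with 0x5 and return the
 * resulting value; any integer type supported by cmpxchg() works. */
static unsigned long example_update_state(unsigned long *state)
{
	return set_mask_bits(state, 0xfUL, 0x5UL);
}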
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 34aec30e06c7..76c61318fda5 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
+#include <linux/fs.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -56,6 +57,7 @@ struct blkcg {
struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
+ refcount_t cgwb_refcnt;
#endif
};
@@ -89,7 +91,6 @@ struct blkg_policy_data {
/* the blkg and policy id this per-policy data belongs to */
struct blkcg_gq *blkg;
int plid;
- bool offline;
};
/*
@@ -122,11 +123,8 @@ struct blkcg_gq {
/* all non-root blkcg_gq's are guaranteed to have access to parent */
struct blkcg_gq *parent;
- /* request allocation list for this blkcg-q pair */
- struct request_list rl;
-
/* reference count */
- atomic_t refcnt;
+ struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,6 +182,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +230,62 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+ struct cgroup_subsys_state *css;
+
+ css = kthread_blkcg();
+ if (css)
+ return css;
+ return task_css(current, io_cgrp_id);
+}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *bio_blkcg(struct bio *bio)
+/**
+ * __bio_blkcg - internal, inconsistent version to get blkcg
+ *
+ * DO NOT USE.
+ * This function is inconsistent and consequently is dangerous to use. The
+ * first part of the function returns a blkcg where a reference is owned by the
+ * bio. This means it does not need to be rcu protected as it cannot go away
+ * with the bio owning a reference to it. However, the latter potentially gets
+ * it from task_css(). This can race against task migration and the cgroup
+ * dying. It is also semantically different as it must be called rcu protected
+ * and is susceptible to failure when trying to get a reference to it.
+ * Therefore, it is not ok to assume that *_get() will always succeed on the
+ * blkcg returned here.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
- struct cgroup_subsys_state *css;
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return css_to_blkcg(blkcg_css());
+}
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- css = kthread_blkcg();
- if (css)
- return css_to_blkcg(css);
- return css_to_blkcg(task_css(current, io_cgrp_id));
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, %NULL if not associated.
+ * Callers are expected to either handle %NULL or know association has been
+ * done prior to calling this.
+ */
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return NULL;
}
static inline bool blk_cgroup_congested(void)
@@ -328,16 +368,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
+ * under RCU read lock.

*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
return __blkg_lookup(blkcg, q, false);
}
@@ -387,6 +423,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
return cpd ? cpd->blkcg : NULL;
}
+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+ refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wb's have finished, so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+ if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+ blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+ /* wb isn't being accounted, so trigger destruction right away */
+ blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
/**
* blkg_path - format cgroup path of blkg
* @blkg: blkg of interest
@@ -408,26 +487,46 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ percpu_ref_get(&blkg->refcnt);
}
/**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
- if (atomic_inc_not_zero(&blkg->refcnt))
- return blkg;
- return NULL;
+ return blkg && percpu_ref_tryget(&blkg->refcnt);
}
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This needs to be called rcu protected. As the failure mode here is to walk
+ * up the blkg tree, this ensures that the blkg->parent pointers are always
+ * valid. This returns the blkg that it ended up taking a reference on or %NULL
+ * if no reference was taken.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+ struct blkcg_gq *ret_blkg = NULL;
-void __blkg_release_rcu(struct rcu_head *rcu);
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ while (blkg) {
+ if (blkg_tryget(blkg)) {
+ ret_blkg = blkg;
+ break;
+ }
+ blkg = blkg->parent;
+ }
+
+ return ret_blkg;
+}
/**
* blkg_put - put a blkg reference
@@ -435,9 +534,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ percpu_ref_put(&blkg->refcnt);
}
/**
@@ -472,94 +569,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-/**
- * blk_get_rl - get request_list to use
- * @q: request_queue of interest
- * @bio: bio which will be attached to the allocated request (may be %NULL)
- *
- * The caller wants to allocate a request from @q to use for @bio. Find
- * the request_list to use and obtain a reference on it. Should be called
- * under queue_lock. This function is guaranteed to return non-%NULL
- * request_list.
- */
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
-
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
-
- /* bypass blkg lookup and use @q->root_rl directly for root */
- if (blkcg == &blkcg_root)
- goto root_rl;
-
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg))
- goto root_rl;
-
- blkg_get(blkg);
- rcu_read_unlock();
- return &blkg->rl;
-root_rl:
- rcu_read_unlock();
- return &q->root_rl;
-}
-
-/**
- * blk_put_rl - put request_list
- * @rl: request_list to put
- *
- * Put the reference acquired by blk_get_rl(). Should be called under
- * queue_lock.
- */
-static inline void blk_put_rl(struct request_list *rl)
-{
- if (rl->blkg->blkcg != &blkcg_root)
- blkg_put(rl->blkg);
-}
-
-/**
- * blk_rq_set_rl - associate a request with a request_list
- * @rq: request of interest
- * @rl: target request_list
- *
- * Associate @rq with @rl so that accounting and freeing can know the
- * request_list @rq came from.
- */
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
-{
- rq->rl = rl;
-}
-
-/**
- * blk_rq_rl - return the request_list a request came from
- * @rq: request of interest
- *
- * Return the request_list @rq is allocated from.
- */
-static inline struct request_list *blk_rq_rl(struct request *rq)
-{
- return rq->rl;
-}
-
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q);
-/**
- * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
- *
- * Should be used under queue_lock.
- */
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
int ret;
@@ -754,32 +763,34 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
- struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- /* associate blkcg if bio hasn't attached one */
- bio_associate_blkcg(bio, &blkcg->css);
-
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+
+ if (!bio->bi_blkg) {
+ char b[BDEVNAME_SIZE];
+
+ WARN_ONCE(1,
+ "no blkg associated for bio on block-device: %s\n",
+ bio_devname(bio, b));
+ bio_associate_blkg(bio);
}
+ blkg = bio->bi_blkg;
+
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
- blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -791,6 +802,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
+ blkcg_bio_issue_init(bio);
+
rcu_read_unlock();
return !throtl;
}
@@ -887,6 +900,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -896,12 +910,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio) { return &q->root_rl; }
-static inline void blk_put_rl(struct request_list *rl) { }
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
-static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
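
Editorial note: to illustrate the new lookup and reference pattern introduced above (percpu_ref based blkg refcounting plus blkg_tryget_closest()), here is a hedged sketch that is not part of the patch. It resolves the current task's blkcg and takes a reference on the closest live blkg for a caller-supplied request_queue; the helper name is hypothetical.

/*
 * Hedged sketch: look up the blkg for the current context under RCU and
 * walk up to the closest live ancestor. Caller drops the ref with
 * blkg_put(); the return value may be NULL.
 */
static struct blkcg_gq *foo_get_current_blkg(struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_tryget_closest(blkg_lookup(css_to_blkcg(blkcg_css()), q));
	rcu_read_unlock();

	return blkg;
}
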
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 9f4c17f0d2d8..0b1f45c62623 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
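
Editorial note: a hedged sketch of how a PCI driver would call the retyped helper from its ->map_queues callback after this change. struct foo_dev, foo->pdev and the use of the tag set's driver_data are assumptions for illustration only.

/* Hedged sketch: map the default queue type using the device's IRQ affinity. */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;	/* hypothetical driver data */

	return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				     foo->pdev, 0);
}
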
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index b4ade198007d..7b6ecf9ac4c3 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -4,7 +4,7 @@
struct blk_mq_tag_set;
struct ib_device;
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec);
#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 69b4da262c45..687ae287e1dc 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..0e030f5f76b6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -37,7 +37,8 @@ struct blk_mq_hw_ctx {
struct blk_mq_ctx *dispatch_from;
unsigned int dispatch_busy;
- unsigned int nr_ctx;
+ unsigned short type;
+ unsigned short nr_ctx;
struct blk_mq_ctx **ctxs;
spinlock_t dispatch_wait_lock;
@@ -74,10 +75,31 @@ struct blk_mq_hw_ctx {
struct srcu_struct srcu[0];
};
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+enum hctx_type {
+ HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
+ HCTX_TYPE_READ, /* just for READ I/O */
+ HCTX_TYPE_POLL, /* polled I/O of any kind */
+
+ HCTX_MAX_TYPES,
+};
+
struct blk_mq_tag_set {
- unsigned int *mq_map;
+ /*
+ * map[] holds ctx -> hctx mappings, one map exists for each type
+ * that the driver wishes to support. There are no restrictions
+ * on maps being of the same size, and it's perfectly legal to
+ * share maps between types.
+ */
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+ unsigned int nr_maps; /* nr entries in map[] */
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues;
+ unsigned int nr_hw_queues; /* nr hw queues across maps */
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
@@ -99,6 +121,7 @@ struct blk_mq_queue_data {
typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
const struct blk_mq_queue_data *);
+typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
@@ -109,11 +132,13 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
struct blk_mq_ops {
@@ -123,6 +148,15 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
+ * If a driver uses bd->last to judge when to submit requests to
+ * hardware, it must define this function. In case of errors that
+ * make us stop issuing further requests, this hook serves the
+ * purpose of kicking the hardware (which the last request otherwise
+ * would have done).
+ */
+ commit_rqs_fn *commit_rqs;
+
+ /*
* Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
@@ -141,7 +175,7 @@ struct blk_mq_ops {
*/
poll_fn *poll;
- softirq_done_fn *complete;
+ complete_fn *complete;
/*
* Called when the block layer side of a hardware queue has been
@@ -165,6 +199,11 @@ struct blk_mq_ops {
/* Called from inside blk_get_request() */
void (*initialize_rq_fn)(struct request *rq);
+ /*
+ * If set, returns whether or not this queue currently is busy
+ */
+ busy_fn *busy;
+
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
@@ -203,6 +242,10 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
@@ -214,6 +257,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+bool blk_mq_queue_inflight(struct request_queue *q);
+
enum {
/* return when out of requests */
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
@@ -260,7 +305,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
@@ -284,24 +329,12 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
-/**
- * blk_mq_mark_complete() - Set request state to complete
- * @rq: request to set to complete state
- *
- * Returns true if request state was successfully set to complete. If
- * successful, the caller is responsibile for seeing this request is ended, as
- * blk_mq_complete_request will not work again.
- */
-static inline bool blk_mq_mark_complete(struct request *rq)
-{
- return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
- MQ_RQ_IN_FLIGHT;
-}
+unsigned int blk_mq_rq_cpu(struct request *rq);
/*
* Driver command data is immediately after the request. So subtract request
@@ -324,4 +357,14 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ if (rq->tag != -1)
+ return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+ return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+ BLK_QC_T_INTERNAL;
+}
+
#endif
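
Editorial note: building on the per-type maps added to struct blk_mq_tag_set above, a hedged sketch of a ->map_queues implementation that splits hardware queues into a default set and a polled set. The queue counts are hypothetical, and the tag set is assumed to have been allocated with nr_maps = HCTX_MAX_TYPES and nr_hw_queues covering both maps.

/* Hedged sketch: fill two queue maps, regular IRQ queues first, then poll queues. */
static int foo_split_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *def = &set->map[HCTX_TYPE_DEFAULT];
	struct blk_mq_queue_map *poll = &set->map[HCTX_TYPE_POLL];
	unsigned int nr_irq_queues = 4, nr_poll_queues = 2;	/* hypothetical */

	def->nr_queues = nr_irq_queues;
	def->queue_offset = 0;
	blk_mq_map_queues(def);

	poll->nr_queues = nr_poll_queues;
	poll->queue_offset = nr_irq_queues;
	return blk_mq_map_queues(poll);
}
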
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..b80c65aba249
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
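
Editorial note: a hedged sketch of the intended call sequence for the hooks declared in this new header, as seen from a driver's runtime-suspend callback. blk_pm_runtime_init() is assumed to have been called once at probe time with the queue and its device; the foo_* helpers are hypothetical.

/* Hedged sketch: gate hardware runtime suspend on the block layer's consent. */
static int foo_runtime_suspend(struct device *dev)
{
	struct request_queue *q = foo_dev_to_queue(dev);	/* hypothetical */
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;

	err = foo_hw_suspend(dev);				/* hypothetical */
	blk_post_runtime_suspend(q, err);
	return err;
}
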
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f6dfb30737d8..5c7e7f859a24 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -174,11 +174,11 @@ struct bio {
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
- * Optional ioc and css associated with this bio. Put on bio
- * release. Read comment on top of bio_associate_current().
+ * Represents the association of the css and request_queue for the bio.
+ * If a bio goes direct to device, it will not have a blkg as it will
+ * not have a request_queue associated with it. The reference is put
+ * on release of the bio.
*/
- struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
@@ -228,6 +228,7 @@ struct bio {
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */
/* See BVEC_POOL_OFFSET below before adding new flags */
@@ -284,8 +285,6 @@ enum req_opf {
REQ_OP_FLUSH = 2,
/* discard sectors */
REQ_OP_DISCARD = 3,
- /* get zone information */
- REQ_OP_ZONE_REPORT = 4,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
/* reset a zone write pointer */
@@ -325,6 +324,8 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
+ __REQ_HIPRI,
+
/* for driver use */
__REQ_DRV,
__REQ_SWAP, /* swapping request. */
@@ -345,8 +346,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+#define REQ_HIPRI (1ULL << __REQ_HIPRI)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
@@ -424,17 +425,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie)
return cookie != BLK_QC_T_NONE;
}
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
- bool internal)
-{
- blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
- if (internal)
- ret |= BLK_QC_T_INTERNAL;
-
- return ret;
-}
-
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
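
Editorial note: to show how the new REQ_HIPRI flag is meant to be consumed, a hedged sketch (not from the patch) of a synchronous submitter that marks a bio for polling and spins on the returned cookie. The caller-held bdev, the done flag and its wiring into the bio's bi_end_io are assumptions; blk_poll() is declared in blkdev.h, changed below.

/*
 * Hedged sketch: issue one polled bio synchronously. @bdev and @bio come
 * from the caller; *@done is assumed to be set to true by the bio's
 * bi_end_io handler.
 */
static void foo_submit_polled(struct block_device *bdev, struct bio *bio,
			      bool *done)
{
	blk_qc_t qc;

	bio->bi_opf |= REQ_HIPRI;
	qc = submit_bio(bio);

	while (!READ_ONCE(*done))
		blk_poll(bdev_get_queue(bdev), qc, true);
}
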
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d6869e0e2b64..338604dff7d0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,29 +54,10 @@ struct blk_stat_callback;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 3
+#define BLKCG_MAX_POLS 5
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
-
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
-};
-
/*
* request flags */
typedef __u32 __bitwise req_flags_t;
@@ -85,8 +66,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
@@ -108,7 +87,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account I/O stat */
+/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
@@ -116,7 +95,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
+/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
@@ -150,8 +129,8 @@ enum mq_rq_state {
struct request {
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
- int cpu;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
@@ -245,11 +224,7 @@ struct request {
refcount_t ref;
unsigned int timeout;
-
- /* access through blk_rq_set_deadline, blk_rq_deadline */
- unsigned long __deadline;
-
- struct list_head timeout_list;
+ unsigned long deadline;
union {
struct __call_single_data csd;
@@ -264,10 +239,6 @@ struct request {
/* for bidi */
struct request *next_rq;
-
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
-#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -311,41 +282,21 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
-typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
BLK_EH_DONE, /* driver has completed the command */
BLK_EH_RESET_TIMER, /* reset timer and try again */
};
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
-
enum blk_queue_state {
Queue_down,
Queue_up,
};
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
- int alloc_policy; /* tag allocation policy */
- int next_tag; /* next tag */
-};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
@@ -389,23 +340,19 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
- unsigned char cluster;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
};
#ifdef CONFIG_BLK_DEV_ZONED
-struct blk_zone_report_hdr {
- unsigned int nr_zones;
- u8 padding[60];
-};
-
+extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
+extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
@@ -414,6 +361,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline int blk_revalidate_disk_zones(struct gendisk *disk)
+{
+ return 0;
+}
+
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
fmode_t mode, unsigned int cmd,
unsigned long arg)
@@ -437,40 +394,15 @@ struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- /*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
- */
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
make_request_fn *make_request_fn;
- poll_q_fn *poll_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
- /* Called just after a request is allocated */
- init_rq_fn *init_rq_fn;
- /* Called just before a request is freed */
- exit_rq_fn *exit_rq_fn;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
const struct blk_mq_ops *mq_ops;
- unsigned int *mq_map;
-
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
@@ -481,17 +413,6 @@ struct request_queue {
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
- /*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
-
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
-
struct backing_dev_info *backing_dev_info;
/*
@@ -504,6 +425,12 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+ * processed.
+ */
+ atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -516,13 +443,7 @@ struct request_queue {
*/
gfp_t bounce_gfp;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ spinlock_t queue_lock;
/*
* queue kobject
@@ -532,7 +453,7 @@ struct request_queue {
/*
* mq queue kobject
*/
- struct kobject mq_kobj;
+ struct kobject *mq_kobj;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
@@ -548,27 +469,12 @@ struct request_queue {
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
- struct blk_queue_tag *queue_tags;
-
- unsigned int nr_sorted;
- unsigned int in_flight[2];
-
- /*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
- */
- unsigned int request_fn_active;
-
unsigned int rq_timeout;
int poll_nsec;
@@ -577,7 +483,6 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
- struct list_head timeout_list;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
@@ -632,11 +537,9 @@ struct request_queue {
struct mutex sysfs_lock;
- int bypass_depth;
atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
#endif
@@ -656,12 +559,12 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
+ struct dentry *rqos_debugfs_dir;
#endif
bool mq_sysfs_init_done;
size_t cmd_size;
- void *rq_alloc_data;
struct work_struct release_work;
@@ -669,17 +572,15 @@ struct request_queue {
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
#define QUEUE_FLAG_DYING 2 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
+#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
@@ -693,31 +594,27 @@ struct request_queue {
#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueable */
#define QUEUE_FLAG_DAX 23 /* device supports DAX */
-#define QUEUE_FLAG_STATS 24 /* track rq completion times */
+#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
+#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_POLL))
+ (1 << QUEUE_FLAG_SAME_COMP))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
@@ -731,49 +628,33 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_pci_p2pdma(q) \
+ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q) \
- test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
-
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
static inline bool blk_account_rq(struct request *rq)
{
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
{
- return q->request_fn || q->mq_ops;
-}
-
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
-{
- return q->limits.cluster;
+ return q->mq_ops;
}
static inline enum blk_zoned_model
@@ -799,6 +680,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
}
#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+}
+
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -814,6 +700,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
@@ -821,27 +712,6 @@ static inline bool rq_is_sync(struct request *rq)
return op_is_sync(rq->cmd_flags);
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- return rl->flags & flag;
-}
-
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags |= flag;
-}
-
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags &= ~flag;
-}
-
static inline bool rq_mergeable(struct request *rq)
{
if (blk_rq_is_passthrough(rq))
@@ -878,16 +748,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
-/*
- * q->prep_rq_fn return values
- */
-enum {
- BLKPREP_OK, /* serve it */
- BLKPREP_KILL, /* fatal error, kill, return -EIO */
- BLKPREP_DEFER, /* leave on queue */
- BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
extern unsigned long blk_max_low_pfn, blk_max_pfn;
/*
@@ -959,10 +819,8 @@ extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
blk_mq_req_flags_t flags);
-extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -972,7 +830,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
@@ -985,15 +842,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -1010,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -1148,13 +997,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
return nr_bios;
}
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
void blk_steal_bios(struct bio_list *list, struct request *rq);
/*
@@ -1172,27 +1014,18 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
*/
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
-extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -1231,15 +1064,10 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -1275,35 +1103,11 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
- spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
-
-/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
* into a single larger request. As the requests are moved from a per-task list to
@@ -1316,9 +1120,10 @@ static inline void blk_set_runtime_active(struct request_queue *q) {}
* schedule() where blk_schedule_flush_plug() is called.
*/
struct blk_plug {
- struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
+ unsigned short rq_count;
+ bool multiple_queues;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1357,31 +1162,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
struct blk_plug *plug = tsk->plug;
return plug &&
- (!list_empty(&plug->list) ||
- !list_empty(&plug->mq_list) ||
+ (!list_empty(&plug->mq_list) ||
!list_empty(&plug->cb_list));
}
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
-{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
-}
-
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
@@ -1676,94 +1460,6 @@ static inline void put_dev_sector(Sector p)
put_page(p.v);
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
-}
-
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- if (!queue_virt_boundary(q))
- return false;
- return __bvec_gap_to_prev(q, bprv, offset);
-}
-
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
- return false;
- if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-
-static inline bool bio_will_gap(struct request_queue *q,
- struct request *prev_rq,
- struct bio *prev,
- struct bio *next)
-{
- if (bio_has_data(prev) && queue_virt_boundary(q)) {
- struct bio_vec pb, nb;
-
- /*
- * don't merge if the 1st bio starts with non-zero
- * offset, otherwise it is quite difficult to respect
- * sg gap limit. We work hard to merge a huge number of small
- * single bios in case of mkfs.
- */
- if (prev_rq)
- bio_get_first_bvec(prev_rq->bio, &pb);
- else
- bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
- return true;
-
- /*
- * We don't need to worry about the situation that the
- * merged segment ends in unaligned virt boundary:
- *
- * - if 'pb' ends aligned, the merged segment ends aligned
- * - if 'pb' ends unaligned, the next bio must include
- * one single bvec of 'nb', otherwise the 'nb' can't
- * merge with 'pb'
- */
- bio_get_last_bvec(prev, &pb);
- bio_get_first_bvec(next, &nb);
-
- if (!bios_segs_mergeable(q, prev, &pb, &nb))
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
- }
-
- return false;
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, req, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, NULL, bio, req->bio);
-}
-
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1539,6 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- struct bio_integrity_payload *bip = bio_integrity(req->bio);
- struct bio_integrity_payload *bip_next = bio_integrity(next);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
/**
* bio_integrity_intervals - Return number of integrity intervals for a bio
* @bi: blk_integrity profile for device
@@ -1947,17 +1623,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
return true;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- return false;
-}
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- return false;
-}
-
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
@@ -1987,6 +1652,9 @@ struct block_device_operations {
int (*getgeo)(struct block_device *, struct hd_geometry *);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ int (*report_zones)(struct gendisk *, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
struct module *owner;
const struct pr_ops *pr_ops;
};
@@ -2097,4 +1765,17 @@ static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
#endif /* CONFIG_BLOCK */
+static inline void blk_wake_io_task(struct task_struct *waiter)
+{
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
+}
+
#endif
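
Editorial note: a hedged sketch of the completion side that pairs with blk_wake_io_task() added above: the bi_end_io handler records completion and wakes the waiting task (or, if that task is polling, merely marks it runnable). struct foo_wait and the bi_private wiring are hypothetical.

/* Hedged sketch: polling-aware wakeup from a bio completion handler. */
struct foo_wait {
	struct task_struct *waiter;
	bool done;
};

static void foo_end_io(struct bio *bio)
{
	struct foo_wait *w = bio->bi_private;	/* hypothetical wiring */

	WRITE_ONCE(w->done, true);
	blk_wake_io_task(w->waiter);
}
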
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
deleted file mode 100644
index 42515195d7d8..000000000000
--- a/include/linux/bootmem.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-#include <linux/mmzone.h>
-#include <linux/mm_types.h>
-#include <asm/dma.h>
-#include <asm/processor.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn;
-extern unsigned long min_low_pfn;
-
-/*
- * highest page
- */
-extern unsigned long max_pfn;
-/*
- * highest possible page
- */
-extern unsigned long long max_possible_pfn;
-
-#ifndef CONFIG_NO_BOOTMEM
-/**
- * struct bootmem_data - per-node information used by the bootmem allocator
- * @node_min_pfn: the starting physical address of the node's memory
- * @node_low_pfn: the end physical address of the directly addressable memory
- * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- * @last_end_off: the offset within the page of the end of the last allocation;
- * if 0, the page used is full
- * @hint_idx: the PFN of the page used with the last allocation;
- * together with using this with the @last_end_offset field,
- * a test can be made to see if allocations can be merged
- * with the page used for the last allocation rather than
- * using up a full new page.
- * @list: list entry in the linked list ordered by the memory addresses
- */
-typedef struct bootmem_data {
- unsigned long node_min_pfn;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_end_off;
- unsigned long hint_idx;
- struct list_head list;
-} bootmem_data_t;
-
-extern bootmem_data_t bootmem_node_data[];
-#endif
-
-extern unsigned long bootmem_bootmap_pages(unsigned long);
-
-extern unsigned long init_bootmem_node(pg_data_t *pgdat,
- unsigned long freepfn,
- unsigned long startpfn,
- unsigned long endpfn);
-extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-
-extern unsigned long free_all_bootmem(void);
-extern void reset_node_managed_pages(pg_data_t *pgdat);
-extern void reset_all_zones_managed_pages(void);
-
-extern void free_bootmem_node(pg_data_t *pgdat,
- unsigned long addr,
- unsigned long size);
-extern void free_bootmem(unsigned long physaddr, unsigned long size);
-extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
-
-/*
- * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
- * the architecture-specific code should honor this).
- *
- * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
- * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
- * already was reserved.
- */
-#define BOOTMEM_DEFAULT 0
-#define BOOTMEM_EXCLUSIVE (1<<0)
-
-extern int reserve_bootmem(unsigned long addr,
- unsigned long size,
- int flags);
-extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-
-extern void *__alloc_bootmem(unsigned long size,
- unsigned long align,
- unsigned long goal);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_node_high(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal,
- unsigned long limit) __malloc;
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-void *__alloc_bootmem_low_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
- unsigned long size,
- unsigned long align,
- unsigned long goal) __malloc;
-
-#ifdef CONFIG_NO_BOOTMEM
-/* We are using top down, so it is safe to use 0 here */
-#define BOOTMEM_LOW_LIMIT 0
-#else
-#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
-#endif
-
-#ifndef ARCH_LOW_ADDRESS_LIMIT
-#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
-#endif
-
-#define alloc_bootmem(x) \
- __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_align(x, align) \
- __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_nopanic(x) \
- __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_nopanic(x) \
- __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
- __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
-
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_low_pages_nopanic(x) \
- __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-
-
-#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
-
-/* FIXME: use MEMBLOCK_ALLOC_* variants here */
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
-
-/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
-void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr,
- phys_addr_t max_addr, int nid);
-void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align,
- BOOTMEM_LOW_LIMIT,
- ARCH_LOW_ADDRESS_LIMIT,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
- BOOTMEM_ALLOC_ACCESSIBLE,
- NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE, nid);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
- BOOTMEM_ALLOC_ACCESSIBLE,
- nid);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- __memblock_free_early(base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
-#else
-
-#define BOOTMEM_ALLOC_ACCESSIBLE 0
-
-
-/* Fall back to all the existing bootmem APIs */
-static inline void * __init memblock_virt_alloc(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_raw(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_low(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_low_nopanic(
- phys_addr_t size, phys_addr_t align)
-{
- if (!align)
- align = SMP_CACHE_BYTES;
- return __alloc_bootmem_low_nopanic(size, align, 0);
-}
-
-static inline void * __init memblock_virt_alloc_from_nopanic(
- phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
-{
- return __alloc_bootmem_nopanic(size, align, min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_node(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_node_nopanic(
- phys_addr_t size, int nid)
-{
- return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
- SMP_CACHE_BYTES,
- BOOTMEM_LOW_LIMIT);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
- phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
- min_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_raw(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void * __init memblock_virt_alloc_try_nid_nopanic(
- phys_addr_t size, phys_addr_t align,
- phys_addr_t min_addr, phys_addr_t max_addr, int nid)
-{
- return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
- min_addr, max_addr);
-}
-
-static inline void __init memblock_free_early(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem(base, size);
-}
-
-static inline void __init memblock_free_early_nid(
- phys_addr_t base, phys_addr_t size, int nid)
-{
- free_bootmem_node(NODE_DATA(nid), base, size);
-}
-
-static inline void __init memblock_free_late(
- phys_addr_t base, phys_addr_t size)
-{
- free_bootmem_late(base, size);
-}
-#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
-
-extern void *alloc_large_system_hash(const char *tablename,
- unsigned long bucketsize,
- unsigned long numentries,
- int scale,
- int flags,
- unsigned int *_hash_shift,
- unsigned int *_hash_mask,
- unsigned long low_limit,
- unsigned long high_limit);
-
-#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
-
-/* Only NUMA needs hash distribution. 64bit NUMA architectures have
- * sufficient vmalloc space.
- */
-#ifdef CONFIG_NUMA
-#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
-extern int hashdist; /* Distribute hashes across NUMA nodes? */
-#else
-#define hashdist (0)
-#endif
-
-
-#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index f91b0f8ff3a9..588dd5f0bd85 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
@@ -22,7 +23,11 @@ struct bpf_cgroup_storage;
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
-DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+ bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+
+#define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
struct bpf_cgroup_storage_map;
@@ -32,7 +37,10 @@ struct bpf_storage_buffer {
};
struct bpf_cgroup_storage {
- struct bpf_storage_buffer *buf;
+ union {
+ struct bpf_storage_buffer *buf;
+ void __percpu *percpu_buf;
+ };
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
struct list_head list;
@@ -43,7 +51,7 @@ struct bpf_cgroup_storage {
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
- struct bpf_cgroup_storage *storage;
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array;
@@ -101,18 +109,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+static inline enum bpf_cgroup_storage_type cgroup_storage_type(
+ struct bpf_map *map)
{
- struct bpf_storage_buffer *buf;
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
+ return BPF_CGROUP_STORAGE_PERCPU;
+
+ return BPF_CGROUP_STORAGE_SHARED;
+}
- if (!storage)
- return;
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
+ *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
+{
+ enum bpf_cgroup_storage_type stype;
- buf = READ_ONCE(storage->buf);
- this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+ for_each_cgroup_storage_type(stype)
+ this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
-struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
+ enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup,
@@ -121,6 +137,10 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
+ void *value, u64 flags);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -265,15 +285,24 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
-static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline void bpf_cgroup_storage_set(
+ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
- struct bpf_prog *prog) { return 0; }
+ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {}
+static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
+ void *value) {
+ return 0;
+}
+static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
+ void *key, void *value, u64 flags) {
+ return 0;
+}
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
@@ -293,6 +322,8 @@ static inline void bpf_cgroup_storage_free(
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define for_each_cgroup_storage_type(stype) for (; false; )
+
#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
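
The bpf-cgroup.h changes above replace the single storage pointer with an array indexed by a storage type and walked with for_each_cgroup_storage_type(). A minimal stand-alone sketch of that indexed-by-type pattern may help; the enum, macro and main() below are invented for illustration and are not the kernel definitions.

/*
 * Illustration only: these names are stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stddef.h>

enum storage_type { STORAGE_SHARED, STORAGE_PERCPU, __STORAGE_MAX };

#define for_each_storage_type(t) \
	for ((t) = 0; (t) < __STORAGE_MAX; (t)++)

int main(void)
{
	void *storage[__STORAGE_MAX] = { NULL, NULL };
	enum storage_type t;

	/* visit every per-type slot, including empty ones */
	for_each_storage_type(t)
		printf("type %d -> %p\n", (int)t, storage[t]);
	return 0;
}
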
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 523481a3471b..e734f163bd0b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
+struct btf;
struct btf_type;
/* map is generic key/value storage optionally accessible by eBPF programs */
@@ -39,6 +40,9 @@ struct bpf_map_ops {
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_map *map, void *key);
+ int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ int (*map_pop_elem)(struct bpf_map *map, void *value);
+ int (*map_peek_elem)(struct bpf_map *map, void *value);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -49,6 +53,7 @@ struct bpf_map_ops {
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
};
@@ -123,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
}
int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
@@ -138,6 +144,7 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
+ ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
@@ -154,6 +161,7 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
};
/* type of values returned from helper functions */
@@ -162,6 +170,7 @@ enum bpf_return_type {
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+ RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -212,6 +221,9 @@ enum bpf_reg_type {
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
+ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
+ PTR_TO_SOCKET, /* reg points to struct bpf_sock */
+ PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
};
/* The information passed from prog-specific *_is_valid_access
@@ -258,26 +270,40 @@ struct bpf_verifier_ops {
struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
+ int (*finalize)(struct bpf_verifier_env *env);
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
};
struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- const struct bpf_prog_offload_ops *dev_ops;
void *jited_image;
u32 jited_len;
};
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+};
+
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+
struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
u32 stack_depth;
u32 id;
- u32 func_cnt;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
@@ -288,12 +314,36 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
- struct bpf_map *cgroup_storage;
+ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
#endif
struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -334,6 +384,11 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
+typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog,
+ u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
@@ -357,7 +412,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage;
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
struct bpf_prog_array {
@@ -500,7 +555,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
@@ -668,7 +724,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
-struct bpf_offload_dev *bpf_offload_dev_create(void);
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
@@ -718,33 +775,18 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
-struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
-struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
-int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
-int sockmap_get_from_fd(const union bpf_attr *attr, int type,
- struct bpf_prog *prog);
+#if defined(CONFIG_BPF_STREAM_PARSER)
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
-static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
-{
- return NULL;
-}
-
-static inline struct sock *__sock_hash_lookup_elem(struct bpf_map *map,
- void *key)
-{
- return NULL;
-}
-
-static inline int sock_map_prog(struct bpf_map *map,
- struct bpf_prog *prog,
- u32 type)
+static inline int sock_map_prog_update(struct bpf_map *map,
+ struct bpf_prog *prog, u32 which)
{
return -EOPNOTSUPP;
}
-static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
- struct bpf_prog *prog)
+static inline int sock_map_get_from_fd(const union bpf_attr *attr,
+ struct bpf_prog *prog)
{
return -EINVAL;
}
@@ -806,6 +848,9 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+extern const struct bpf_func_proto bpf_map_push_elem_proto;
+extern const struct bpf_func_proto bpf_map_pop_elem_proto;
+extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -820,6 +865,10 @@ extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
+extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
@@ -827,4 +876,29 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#if defined(CONFIG_NET)
+bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info);
+u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size);
+#else
+static inline bool bpf_sock_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog,
+ u32 *target_size)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_BPF_H */
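
The new map_push_elem/map_pop_elem/map_peek_elem callbacks in bpf_map_ops give queue- and stack-style maps their own vtable entries. The sketch below only shows the general shape of such callbacks on a toy FIFO; every name in it is a stand-in, not the kernel implementation.

/*
 * Toy FIFO with push/pop/peek callbacks, for illustration only.
 */
#include <stdio.h>
#include <errno.h>

struct toy_map {
	int buf[4];
	int head, tail, count;
};

static int toy_push(struct toy_map *m, const void *value, unsigned long flags)
{
	(void)flags;
	if (m->count == 4)
		return -E2BIG;			/* queue full */
	m->buf[m->tail] = *(const int *)value;
	m->tail = (m->tail + 1) % 4;
	m->count++;
	return 0;
}

static int toy_peek(struct toy_map *m, void *value)
{
	if (!m->count)
		return -ENOENT;			/* queue empty */
	*(int *)value = m->buf[m->head];
	return 0;
}

static int toy_pop(struct toy_map *m, void *value)
{
	int err = toy_peek(m, value);

	if (!err) {
		m->head = (m->head + 1) % 4;
		m->count--;
	}
	return err;
}

int main(void)
{
	struct toy_map m = { {0}, 0, 0, 0 };
	int v = 42, out = 0;

	toy_push(&m, &v, 0);
	toy_pop(&m, &out);
	printf("%d\n", out);	/* 42 */
	return 0;
}
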
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index cd26c090e7c0..44d9ab4809bd 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -16,6 +16,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
@@ -42,6 +43,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, cgroup_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
@@ -49,13 +51,13 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
#ifdef CONFIG_PERF_EVENTS
-BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
+#if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
@@ -67,3 +69,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
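
bpf_types.h is an x-macro list: the same BPF_PROG_TYPE()/BPF_MAP_TYPE() entries expand differently depending on how the including file defines those macros. A self-contained sketch of that pattern, with invented entries, follows.

/*
 * Sketch of the x-macro pattern; the list is a stand-in, not the real
 * bpf_types.h contents.
 */
#include <stdio.h>

#define TYPE_LIST \
	TYPE(QUEUE, queue_ops) \
	TYPE(STACK, stack_ops)

/* first expansion: build an enum of type ids */
#define TYPE(name, ops) TYPE_##name,
enum type_id { TYPE_LIST TYPE_MAX };
#undef TYPE

/* second expansion: build a parallel table of ops names */
#define TYPE(name, ops) [TYPE_##name] = #ops,
static const char * const ops_name[] = { TYPE_LIST };
#undef TYPE

int main(void)
{
	for (int i = 0; i < TYPE_MAX; i++)
		printf("%d -> %s\n", i, ops_name[i]);
	return 0;
}
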
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 38b04f559ad3..27b74947cd2b 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -38,9 +38,11 @@ enum bpf_reg_liveness {
REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+ REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
};
struct bpf_reg_state {
+ /* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
union {
/* valid when type == PTR_TO_PACKET */
@@ -50,6 +52,9 @@ struct bpf_reg_state {
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
+
+ /* Max size from any of the above. */
+ unsigned long raw;
};
/* Fixed part of pointer offset, pointer types only */
s32 off;
@@ -57,9 +62,10 @@ struct bpf_reg_state {
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
* came from, when one is tested for != NULL.
+ * For PTR_TO_SOCKET this is used to share which pointers retain the
+ * same reference to the socket, to determine proper reference freeing.
*/
u32 id;
- /* Ordering of fields matters. See states_equal() */
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
@@ -76,15 +82,15 @@ struct bpf_reg_state {
s64 smax_value; /* maximum possible (s64)value */
u64 umin_value; /* minimum possible (u64)value */
u64 umax_value; /* maximum possible (u64)value */
+ /* parentage chain for liveness checking */
+ struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
* is used which is an index in bpf_verifier_state->frame[] array
* pointing to bpf_func_state.
- * This field must be second to last, for states_equal() reasons.
*/
u32 frameno;
- /* This field must be last, for states_equal() reasons. */
enum bpf_reg_liveness live;
};
@@ -102,12 +108,22 @@ struct bpf_stack_state {
u8 slot_type[BPF_REG_SIZE];
};
+struct bpf_reference_state {
+ /* Track each reference created with a unique id, even if the same
+ * instruction creates the reference multiple times (eg, via CALL).
+ */
+ int id;
+ /* Instruction where the allocation of this reference occurred. This
+ * is used purely to inform the user of a reference leak.
+ */
+ int insn_idx;
+};
+
/* state of the program:
* type of all registers and stack info
*/
struct bpf_func_state {
struct bpf_reg_state regs[MAX_BPF_REG];
- struct bpf_verifier_state *parent;
/* index of call instruction that called into this func */
int callsite;
/* stack frame number of this function state from pov of
@@ -120,7 +136,9 @@ struct bpf_func_state {
*/
u32 subprogno;
- /* should be second to last. See copy_func_state() */
+ /* The following fields should be last. See copy_func_state() */
+ int acquired_refs;
+ struct bpf_reference_state *refs;
int allocated_stack;
struct bpf_stack_state *stack;
};
@@ -129,25 +147,45 @@ struct bpf_func_state {
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
- struct bpf_verifier_state *parent;
u32 curframe;
+ bool speculative;
};
+#define bpf_get_spilled_reg(slot, frame) \
+ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
+ (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ? &frame->stack[slot].spilled_ptr : NULL)
+
+/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
+#define bpf_for_each_spilled_reg(iter, frame, reg) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+ iter < frame->allocated_stack / BPF_REG_SIZE; \
+ iter++, reg = bpf_get_spilled_reg(iter, frame))
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
struct bpf_verifier_state_list *next;
};
+/* Possible states for alu_state member. */
+#define BPF_ALU_SANITIZE_SRC 1U
+#define BPF_ALU_SANITIZE_DST 2U
+#define BPF_ALU_NEG_VALUE (1U << 2)
+#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
+ BPF_ALU_SANITIZE_DST)
+
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
unsigned long map_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
+ u32 alu_limit; /* limit for add/sub register with pointer */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
+ u8 alu_state; /* used in combination with alu_limit */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -176,6 +214,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
};
@@ -183,6 +222,8 @@ struct bpf_subprog_info {
* one verifier_env per bpf_check() call
*/
struct bpf_verifier_env {
+ u32 insn_idx;
+ u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
@@ -196,6 +237,7 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
@@ -206,15 +248,21 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
-static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
- return cur->frame[cur->curframe]->regs;
+ return cur->frame[cur->curframe];
+}
+
+static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
+{
+ return cur_func(env)->regs;
}
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
+int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
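
bpf_get_spilled_reg()/bpf_for_each_spilled_reg() above walk the stack slots of a frame, yielding either a spilled register or NULL for each slot. The stand-alone sketch below mimics that macro shape with invented types, purely for illustration.

/*
 * Slot-iteration macro shape; struct slot and the macro names are made up.
 */
#include <stdio.h>
#include <stddef.h>

struct slot { int used; int value; };

#define get_slot(i, s, n) \
	(((i) < (n) && (s)[i].used) ? &(s)[i] : NULL)

#define for_each_slot(i, s, n, p) \
	for ((i) = 0, (p) = get_slot(i, s, n); (i) < (n); \
	     (i)++, (p) = get_slot(i, s, n))

int main(void)
{
	struct slot stack[3] = { { 1, 10 }, { 0, 0 }, { 1, 30 } };
	struct slot *p;
	int i;

	/* p is either a "spilled" slot or NULL, exactly one step per slot */
	for_each_slot(i, stack, 3, p)
		printf("slot %d: %s\n", i, p ? "spilled" : "empty");
	return 0;
}
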
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 949e9af8d9d6..9cd00a37b8d3 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -28,6 +28,7 @@
#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
#define PHY_ID_BCM7268 0xae025090
#define PHY_ID_BCM7271 0xae0253b0
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 6aeaf6472665..b356e0006731 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -31,6 +31,9 @@ struct device;
struct scatterlist;
struct request_queue;
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
struct bsg_buffer {
unsigned int payload_len;
int sg_cnt;
@@ -72,7 +75,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
diff --git a/include/linux/btf.h b/include/linux/btf.h
index e076c4697049..12502e25e767 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
struct btf;
+struct btf_member;
struct btf_type;
union bpf_attr;
@@ -46,5 +47,24 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size);
+
+#ifdef CONFIG_BPF_SYSCALL
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+#else
+static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
+ u32 type_id)
+{
+ return NULL;
+}
+static inline const char *btf_name_by_offset(const struct btf *btf,
+ u32 offset)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 96225a77c112..7b73ef7f902d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -242,7 +242,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
-static inline int block_page_mkwrite_return(int err)
+static inline vm_fault_t block_page_mkwrite_return(int err)
{
if (err == 0)
return VM_FAULT_LOCKED;
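
block_page_mkwrite_return() maps an errno from a ->page_mkwrite() implementation onto VM_FAULT_* codes and now returns vm_fault_t. A rough stand-alone sketch of that errno-to-fault-code idea follows; the enum values are placeholders, not the kernel's VM_FAULT_* constants.

/*
 * Placeholder fault codes, for illustration only.
 */
#include <errno.h>
#include <stdio.h>

enum fault { FAULT_LOCKED = 1, FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

static enum fault mkwrite_return(int err)
{
	if (err == 0)
		return FAULT_LOCKED;	/* page is locked and ready */
	if (err == -EFAULT)
		return FAULT_NOPAGE;
	if (err == -ENOMEM)
		return FAULT_OOM;
	return FAULT_SIGBUS;		/* any other error */
}

int main(void)
{
	printf("%d %d\n", mkwrite_return(0), mkwrite_return(-ENOMEM));
	return 0;
}
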
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 43d1fd50d433..faeec7433aab 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -5,21 +5,8 @@
#include <linux/compiler.h>
#ifdef __CHECKER__
-#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
-#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
-#define BUILD_BUG_ON_INVALID(e) (0)
-#define BUILD_BUG_ON_MSG(cond, msg) (0)
-#define BUILD_BUG_ON(condition) (0)
-#define BUILD_BUG() (0)
#else /* __CHECKER__ */
-
-/* Force a compilation error if a constant expression is not a power of 2 */
-#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
- BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
-#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
- BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
-
/*
* Force a compilation error if condition is true, but also produce a
* result (of value 0 and type size_t), so the expression can be used
@@ -27,6 +14,13 @@
* aren't permitted).
*/
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
+#endif /* __CHECKER__ */
+
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
/*
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
@@ -51,23 +45,9 @@
* If you have some code which relies on certain constants being equal, or
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
* detect if someone changes it.
- *
- * The implementation uses gcc's reluctance to create a negative array, but gcc
- * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
- * inline functions). Luckily, in 4.3 they added the "error" function
- * attribute just for this type of case. Thus, we use a negative sized array
- * (should always create an error on gcc versions older than 4.4) and then call
- * an undefined function with the error attribute (should always create an
- * error on gcc 4.3 and later). If for some reason, neither creates a
- * compile-time error, we'll still have a link-time error, which is harder to
- * track down.
*/
-#ifndef __OPTIMIZE__
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#else
#define BUILD_BUG_ON(condition) \
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
-#endif
/**
* BUILD_BUG - break compile if used.
@@ -78,6 +58,4 @@
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
-#endif /* __CHECKER__ */
-
#endif /* _LINUX_BUILD_BUG_H */
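
BUILD_BUG_ON_ZERO() keeps its negative-bit-field trick while the power-of-2 helpers move outside the __CHECKER__ guard. The snippet below demonstrates the underlying trick with a local copy of the macro; it is only an illustration and relies on the same compiler extension (a struct containing only an unnamed bit-field) that the kernel build assumes, so build it with gcc or clang.

#include <stdio.h>
#include <stddef.h>

#define MY_BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))

int main(void)
{
	/* false condition: bit-field width 0, expression evaluates to 0 */
	size_t ok = MY_BUILD_BUG_ON_ZERO(sizeof(int) > 16);
	/* size_t bad = MY_BUILD_BUG_ON_ZERO(1); -- negative width, compile error */
	printf("%zu\n", ok);
	return 0;
}
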
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..02c73c6aa805 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -40,8 +40,6 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
- unsigned int bi_done; /* number of bytes completed */
-
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
};
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
bytes -= len;
iter->bi_size -= len;
iter->bi_bvec_done += len;
- iter->bi_done += len;
if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
iter->bi_bvec_done = 0;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index e931da8424a4..6728c2ee0205 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -64,6 +64,10 @@ struct ceph_auth_client_ops {
/* ensure that an existing authorizer is up to date */
int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth);
+ int (*add_authorizer_challenge)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
@@ -118,6 +122,10 @@ void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
+int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 3901927cf6a0..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -165,9 +165,9 @@ DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
-DEFINE_CEPH_FEATURE(60, 1, BLKIN_TRACING) // *do not share this bit*
+DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
+DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // *do not share this bit*
-DEFINE_CEPH_FEATURE(61, 1, RESERVED2) // unused, but slow down!
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facing
@@ -210,14 +210,9 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
- CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
-
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
+ CEPH_FEATURE_CEPHX_V2)
+
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index d143ac8879c6..a6c2a48d42e0 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -194,16 +194,22 @@ ceph_decode_skip_n(p, end, sizeof(u8), bad)
} while (0)
/*
- * struct ceph_timespec <-> struct timespec
+ * struct ceph_timespec <-> struct timespec64
*/
-static inline void ceph_decode_timespec(struct timespec *ts,
- const struct ceph_timespec *tv)
+static inline void ceph_decode_timespec64(struct timespec64 *ts,
+ const struct ceph_timespec *tv)
{
- ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
+ /*
+ * This will still overflow in year 2106. We could extend
+ * the protocol to steal two more bits from tv_nsec to
+ * add three more 136 year epochs after that the way ext4
+ * does if necessary.
+ */
+ ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
-static inline void ceph_encode_timespec(struct ceph_timespec *tv,
- const struct timespec *ts)
+static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
+ const struct timespec64 *ts)
{
tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
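
ceph_encode_timespec64()/ceph_decode_timespec64() still carry seconds as 32 bits on the wire, which is why the comment above notes the year-2106 limit. A stand-alone sketch of that width round-trip (ignoring wire endianness, with an arbitrary timestamp) is below.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t now = 4102444800LL;		/* early 2100, still fits */
	uint32_t wire = (uint32_t)now;		/* what the encoder stores */
	int64_t back = (int64_t)wire;		/* decoded as unsigned, so no
						   sign extension: good until
						   year 2106 */
	printf("%lld -> %u -> %lld\n",
	       (long long)now, (unsigned)wire, (long long)back);
	return 0;
}
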
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 49c93b9308d7..68bb09c29ce8 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -81,7 +81,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+/*
+ * Handle the largest possible rbd object in one message.
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
+ */
+#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c7dfcb8a1fb2..800a2128d411 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -31,6 +31,9 @@ struct ceph_connection_operations {
struct ceph_auth_handshake *(*get_authorizer) (
struct ceph_connection *con,
int *proto, int force_new);
+ int (*add_authorizer_challenge)(struct ceph_connection *con,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply) (struct ceph_connection *con);
int (*invalidate_authorizer)(struct ceph_connection *con);
@@ -79,22 +82,6 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
-static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
-{
- switch (type) {
- case CEPH_MSG_DATA_NONE:
- case CEPH_MSG_DATA_PAGES:
- case CEPH_MSG_DATA_PAGELIST:
-#ifdef CONFIG_BLOCK
- case CEPH_MSG_DATA_BIO:
-#endif /* CONFIG_BLOCK */
- case CEPH_MSG_DATA_BVECS:
- return true;
- default:
- return false;
- }
-}
-
#ifdef CONFIG_BLOCK
struct ceph_bio_iter {
@@ -178,7 +165,6 @@ struct ceph_bvec_iter {
} while (0)
struct ceph_msg_data {
- struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
@@ -199,7 +185,6 @@ struct ceph_msg_data {
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
- struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
@@ -237,7 +222,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
- struct list_head data;
+ struct ceph_msg_data *data;
+ int num_data_items;
+ int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@@ -286,9 +273,8 @@ struct ceph_connection {
attempt for this connection, client */
u32 peer_global_seq; /* peer's global seq for this connection */
+ struct ceph_auth_handshake *auth;
int auth_retry; /* true if we need a newer authorizer */
- void *auth_reply_buf; /* where to put the authorizer reply */
- int auth_reply_buf_len;
struct mutex mutex;
@@ -330,7 +316,7 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
- struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+ struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
@@ -379,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+ gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
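
ceph_msg now keeps its data items in a preallocated array bounded by max_data_items instead of a linked list. The sketch below shows the general append-into-a-fixed-array shape with invented types; it is not the messenger code.

#include <stdio.h>
#include <assert.h>

struct item { int type; };

struct msg {
	struct item *data;
	int num_data_items;
	int max_data_items;
};

static struct item *msg_add_item(struct msg *m)
{
	/* capacity is decided when the message is created */
	assert(m->num_data_items < m->max_data_items);
	return &m->data[m->num_data_items++];
}

int main(void)
{
	struct item buf[2];
	struct msg m = { buf, 0, 2 };

	msg_add_item(&m)->type = 1;
	printf("%d/%d items used\n", m.num_data_items, m.max_data_items);
	return 0;
}
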
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index 76c98a512758..729cdf700eae 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -13,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
+ int max_data_items;
};
-extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
- int front_len, int size, bool blocking,
- const char *name);
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+ int front_len, int max_data_items, int size,
+ const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
- int front_len);
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+ int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 73ae2a926548..9e50aede46c8 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -91,7 +91,7 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
-
+#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */
/*
* connection negotiation
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 0d6ee04b4c41..7a2af5034278 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -136,6 +136,13 @@ struct ceph_osd_req_op {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
+ struct {
+ u64 snapid;
+ u64 src_version;
+ u8 flags;
+ u32 src_fadvise_flags;
+ struct ceph_osd_data osd_data;
+ } copy_from;
};
};
@@ -199,7 +206,7 @@ struct ceph_osd_request {
/* set by submitter */
u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
struct ceph_snap_context *r_snapc; /* for writes */
- struct timespec r_mtime; /* ditto */
+ struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
@@ -253,7 +260,7 @@ struct ceph_osd_linger_request {
struct ceph_osd_request_target t;
u32 map_dne_bound;
- struct timespec mtime;
+ struct timespec64 mtime;
struct kref kref;
struct mutex lock;
@@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- const char *class, const char *method);
+int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
+ const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
@@ -508,9 +514,19 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct ceph_snap_context *sc,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
- struct timespec *mtime,
+ struct timespec64 *mtime,
struct page **pages, int nr_pages);
+int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 dst_fadvise_flags,
+ u8 copy_from_flags);
+
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
@@ -528,12 +544,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
u64 notify_id,
u64 cookie,
void *payload,
- size_t payload_len);
+ u32 payload_len);
int ceph_osdc_notify(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *payload,
- size_t payload_len,
+ u32 payload_len,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 7edcded07641..5dead8486fd8 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -23,16 +23,7 @@ struct ceph_pagelist_cursor {
size_t room; /* room remaining to reset to */
};
-static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
-{
- INIT_LIST_HEAD(&pl->head);
- pl->mapped_tail = NULL;
- pl->length = 0;
- pl->room = 0;
- INIT_LIST_HEAD(&pl->free_list);
- pl->num_pages_free = 0;
- refcount_set(&pl->refcnt, 1);
-}
+struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -68,7 +59,7 @@ static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
return ceph_pagelist_append(pl, &v, 1);
}
static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
- char *s, size_t len)
+ char *s, u32 len)
{
int ret = ceph_pagelist_encode_32(pl, len);
if (ret)
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index f1988387c5ad..3eb0e55665b4 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -410,6 +410,14 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
+ CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
+ CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
+ the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
+ in the near future */
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
+ once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@@ -432,6 +440,15 @@ enum {
};
enum {
+ CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
+ CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
+ CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
+ * cloneid */
+ CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
+};
+
+enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
/* note: use only ODD ids to prevent pre-giant code from
@@ -497,6 +514,17 @@ struct ceph_osd_op {
__le64 expected_object_size;
__le64 expected_write_size;
} __attribute__ ((packed)) alloc_hint;
+ struct {
+ __le64 snapid;
+ __le64 src_version;
+ __u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
+ /*
+ * CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
+ * for src object, flags for dest object are in
+ * ceph_osd_op::flags.
+ */
+ __le32 src_fadvise_flags;
+ } __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ff20b677fb9f..8fcbae1b8db0 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -20,6 +20,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
+#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -91,6 +92,7 @@ enum {
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
+ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
@@ -412,6 +414,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
@@ -435,6 +438,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to track pressure stalls */
+ struct psi_group psi;
+
/* used to store eBPF programs */
struct cgroup_bpf bpf;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c553556bbd..9968332cceed 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
@@ -567,20 +569,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- struct cgroup *ptr;
-
if (cgrp->level < ancestor_level)
return NULL;
-
- for (ptr = cgrp;
- ptr && ptr->level > ancestor_level;
- ptr = cgroup_parent(ptr))
- ;
-
- if (ptr && ptr->level == ancestor_level)
- return ptr;
-
- return NULL;
+ while (cgrp && cgrp->level > ancestor_level)
+ cgrp = cgroup_parent(cgrp);
+ return cgrp;
}
/**
@@ -657,6 +650,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return &cgrp->psi;
+}
+
static inline void cgroup_init_kthreadd(void)
{
/*
@@ -710,6 +708,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
return NULL;
}
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
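
The simplified cgroup_ancestor() above just walks cgroup_parent() until the requested level is reached. A self-contained sketch of the same walk, where struct node stands in for struct cgroup and parent for cgroup_parent():

#include <stdio.h>
#include <stddef.h>

struct node { int level; struct node *parent; };

static struct node *ancestor(struct node *n, int ancestor_level)
{
	if (n->level < ancestor_level)
		return NULL;
	while (n && n->level > ancestor_level)
		n = n->parent;
	return n;
}

int main(void)
{
	struct node root = { 0, NULL };
	struct node mid  = { 1, &root };
	struct node leaf = { 2, &mid };

	printf("%d\n", ancestor(&leaf, 1)->level);	/* prints 1 */
	return 0;
}
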
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 08b1aa70a38d..e443fa9fa859 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/clk-provider.h
- *
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
@@ -119,6 +114,11 @@ struct clk_duty {
* Called with enable_lock held. This function must not
* sleep.
*
+ * @save_context: Save the context of the clock in preparation for poweroff.
+ *
+ * @restore_context: Restore the context of the clock after a restoration
+ * of power.
+ *
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call.
@@ -223,6 +223,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
+ int (*save_context)(struct clk_hw *hw);
+ void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@@ -594,6 +596,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * Flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+ * plus one.
*/
struct clk_fractional_divider {
struct clk_hw hw;
@@ -613,6 +621,8 @@ struct clk_fractional_divider {
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+
extern const struct clk_ops clk_fractional_divider_ops;
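
Roughly, the flag only changes how the raw register fields map to the m/n used in the rate calculation; the helper below is hypothetical and just mirrors that mapping:

	/* Hypothetical helper: translate raw register fields into a rate. */
	static unsigned long frac_div_rate(unsigned long parent_rate,
					   unsigned long m, unsigned long n,
					   unsigned long flags)
	{
		if (flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
			m++;	/* hardware stores numerator - 1 */
			n++;	/* hardware stores denominator - 1 */
		}
		return parent_rate * m / n;	/* overflow handling omitted */
	}
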
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
@@ -1011,5 +1021,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
+void clk_gate_restore_context(struct clk_hw *hw);
+
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 4f750c481b82..a7773b5c0b9f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
-
+/**
+ * clk_bulk_get_all - lookup and obtain all available references to clock
+ * producers.
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get all clocks of a consumer in
+ * one operation. If any of the clocks cannot be acquired then any clocks
+ * that were already obtained will be freed before returning to the caller.
+ *
+ * Returns a positive value for the number of clocks obtained, with the
+ * clock references stored in the clk_bulk_data table pointed to by @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get_all should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
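
A hedged sketch of a consumer of the new helper; foo_probe() and its device are hypothetical, while clk_bulk_get_all(), clk_bulk_put_all() and the existing clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() helpers are the <linux/clk.h> APIs used:

	#include <linux/clk.h>
	#include <linux/device.h>

	static int foo_probe(struct device *dev)
	{
		struct clk_bulk_data *clks;
		int num_clks, ret;

		num_clks = clk_bulk_get_all(dev, &clks);	/* all clocks of @dev */
		if (num_clks < 0)
			return num_clks;

		ret = clk_bulk_prepare_enable(num_clks, clks);
		if (ret) {
			clk_bulk_put_all(num_clks, clks);
			return ret;
		}

		/* ... use the hardware ... */

		clk_bulk_disable_unprepare(num_clks, clks);
		clk_bulk_put_all(num_clks, clks);
		return 0;
	}
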
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
+/**
+ * devm_clk_bulk_get_all - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained, with the
+ * clock references stored in the clk_bulk_data table pointed to by @clks.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get several clk
+ * consumers in one operation with management; the clks will
+ * automatically be freed when the device is unbound.
+ */
+
+int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks);
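
And the managed flavour, again as a hypothetical fragment (foo_pdev_probe() is made up; the clocks are dropped automatically on unbind, so there is no explicit clk_bulk_put_all() here):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int foo_pdev_probe(struct platform_device *pdev)
	{
		struct clk_bulk_data *clks;
		int num_clks;

		num_clks = devm_clk_bulk_get_all(&pdev->dev, &clks);
		if (num_clks < 0)
			return num_clks;

		/* balanced by clk_bulk_disable_unprepare() in the (omitted) remove path */
		return clk_bulk_prepare_enable(num_clks, clks);
	}
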
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
@@ -488,6 +523,19 @@ void clk_put(struct clk *clk);
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
+ * clk_bulk_put_all - "free" all the clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put_all should not be called from within interrupt context.
+ */
+void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
+
+/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
@@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+/**
+ * clk_save_context - save clock context for poweroff
+ *
+ * Saves the context of the clock registers for power states in which the
+ * contents of the registers will be lost. Occurs deep within the suspend
+ * code so locking is not necessary.
+ */
+int clk_save_context(void);
+
+/**
+ * clk_restore_context - restore clock context after poweroff
+ *
+ * This occurs with all clocks enabled. Occurs deep within the resume code
+ * so locking is not necessary.
+ */
+void clk_restore_context(void);
+
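
As a sketch of the intended call sites (the foo_platform_* hooks below are hypothetical; only clk_save_context() and clk_restore_context() come from this header), a platform's power-off path might do:

	#include <linux/clk.h>

	static int foo_platform_suspend(void)
	{
		/* Registers in the clock domain are about to lose power. */
		return clk_save_context();
	}

	static void foo_platform_resume(void)
	{
		/* Runs with all clocks enabled, before drivers resume. */
		clk_restore_context();
	}
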
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
+static inline int __must_check clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
@@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
+static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
+static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
+
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
+
+static inline int clk_save_context(void)
+{
+ return 0;
+}
+
+static inline void clk_restore_context(void) {}
+
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 6aca5ce8a99a..931ab05f771d 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -47,8 +47,10 @@
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */
#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY_MASK (0xff << 16)
#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
@@ -155,6 +157,19 @@
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
+#define AT91_PMC_FSTT(n) BIT(n)
+#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */
+#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */
+#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */
+#define AT91_PMC_LPM BIT(20) /* Low-power Mode */
+#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */
+#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */
+
+#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */
+
+#define AT91_PMC_FS_INPUT_MASK 0x7ff
+
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index e0c362363c38..85f8cf9d1226 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 9ebf1f8243bb..0ebbe2f0b45e 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -1,14 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index a8faa38b1ed6..eacc5df57b99 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -159,6 +159,7 @@ struct clk_hw_omap {
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
+ u32 context;
};
/*
@@ -290,9 +291,15 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
+#define TI_CLK_CLKCTRL_COMPAT BIT(4)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+int omap3_noncore_dpll_save_context(struct clk_hw *hw);
+void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
+
+int omap3_core_dpll_save_context(struct clk_hw *hw);
+void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 308918928767..b21db536fd52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -241,6 +241,11 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz
__clocksource_update_freq_scale(cs, 1000, khz);
}
+#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
+extern void clocksource_arch_init(struct clocksource *cs);
+#else
+static inline void clocksource_arch_init(struct clocksource *cs) { }
+#endif
extern int timekeeping_notify(struct clocksource *clock);
@@ -257,9 +262,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- TIMER_OF_DECLARE(name, compat, fn)
-
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1a3c4f37e908..056be0d03722 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -7,7 +7,7 @@
*/
#include <linux/types.h>
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
+#ifndef COMPAT_MINSIGSTKSZ
+#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
+#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
@@ -110,19 +113,12 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
-typedef compat_ulong_t compat_aio_context_t;
-
struct compat_sel_arg_struct;
struct rusage;
-struct compat_utimbuf {
- compat_time_t actime;
- compat_time_t modtime;
-};
-
struct compat_itimerval {
- struct compat_timeval it_interval;
- struct compat_timeval it_value;
+ struct old_timeval32 it_interval;
+ struct old_timeval32 it_value;
};
struct itimerval;
@@ -146,7 +142,7 @@ struct compat_timex {
compat_long_t constant;
compat_long_t precision;
compat_long_t tolerance;
- struct compat_timeval time;
+ struct old_timeval32 time;
compat_long_t tick;
compat_long_t ppsfreq;
compat_long_t jitter;
@@ -173,6 +169,10 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
+ sigset_t *set, sigset_t *oldset,
+ size_t sigsetsize);
+
struct compat_sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
compat_uptr_t sa_handler;
@@ -307,8 +307,8 @@ struct compat_rlimit {
};
struct compat_rusage {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
+ struct old_timeval32 ru_utime;
+ struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -452,13 +452,13 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
+int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-static inline int compat_timeval_compare(struct compat_timeval *lhs,
- struct compat_timeval *rhs)
+static inline int old_timeval32_compare(struct old_timeval32 *lhs,
+ struct old_timeval32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -467,8 +467,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs,
return lhs->tv_usec - rhs->tv_usec;
}
-static inline int compat_timespec_compare(struct compat_timespec *lhs,
- struct compat_timespec *rhs)
+static inline int old_timespec32_compare(struct old_timespec32 *lhs,
+ struct old_timespec32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -492,8 +492,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+ /* fall through */
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+ /* fall through */
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+ /* fall through */
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;
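
The new comments follow the convention that each intentional fall through in a switch is annotated so -Wimplicit-fallthrough stays quiet; a small standalone example of the same style (sum_tail() is made up for illustration):

	static int sum_tail(const int *a, int n)
	{
		int total = 0;

		switch (n) {
		case 3:
			total += a[2];
			/* fall through */
		case 2:
			total += a[1];
			/* fall through */
		case 1:
			total += a[0];
		}
		return total;
	}
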
@@ -552,12 +555,18 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout,
+ struct old_timespec32 __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
+asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
@@ -642,11 +651,21 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
+ void __user *sig);
+asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct __kernel_timespec __user *tsp,
void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
@@ -671,15 +690,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
/* fs/timerfd.c */
asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
+ struct old_itimerspec32 __user *otmr);
asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
+ const struct old_itimerspec32 __user *utmr,
+ struct old_itimerspec32 __user *otmr);
/* fs/utimes.c */
asmlinkage long compat_sys_utimensat(unsigned int dfd,
const char __user *filename,
- struct compat_timespec __user *t,
+ struct old_timespec32 __user *t,
int flags);
/* kernel/exit.c */
@@ -691,7 +710,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
/* kernel/futex.c */
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
+ struct old_timespec32 __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
@@ -701,8 +720,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
/* kernel/hrtimer.c */
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
@@ -722,19 +741,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
+ struct old_itimerspec32 __user *setting);
asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old);
+ struct old_itimerspec32 __user *new,
+ struct old_itimerspec32 __user *old);
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+ struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -748,7 +767,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+ struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -768,7 +787,10 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+ struct old_timespec32 __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct __kernel_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -782,9 +804,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
/* kernel/time.c */
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
@@ -798,11 +820,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
const char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -819,7 +841,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
+ unsigned nsems, const struct old_timespec32 __user *timeout);
/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -874,9 +896,12 @@ asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_wait4(compat_pid_t pid,
compat_uint_t __user *stat_addr, int options,
struct compat_rusage __user *ru);
@@ -928,7 +953,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
asmlinkage long compat_sys_open(const char __user *filename, int flags,
umode_t mode);
asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
@@ -942,15 +967,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
struct compat_stat __user *statbuf);
/* __ARCH_WANT_SYSCALL_DEPRECATED */
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
+asmlinkage long compat_sys_time(old_time32_t __user *tloc);
asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
+ struct old_utimbuf32 __user *t);
asmlinkage long compat_sys_futimesat(unsigned int dfd,
const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
+ struct old_timeval32 __user *tvp);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
@@ -983,7 +1008,7 @@ asmlinkage long compat_sys_sigaction(int sig,
#endif
/* obsolete: kernel/time/time.c */
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
+asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
@@ -1002,15 +1027,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
/**
- * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
- * Returns the compat_timeval representation of the nsec parameter.
+ * Returns the old_timeval32 representation of the nsec parameter.
*/
-static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
struct timeval tv;
- struct compat_timeval ctv;
+ struct old_timeval32 ctv;
tv = ns_to_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
@@ -1033,9 +1058,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
-#ifndef in_compat_syscall
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
static inline bool in_compat_syscall(void) { return false; }
-#endif
#endif /* CONFIG_COMPAT */
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
deleted file mode 100644
index e70bfd1d2c3f..000000000000
--- a/include/linux/compat_time.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_COMPAT_TIME_H
-#define _LINUX_COMPAT_TIME_H
-
-#include <linux/types.h>
-#include <linux/time64.h>
-
-typedef s32 compat_time_t;
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
-
-#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 7087446c24c8..39f668d5066b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -6,11 +6,7 @@
/* Some compiler specific definitions are overwritten here
* for Clang compiler
*/
-
-#ifdef uninitialized_var
-#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
-#endif
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
@@ -20,17 +16,13 @@
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
+#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
/* emulate gcc's __SANITIZE_ADDRESS__ flag */
-#if __has_feature(address_sanitizer)
#define __SANITIZE_ADDRESS__
-#endif
-
-#undef __no_sanitize_address
-#define __no_sanitize_address __attribute__((no_sanitize("address")))
-
-/* Clang doesn't have a way to turn it off per-function, yet. */
-#ifdef __noretpoline
-#undef __noretpoline
+#define __no_sanitize_address \
+ __attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __no_sanitize_address
#endif
/*
@@ -40,9 +32,14 @@
* checks. Unfortunately, we don't know which version of gcc clang
* pretends to be, so the macro may or may not be defined.
*/
-#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
#if __has_builtin(__builtin_mul_overflow) && \
__has_builtin(__builtin_add_overflow) && \
__has_builtin(__builtin_sub_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
+
+/* The following are for compatibility with GCC, from compiler-gcc.h,
+ * and may be redefined here because they should not be shared with other
+ * compilers, like ICC.
+ */
+#define barrier() __asm__ __volatile__("" : : : "memory")
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 250b9b7cfd60..5776da43da97 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -68,133 +68,20 @@
*/
#define uninitialized_var(x) x = x
-#ifdef __CHECKER__
-#define __must_be_array(a) 0
-#else
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#endif
-
-/*
- * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
- * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
- * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
- * defined so the gnu89 semantics are the default.
- */
-#ifdef __GNUC_STDC_INLINE__
-# define __gnu_inline __attribute__((gnu_inline))
-#else
-# define __gnu_inline
-#endif
-
-/*
- * Force always-inline if the user requests it so via the .config,
- * or if gcc is too old.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
- * Prefer gnu_inline, so that extern inline functions do not emit an
- * externally visible function. This makes extern inline behave as per gnu89
- * semantics rather than c99. This prevents multiple symbol definition errors
- * of extern inline functions at link time.
- * A lot of inline functions can cause havoc with function tracing.
- */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- !defined(CONFIG_OPTIMIZE_INLINING)
-#define inline \
- inline __attribute__((always_inline, unused)) notrace __gnu_inline
-#else
-#define inline inline __attribute__((unused)) notrace __gnu_inline
-#endif
-
-#define __inline__ inline
-#define __inline inline
-#define __always_inline inline __attribute__((always_inline))
-#define noinline __attribute__((noinline))
-
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
-
#ifdef RETPOLINE
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
-#define __noreturn __attribute__((noreturn))
-
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-#define __mode(x) __attribute__((mode(x)))
-
-#define __must_check __attribute__((warn_unused_result))
-#define __malloc __attribute__((__malloc__))
-
-#define __used __attribute__((__used__))
-#define __compiler_offsetof(a, b) \
- __builtin_offsetof(a, b)
-
-/* Mark functions as cold. gcc will assume any path leading to a call
- * to them will be unlikely. This means a lot of manual unlikely()s
- * are unnecessary now for any paths leading to the usual suspects
- * like BUG(), printk(), panic() etc. [but let's keep them for now for
- * older compilers]
- *
- * Early snapshots of gcc 4.3 don't support this and we can't detect this
- * in the preprocessor, but we can live with this because they're unreleased.
- * Maketime probing would be overkill here.
- *
- * gcc also has a __attribute__((__hot__)) to move hot functions into
- * a special section, but I don't see any sense in this right now in
- * the kernel context
- */
-#define __cold __attribute__((__cold__))
-
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#define __optimize(level) __attribute__((__optimize__(level)))
-#define __nostackprotector __optimize("no-stack-protector")
-
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#ifndef __CHECKER__
-#define __compiletime_warning(message) __attribute__((warning(message)))
-#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_warning(message) __attribute__((__warning__(message)))
+#define __compiletime_error(message) __attribute__((__error__(message)))
-#ifdef LATENT_ENTROPY_PLUGIN
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
-#endif /* __CHECKER__ */
/*
* calling noreturn functions, __builtin_unreachable() and __builtin_trap()
@@ -209,10 +96,6 @@
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
do { \
@@ -221,9 +104,6 @@
__builtin_unreachable(); \
} while (0)
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-
#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
#define __randomize_layout __attribute__((randomize_layout))
#define __no_randomize_layout __attribute__((no_randomize_layout))
@@ -233,32 +113,6 @@
#endif
/*
- * When used with Link Time Optimization, gcc can optimize away C functions or
- * variables which are referenced only from assembly code. __visible tells the
- * optimizer that something else uses this function or variable, thus preventing
- * this.
- */
-#define __visible __attribute__((externally_visible))
-
-/* gcc version specific checks */
-
-#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
-/*
- * __assume_aligned(n, k): Tell the optimizer that the returned
- * pointer can be assumed to be k modulo n. The second argument is
- * optional (default 0), so we use a variadic macro to make the
- * shorthand.
- *
- * Beware: Do not apply this to functions which may return
- * ERR_PTRs. Also, it is probably unwise to apply it to functions
- * returning extra information in the low bits (but in that case the
- * compiler should see some alignment anyway, when the return value is
- * massaged by 'flags = ptr & 3; ptr &= ~3;').
- */
-#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
-#endif
-
-/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
@@ -289,32 +143,16 @@
#define KASAN_ABI_VERSION 3
#endif
-#if GCC_VERSION >= 40902
-/*
- * Tell the compiler that address safety instrumentation (KASAN)
- * should not be applied to that function.
- * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- */
+#if __has_attribute(__no_sanitize_address__)
#define __no_sanitize_address __attribute__((no_sanitize_address))
+#else
+#define __no_sanitize_address
#endif
#if GCC_VERSION >= 50100
-/*
- * Mark structures as requiring designated initializers.
- * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
- */
-#define __designated_init __attribute__((designated_init))
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
-#if !defined(__noclone)
-#define __noclone /* not needed */
-#endif
-
-#if !defined(__no_sanitize_address)
-#define __no_sanitize_address
-#endif
-
/*
* Turn individual warnings and errors on and off locally, depending
* on version.
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 547cdc920a3c..517bd14e1222 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -14,10 +14,6 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
-#undef barrier
-#undef barrier_data
-#undef RELOC_HIDE
-#undef OPTIMIZER_HIDE_VAR
#define barrier() __memory_barrier()
#define barrier_data(ptr) barrier()
@@ -33,18 +29,8 @@
*/
#define OPTIMIZER_HIDE_VAR(var) barrier()
-/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
-#define __must_be_array(a) 0
-
#endif
-#ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
#define __builtin_bswap16 _bswap16
-#endif
-
-/*
- * icc defines __GNUC__, but does not implement the builtin overflow checkers.
- */
-#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 42506e4d1f53..fc5004a4b07d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -23,8 +23,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __branch_check__(x, expect, is_constant) ({ \
long ______r; \
static struct ftrace_likely_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_annotated_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_annotated_branch") \
______f = { \
.data.func = __func__, \
.data.file = __FILE__, \
@@ -59,8 +59,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
({ \
int ______r; \
static struct ftrace_branch_data \
- __attribute__((__aligned__(4))) \
- __attribute__((section("_ftrace_branch"))) \
+ __aligned(4) \
+ __section("_ftrace_branch") \
______f = { \
.func = __func__, \
.file = __FILE__, \
@@ -124,7 +124,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
+# define unreachable() do { \
+ annotate_unreachable(); \
+ __builtin_unreachable(); \
+} while (0)
#endif
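
Typical use is unchanged: the macro marks code the compiler should treat as dead, for example after an exhaustive switch (decode2() below is a hypothetical example, not kernel code):

	static int decode2(unsigned int two_bits)
	{
		switch (two_bits & 3) {
		case 0: return 10;
		case 1: return 11;
		case 2: return 12;
		case 3: return 13;
		}
		unreachable();	/* all four values handled above */
	}
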
/*
@@ -146,7 +149,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
extern typeof(sym) sym; \
static const unsigned long __kentry_##sym \
__used \
- __attribute__((section("___kentry" "+" #sym ), used)) \
+ __section("___kentry" "+" #sym ) \
= (unsigned long)&sym;
#endif
@@ -195,7 +198,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
@@ -280,11 +283,26 @@ unsigned long read_word_at_a_time(const void *addr)
#endif /* __KERNEL__ */
-#endif /* __ASSEMBLY__ */
+/*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
+ */
+#define __ADDRESSABLE(sym) \
+ static void * __section(".discard.addressable") __used \
+ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
-#ifndef __optimize
-# define __optimize(level)
-#endif
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
+
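
The helper turns a self-relative 32-bit offset back into a pointer; a userspace model of the same arithmetic (the table entry and target below are invented for the demo):

	#include <stdio.h>
	#include <stdint.h>

	static int target = 42;
	static int32_t rel_entry;	/* stores &target relative to itself */

	static void *demo_offset_to_ptr(const int32_t *off)
	{
		return (void *)((uintptr_t)off + *off);
	}

	int main(void)
	{
		rel_entry = (int32_t)((intptr_t)&target - (intptr_t)&rel_entry);
		printf("%d\n", *(int *)demo_offset_to_ptr(&rel_entry));	/* 42 */
		return 0;
	}
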
+#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
@@ -295,29 +313,14 @@ unsigned long read_word_at_a_time(const void *addr)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
-/*
- * Sparse complains of variable sized arrays due to the temporary variable in
- * __compiletime_assert. Unfortunately we can't just expand it out to make
- * sparse see a constant array size without breaking compiletime_assert on old
- * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
- */
-# ifndef __CHECKER__
-# define __compiletime_error_fallback(condition) \
- do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
-# endif
-#endif
-#ifndef __compiletime_error_fallback
-# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
- bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (__cond) \
+ if (!(condition)) \
prefix ## suffix(); \
- __compiletime_error_fallback(__cond); \
} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
@@ -342,4 +345,7 @@ unsigned long read_word_at_a_time(const void *addr)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+
#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644
index 000000000000..19f32b0c29af
--- /dev/null
+++ b/include/linux/compiler_attributes.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * Provide links to the documentation of each supported compiler, if it exists.
+ */
+
+/*
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
+ *
+ * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
+ * depending on the compiler used to build it; however, these attributes have
+ * no semantic effects for sparse, so it does not matter. Also note that,
+ * in order to avoid sparse's warnings, even the unsupported ones must be
+ * defined to 0.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___designated_init__ 0
+# define __GCC4_has_attribute___externally_visible__ 1
+# define __GCC4_has_attribute___noclone__ 1
+# define __GCC4_has_attribute___nonstring__ 0
+# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol) __attribute__((__alias__(#symbol)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x) __attribute__((__aligned__(x)))
+#define __aligned_largest __attribute__((__aligned__))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which seems to be required by gcc to apply the attribute according
+ * to its docs (and also "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * Optional: only supported since gcc >= 4.9
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#if __has_attribute(__assume_aligned__)
+# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#else
+# define __assume_aligned(a, ...)
+#endif
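
For instance, a declaration using the optional second argument could look like this (hypothetical function, shown only to illustrate the macro's variadic form):

	/* Result is 64-byte aligned, offset by 16 bytes from that alignment. */
	void *foo_alloc_ring(unsigned int entries) __assume_aligned(64, 16);
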
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ */
+#define __cold __attribute__((__cold__))
+
+/*
+ * Note the long name.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__ __attribute__((__const__))
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: only supported since gcc >= 5.1
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b) __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b) __attribute__((__format__(scanf, a, b)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline __attribute__((__gnu_inline__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ */
+#define __malloc __attribute__((__malloc__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x) __attribute__((__mode__(x)))
+
+/*
+ * Optional: not supported by clang
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# define __noclone __attribute__((__noclone__))
+#else
+# define __noclone
+#endif
+
+/*
+ * Note the missing underscores.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
+ * clang: mentioned
+ */
+#define noinline __attribute__((__noinline__))
+
+/*
+ * Optional: only supported since gcc >= 8
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
+ */
+#if __has_attribute(__nonstring__)
+# define __nonstring __attribute__((__nonstring__))
+#else
+# define __nonstring
+#endif
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id1
+ */
+#define __noreturn __attribute__((__noreturn__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
+ */
+#define __packed __attribute__((__packed__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+ */
+#define __pure __attribute__((__pure__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
+ */
+#define __section(S) __attribute__((__section__(#S)))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
+ */
+#define __always_unused __attribute__((__unused__))
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
+ */
+#define __used __attribute__((__used__))
+
+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
+ */
+#define __weak __attribute__((__weak__))
+
+#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index fbf337933fd8..ba814f18cb4c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
@@ -54,35 +55,32 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
-#else
-#define notrace __attribute__((no_instrument_function))
-#endif
-
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
+/* Attributes */
+#include <linux/compiler_attributes.h>
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
+/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
+#elif defined(__INTEL_COMPILER)
+#include <linux/compiler-intel.h>
+#elif defined(__GNUC__)
+/* The above compilers also define __GNUC__, so order is important here. */
+#include <linux/compiler-gcc.h>
+#else
+#error "Unknown compiler"
#endif
/*
- * Generic compiler-dependent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
*/
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
struct ftrace_branch_data {
const char *func;
@@ -106,62 +104,53 @@ struct ftrace_likely_data {
unsigned long constant;
};
-#endif /* __KERNEL__ */
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-
-/* Don't. Just don't. */
-#define __deprecated
-#define __deprecated_for_modules
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
+#ifdef CONFIG_ENABLE_MUST_CHECK
+#define __must_check __attribute__((__warn_unused_result__))
+#else
#define __must_check
#endif
-#ifndef __malloc
-#define __malloc
+#if defined(CC_USING_HOTPATCH)
+#define notrace __attribute__((hotpatch(0, 0)))
+#else
+#define notrace __attribute__((__no_instrument_function__))
#endif
/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
- *
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
*/
-#ifndef __used
-# define __used /* unimplemented */
-#endif
+#define __naked __attribute__((__naked__)) notrace
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
-#endif
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
+/*
+ * Force always-inline if the user requests it so via the .config.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ * Do not use __always_inline here, since currently it expands to inline again
+ * (which would break users of __always_inline).
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING)
+#define inline inline __attribute__((__always_inline__)) __gnu_inline \
+ __maybe_unused notrace
+#else
+#define inline inline __gnu_inline \
+ __maybe_unused notrace
#endif
-#ifndef noinline
-#define noinline
-#endif
+#define __inline__ inline
+#define __inline inline
/*
 * Rather than using noinline to prevent stack consumption, use
@@ -169,34 +158,15 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler-specific plugins.
*/
-#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
-#endif
-
-#ifndef __designated_init
-# define __designated_init
-#endif
-
#ifndef __latent_entropy
# define __latent_entropy
#endif
@@ -214,46 +184,19 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
-/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
- */
-
-#ifndef __cold
-#define __cold
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
#endif
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
-#endif
-
-#ifndef __visible
-#define __visible
-#endif
-
-#ifndef __nostackprotector
-# define __nostackprotector
-#endif
-
-/*
- * Assume alignment of return value.
- */
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
-
/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
+#define __native_word(t) \
+ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+/* Helpers for controlling compiler diagnostics via pragmas. */
#ifndef __diag
#define __diag(string)
#endif
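Editor's illustration (not part of the patch): a minimal userspace sketch of two helpers kept above, __must_check and __same_type. The macro bodies are copied from the hunk as it reads with CONFIG_ENABLE_MUST_CHECK; the surrounding program is hypothetical example code.

#include <stdio.h>

#define __must_check __attribute__((__warn_unused_result__))
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

static __must_check int reserve_slot(void)
{
	return -1;	/* pretend the reservation failed */
}

int main(void)
{
	int err = reserve_slot();	/* discarding the result would trigger -Wunused-result */
	unsigned long flags = 0;

	/* __builtin_types_compatible_p() ignores top-level qualifiers */
	printf("err=%d ul==ul:%d ul==int:%d\n", err,
	       __same_type(flags, 0UL), __same_type(flags, 0));
	return 0;
}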
diff --git a/include/linux/console.h b/include/linux/console.h
index f59f3dbca65c..ec9bdb3d7bab 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -14,6 +14,7 @@
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
+#include <linux/atomic.h>
#include <linux/types.h>
struct vc_data;
@@ -201,11 +202,14 @@ void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
-#if 1
-#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
-#else
-#define WARN_CONSOLE_UNLOCKED()
-#endif
+#define WARN_CONSOLE_UNLOCKED() \
+ WARN_ON(!atomic_read(&ignore_console_lock_warning) && \
+ !is_console_locked() && !oops_in_progress)
+/*
+ * Increment ignore_console_lock_warning if you need to quiet
+ * WARN_CONSOLE_UNLOCKED() for debugging purposes.
+ */
+extern atomic_t ignore_console_lock_warning;
/* VESA Blanking Levels */
#define VESA_NO_BLANKING 0
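For context on the new ignore_console_lock_warning counter: a debugging caller is expected to bump it around code that deliberately touches VT state without holding the console lock. A minimal sketch, assuming kernel code with access to these symbols; the function name is illustrative.

#include <linux/atomic.h>
#include <linux/console.h>

static void vt_selftest_poke(void)	/* hypothetical debug helper */
{
	/* Silence WARN_CONSOLE_UNLOCKED() for this intentionally unlocked probe. */
	atomic_inc(&ignore_console_lock_warning);

	WARN_CONSOLE_UNLOCKED();	/* no longer fires while the counter is raised */

	atomic_dec(&ignore_console_lock_warning);
}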
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..ab137f97ecbd 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -141,7 +141,6 @@ struct vc_data {
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
struct uni_screen *vc_uni_screen; /* unicode screen content */
- bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index cf68ca4a508c..3d656f54d64f 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -18,6 +18,15 @@
#include <linux/types.h>
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define CORDIC_FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
/**
* struct cordic_iq - i/q coordinate.
*
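The fixed-point helpers moved into this header pair with cordic_calc_iq() from lib/cordic.c. A minimal sketch of converting an angle in and a result back out; the caller is hypothetical and the 30-degree input is arbitrary.

#include <linux/cordic.h>

static void cordic_demo(void)	/* hypothetical caller */
{
	struct cordic_iq iq = cordic_calc_iq(CORDIC_FIXED(30));	/* angle in degrees */
	s32 cos_part = CORDIC_FLOAT(iq.i);	/* round fixed-point back to an integer */
	s32 sin_part = CORDIC_FLOAT(iq.q);

	(void)cos_part;
	(void)sin_part;
}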
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 207aed96a5b7..abf4b4e65dbb 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -17,9 +17,9 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
-extern void do_coredump(const siginfo_t *siginfo);
+extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
-static inline void do_coredump(const siginfo_t *siginfo) {}
+static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d828a6efe0b1..46c67a764877 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -94,20 +94,15 @@ union coresight_dev_subtype {
* @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
* @name: name of the component as shown under sysfs.
* @nr_inport: number of input ports for this component.
- * @outports: list of remote endpoint port number.
- * @child_names:name of all child components connected to this device.
- * @child_ports:child component port number the current component is
- connected to.
* @nr_outport: number of output ports for this component.
+ * @conns: Array of nr_outport connections from this component
*/
struct coresight_platform_data {
int cpu;
const char *name;
int nr_inport;
- int *outports;
- const char **child_names;
- int *child_ports;
int nr_outport;
+ struct coresight_connection *conns;
};
/**
@@ -190,23 +185,15 @@ struct coresight_device {
* @disable: disables the sink.
* @alloc_buffer: initialises perf's ring buffer for trace collection.
* @free_buffer: release memory allocated in @get_config.
- * @set_buffer: initialises buffer mechanic before a trace session.
- * @reset_buffer: finalises buffer mechanic after a trace session.
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode);
+ int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
void (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
void **pages, int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
- int (*set_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- unsigned long (*reset_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- void (*update_buffer)(struct coresight_device *csdev,
+ unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
};
@@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
+
+extern int coresight_claim_device(void __iomem *base);
+extern int coresight_claim_device_unlocked(void __iomem *base);
+
+extern void coresight_disclaim_device(void __iomem *base);
+extern void coresight_disclaim_device_unlocked(void __iomem *base);
+
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
+static inline int coresight_claim_device_unlocked(void __iomem *base)
+{
+ return -EINVAL;
+}
+
+static inline int coresight_claim_device(void __iomem *base)
+{
+ return -EINVAL;
+}
+
+static inline void coresight_disclaim_device(void __iomem *base) {}
+static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+
#endif
#ifdef CONFIG_OF
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 882a9b9e34bc..c86d6d8bdfed 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
}
#endif
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov) { }
+#endif
+
extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index c49843c4d031..fd586d0301e7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -126,6 +126,7 @@ enum cpuhp_state {
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
@@ -144,6 +145,7 @@ enum cpuhp_state {
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -162,6 +164,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4325d6fdde9b..4dff74f48d4b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -33,6 +33,8 @@ struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
unsigned long long time; /* in US */
+ unsigned long long above; /* Number of times it's been too deep */
+ unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
unsigned long long s2idle_usage;
unsigned long long s2idle_time; /* in US */
@@ -81,6 +83,7 @@ struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int use_deepest_state:1;
+ unsigned int poll_time_limit:1;
unsigned int cpu;
int last_residency;
@@ -99,16 +102,6 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
-{
- return dev->last_residency;
-}
-
-
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index b511f6d24b42..525510a9f965 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -60,6 +60,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+extern unsigned char *vmcoreinfo_data;
+extern size_t vmcoreinfo_size;
extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 3e4ba9d753c8..f774c5eb9e3c 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -26,6 +26,10 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
+extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf);
+
void vmcore_cleanup(void);
/* Architecture code defines this if there are other possible ELF
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 1fe0cfcdea30..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,6 +6,7 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
+#define CRC_T10DIF_STRING "crct10dif"
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
new file mode 100644
index 000000000000..c756e65a1b58
--- /dev/null
+++ b/include/linux/crc64.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See lib/crc64.c for the related specification and polynomial arithmetic.
+ */
+#ifndef _LINUX_CRC64_H
+#define _LINUX_CRC64_H
+
+#include <linux/types.h>
+
+u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+#endif /* _LINUX_CRC64_H */
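A short usage sketch for the new interface. The seed of 0 is a common convention only; as the header notes, the authoritative specification lives in lib/crc64.c.

#include <linux/crc64.h>

static u64 checksum_block(const void *buf, size_t len)	/* hypothetical helper */
{
	return crc64_be(0, buf, len);	/* seed 0; see lib/crc64.c for the spec */
}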
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 7eed6101c791..4907c9df86b3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -169,6 +169,7 @@ extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
+extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
/*
@@ -236,7 +237,7 @@ static inline struct cred *get_new_cred(struct cred *cred)
* @cred: The credentials to reference
*
* Get a reference on the specified set of credentials. The caller must
- * release the reference.
+ * release the reference. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -247,16 +248,29 @@ static inline struct cred *get_new_cred(struct cred *cred)
static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return cred;
validate_creds(cred);
return get_new_cred(nonconst_cred);
}
+static inline const struct cred *get_cred_rcu(const struct cred *cred)
+{
+ struct cred *nonconst_cred = (struct cred *) cred;
+ if (!cred)
+ return NULL;
+ if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+ validate_creds(cred);
+ return cred;
+}
+
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
*
* Release a reference to a set of credentials, deleting them when the last ref
- * is released.
+ * is released. If %NULL is passed, nothing is done.
*
* This takes a const pointer to a set of credentials because the credentials
* on task_struct are attached by const pointers to prevent accidental
@@ -266,9 +280,11 @@ static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
- __put_cred(cred);
+ if (cred) {
+ validate_creds(cred);
+ if (atomic_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+ }
}
/**
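The NULL-tolerant behaviour added to get_cred() and put_cred() lets callers manage an optional credential without special-casing. A minimal sketch; the structure and functions are hypothetical.

#include <linux/cred.h>

struct deferred_work {			/* hypothetical user */
	const struct cred *cred;	/* may legitimately be NULL */
};

static void deferred_work_init(struct deferred_work *w, const struct cred *cred)
{
	w->cred = get_cred(cred);	/* returns NULL untouched if cred is NULL */
}

static void deferred_work_fini(struct deferred_work *w)
{
	put_cred(w->cred);		/* no-op when w->cred is NULL */
}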
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e8839d3a7559..902ec171fc6d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -49,7 +49,6 @@
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
@@ -77,12 +76,6 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
-/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -157,7 +150,6 @@ struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -246,31 +238,16 @@ struct cipher_desc {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- * implementation may provide the function on how to update the IV
- * for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- * @givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- * by the kernel crypto API. Several use cases have a predefined
- * approach how IVs are to be updated. For such use cases, the kernel
- * crypto API provides ready-to-use implementations that can be
- * referenced with this variable.
* @ivsize: IV size applicable for transformation. The consumer must provide an
* IV of exactly that size to perform the encrypt or decrypt operation.
*
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
- const char *geniv;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -284,10 +261,9 @@ struct ablkcipher_alg {
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
*
- * All fields except @geniv and @ivsize are mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -299,8 +275,6 @@ struct blkcipher_alg {
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
- const char *geniv;
-
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
@@ -369,6 +343,115 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+#ifdef CONFIG_CRYPTO_STATS
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t verify_cnt;
+ atomic64_t sign_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
+ */
+struct crypto_istat_compress {
+ atomic64_t compress_cnt;
+ atomic64_t compress_tlen;
+ atomic64_t decompress_cnt;
+ atomic64_t decompress_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of errors for hash requests
+ */
+struct crypto_istat_hash {
+ atomic64_t hash_cnt;
+ atomic64_t hash_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+ atomic64_t setsecret_cnt;
+ atomic64_t generate_public_key_cnt;
+ atomic64_t compute_shared_secret_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_rng - statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total size of data generated by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+ atomic64_t generate_cnt;
+ atomic64_t generate_tlen;
+ atomic64_t seed_cnt;
+ atomic64_t err_cnt;
+};
+#endif /* CONFIG_CRYPTO_STATS */
#define cra_ablkcipher cra_u.ablkcipher
#define cra_blkcipher cra_u.blkcipher
@@ -454,6 +537,15 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
+ * @stats: union of all possible crypto_istat_xxx structures
+ * @stats.aead: statistics for AEAD algorithm
+ * @stats.akcipher: statistics for akcipher algorithm
+ * @stats.cipher: statistics for cipher algorithm
+ * @stats.compress: statistics for compress algorithm
+ * @stats.hash: statistics for hash algorithm
+ * @stats.rng: statistics for rng algorithm
+ * @stats.kpp: statistics for KPP algorithm
+ *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,8 +579,87 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
+
+#ifdef CONFIG_CRYPTO_STATS
+ union {
+ struct crypto_istat_aead aead;
+ struct crypto_istat_akcipher akcipher;
+ struct crypto_istat_cipher cipher;
+ struct crypto_istat_compress compress;
+ struct crypto_istat_hash hash;
+ struct crypto_istat_rng rng;
+ struct crypto_istat_kpp kpp;
+ } stats;
+#endif /* CONFIG_CRYPTO_STATS */
+
} CRYPTO_MINALIGN_ATTR;
+#ifdef CONFIG_CRYPTO_STATS
+void crypto_stats_init(struct crypto_alg *alg);
+void crypto_stats_get(struct crypto_alg *alg);
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+#else
+static inline void crypto_stats_init(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_get(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
+{}
+static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -734,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
static inline u32 crypto_skcipher_type(u32 type)
{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
return type;
}
static inline u32 crypto_skcipher_mask(u32 mask)
{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
return mask;
}
@@ -922,7 +1093,14 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->encrypt(req);
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
+ int ret;
+
+ crypto_stats_get(alg);
+ ret = crt->encrypt(req);
+ crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
+ return ret;
}
/**
@@ -940,7 +1118,14 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->decrypt(req);
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
+ int ret;
+
+ crypto_stats_get(alg);
+ ret = crt->decrypt(req);
+ crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
+ return ret;
}
/**
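The crypto_stats_* helpers are meant to bracket an operation the way the crypto_ablkcipher_encrypt() hunk above does: sample the length before the call (the request may be consumed by it), take the accounting reference with crypto_stats_get(), then report the outcome. A sketch mirroring that hunk; the wrapper name is illustrative, not a new API.

#include <linux/crypto.h>

static int example_do_encrypt(struct ablkcipher_request *req)	/* hypothetical wrapper */
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;	/* sample before ->encrypt() may consume req */
	int ret;

	crypto_stats_get(alg);			/* take the accounting reference */
	ret = crt->encrypt(req);
	crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
	return ret;
}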
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 056867f09a01..45bfe9d61271 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
+#include <linux/rtc.h>
#include <uapi/linux/cuda.h>
@@ -16,4 +17,7 @@ extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
+extern time64_t cuda_get_time(void);
+extern int cuda_set_rtc_time(struct rtc_time *tm);
+
#endif /* _LINUX_CUDA_H */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index deb0f663252f..0dd316a74a29 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+typedef unsigned long dax_entry_t;
+
struct iomap_ops;
struct dax_device;
struct dax_operations {
@@ -88,6 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -119,6 +123,17 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
{
return -EOPNOTSUPP;
}
+
+static inline dax_entry_t dax_lock_page(struct page *page)
+{
+ if (IS_DAX(page->mapping->host))
+ return ~0UL;
+ return 0;
+}
+
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+{
+}
#endif
int dax_read_lock(void);
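dax_lock_page() now hands back an opaque dax_entry_t cookie rather than a bool, and dax_unlock_page() expects it back; zero means the entry could not be locked. A minimal sketch of the calling convention; the surrounding function is hypothetical.

#include <linux/dax.h>

static bool poke_dax_page(struct page *page)	/* hypothetical caller */
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)			/* 0 means the page could not be locked */
		return false;

	/* ... operate on the page while the DAX entry is held ... */

	dax_unlock_page(page, cookie);	/* return the cookie obtained above */
	return true;
}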
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d32957b423d5..ef4b70f64f33 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -145,8 +145,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int, unsigned int);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *);
} ____cacheline_aligned;
/*
@@ -561,15 +560,10 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
-/* d_real() flags */
-#define D_REAL_UPPER 0x2 /* return upper dentry or NULL if non-upper */
-
/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
- * @open_flags: open flags to control copy-up behavior
- * @flags: flags to control what is returned by this function
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
@@ -577,11 +571,10 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
* See also: Documentation/filesystems/vfs.txt
*/
static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode,
- unsigned int open_flags, unsigned int flags)
+ const struct inode *inode)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode, open_flags, flags);
+ return dentry->d_op->d_real(dentry, inode);
else
return dentry;
}
@@ -596,7 +589,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0, 0));
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL));
}
struct name_snapshot {
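With the simplified two-argument d_real(), a caller that wants the filesystem's notion of the real object just passes a NULL inode. A small sketch under that assumption; the helper is hypothetical.

#include <linux/dcache.h>

static bool same_underlying_inode(struct dentry *a, struct dentry *b)	/* hypothetical */
{
	/* NULL inode: peel back any overlay and compare what the fs considers real */
	return d_backing_inode(d_real(a, NULL)) == d_backing_inode(d_real(b, NULL));
}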
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 120225e9a366..257ab3c92cb8 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -8,8 +8,8 @@
struct task_struct;
-extern int debug_locks;
-extern int debug_locks_silent;
+extern int debug_locks __read_mostly;
+extern int debug_locks_silent __read_mostly;
static inline int __debug_locks_off(void)
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 31c865d1842e..577d1b25fccd 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -57,7 +57,12 @@ struct task_delay_info {
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
+
+ u64 thrashing_start;
+ u64 thrashing_delay; /* wait for thrashing page */
+
u32 freepages_count; /* total count of memory reclaim */
+ u32 thrashing_count; /* total count of thrash waits */
};
#endif
@@ -76,6 +81,8 @@ extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
+extern void __delayacct_thrashing_start(void);
+extern void __delayacct_thrashing_end(void);
static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
{
@@ -156,6 +163,18 @@ static inline void delayacct_freepages_end(void)
__delayacct_freepages_end();
}
+static inline void delayacct_thrashing_start(void)
+{
+ if (current->delays)
+ __delayacct_thrashing_start();
+}
+
+static inline void delayacct_thrashing_end(void)
+{
+ if (current->delays)
+ __delayacct_thrashing_end();
+}
+
#else
static inline void delayacct_set_flag(int flag)
{}
@@ -182,6 +201,10 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
+static inline void delayacct_thrashing_start(void)
+{}
+static inline void delayacct_thrashing_end(void)
+{}
#endif /* CONFIG_TASK_DELAY_ACCT */
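The thrashing accounting follows the same bracketing pattern as the freepages hooks above: start before blocking, end once the wait is over. A minimal sketch; the wait path shown is hypothetical, and both calls compile away without CONFIG_TASK_DELAY_ACCT.

#include <linux/delayacct.h>

static void wait_for_thrashing_page(void)	/* hypothetical wait path */
{
	delayacct_thrashing_start();	/* start charging the delay to current */

	/* ... block until the contended page becomes usable ... */

	delayacct_thrashing_end();	/* folds into thrashing_delay/thrashing_count */
}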
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
deleted file mode 100644
index 92521471517f..000000000000
--- a/include/linux/dell-led.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DELL_LED_H__
-#define __DELL_LED_H__
-
-int dell_micmute_led_set(int on);
-
-#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 3aae5b3af87c..fbffa74bfc1b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -131,6 +131,9 @@ struct devfreq_dev_profile {
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
+ * @suspend_freq: frequency of the device set during the suspend phase.
+ * @resume_freq: frequency of the device set during the resume phase.
+ * @suspend_count: counter of suspend requests for the device.
* @total_trans: Number of devfreq transitions
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
@@ -167,6 +170,10 @@ struct devfreq {
unsigned long scaling_max_freq;
bool stop_polling;
+ unsigned long suspend_freq;
+ unsigned long resume_freq;
+ atomic_t suspend_count;
+
/* information for device frequency transition */
unsigned int total_trans;
unsigned int *trans_table;
@@ -198,6 +205,17 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
+extern void devfreq_suspend(void);
+extern void devfreq_resume(void);
+
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency
+ * @devfreq: the devfreq device
+ *
+ * Note: devfreq->lock must be held
+ */
+extern int update_devfreq(struct devfreq *devfreq);
+
/* Helper functions for devfreq user device driver with OPP. */
extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);
@@ -316,6 +334,9 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
+static inline void devfreq_suspend(void) {}
+static inline void devfreq_resume(void) {}
+
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
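As the new kernel-doc for update_devfreq() states, devfreq->lock must be held around the call. A minimal sketch of a governor-side caller; the function name is illustrative.

#include <linux/devfreq.h>

static int nudge_devfreq(struct devfreq *devfreq)	/* hypothetical caller */
{
	int err;

	mutex_lock(&devfreq->lock);	/* update_devfreq() requires devfreq->lock */
	err = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return err;
}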
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 6fb0808e87c8..e528baebad69 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -26,9 +26,8 @@ enum dm_queue_mode {
DM_TYPE_NONE = 0,
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
- DM_TYPE_MQ_REQUEST_BASED = 3,
- DM_TYPE_DAX_BIO_BASED = 4,
- DM_TYPE_NVME_BIO_BASED = 5,
+ DM_TYPE_DAX_BIO_BASED = 3,
+ DM_TYPE_NVME_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -92,6 +91,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones,
+ gfp_t gfp_mask);
+
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -180,6 +184,9 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
+#ifdef CONFIG_BLK_DEV_ZONED
+ dm_report_zones_fn report_zones;
+#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
@@ -420,8 +427,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
- sector_t start);
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -490,6 +497,7 @@ sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
+const char *dm_table_device_name(struct dm_table *t);
/*
* Trigger an event.
diff --git a/include/linux/device.h b/include/linux/device.h
index 8f882549edee..6cb4640b6160 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -55,6 +55,8 @@ struct bus_attribute {
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
extern int __must_check bus_create_file(struct bus_type *,
struct bus_attribute *);
@@ -692,8 +694,10 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
-extern void devm_kfree(struct device *dev, void *p);
+extern void devm_kfree(struct device *dev, const void *p);
extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+extern const char *devm_kstrdup_const(struct device *dev,
+ const char *s, gfp_t gfp);
extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp);
@@ -774,6 +778,30 @@ void device_connection_add(struct device_connection *con);
void device_connection_remove(struct device_connection *con);
/**
+ * device_connections_add - Add multiple device connections at once
+ * @cons: Zero-terminated array of device connection descriptors
+ */
+static inline void device_connections_add(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_add(c);
+}
+
+/**
+ * device_connections_remove - Remove multiple device connections at once
+ * @cons: Zero-terminated array of device connection descriptors
+ */
+static inline void device_connections_remove(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_remove(c);
+}
+
+/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
@@ -927,6 +955,8 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @dma_coherent: this particular device is DMA coherent, even if the
+ * architecture supports non-coherent devices.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -1016,6 +1046,11 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ bool dma_coherent:1;
+#endif
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1023,6 +1058,16 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
return container_of(kobj, struct device, kobj);
}
+/**
+ * device_iommu_mapped - Returns true when the device DMA is translated
+ * by an IOMMU
+ * @dev: Device to perform the check on
+ */
+static inline bool device_iommu_mapped(struct device *dev)
+{
+ return (dev->iommu_group != NULL);
+}
+
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
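The device_connections_add()/remove() helpers above walk the descriptor array until an entry with an empty endpoint[0], so the table must end with a sentinel. A sketch assuming the endpoint[]/id layout of struct device_connection; the table contents and function names are illustrative.

#include <linux/device.h>

/* hypothetical connection table; note the zero-terminated sentinel entry */
static struct device_connection demo_connections[] = {
	{
		.endpoint = { "demo-mux", "demo-port0" },
		.id = "mux",
	},
	{ }	/* empty endpoint[0] terminates the array */
};

static void demo_register_connections(void)
{
	device_connections_add(demo_connections);
	/* ... later, on teardown ... */
	device_connections_remove(demo_connections);
}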
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index a785f2507159..2ad5c363d7d5 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -30,7 +30,8 @@ struct bus_type;
extern void dma_debug_add_bus(struct bus_type *bus);
-extern int dma_debug_resize_entries(u32 num_entries);
+extern void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
@@ -69,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction);
-extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction);
-
-extern void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction);
-
extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
@@ -98,9 +88,9 @@ static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
-static inline int dma_debug_resize_entries(u32 num_entries)
+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
{
- return 0;
}
static inline void debug_dma_map_page(struct device *dev, struct page *page,
@@ -166,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev,
{
}
-static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
-static inline void debug_dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- int direction)
-{
-}
-
static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 8d9f33febde5..b7338702592a 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return false;
- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size - 1 <=
+ min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
@@ -47,23 +48,17 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
-#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
-void dma_mark_clean(void *addr, size_t size);
-#else
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
-
+u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
+void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8cd033d..999e4b104410 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);
#define DMA_FENCE_TRACE(f, fmt, args...) \
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e8ca5e654277..e760dc5d1fa8 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
-int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 1db6a6b46d0d..ba521d5506c9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -128,16 +128,14 @@ struct dma_map_ops {
enum dma_data_direction dir);
void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
-#endif
};
-extern const struct dma_map_ops dma_direct_ops;
-extern const struct dma_map_ops dma_noncoherent_ops;
+#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
+
extern const struct dma_map_ops dma_virt_ops;
+extern const struct dma_map_ops dma_dummy_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -223,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
}
#endif
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+ return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly by drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
@@ -232,9 +293,13 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, virt_to_page(ptr),
- offset_in_page(ptr), size,
- dir, attrs);
+ debug_dma_map_single(dev, ptr, size);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
+ else
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
debug_dma_map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
dir, addr, true);
@@ -249,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_page(dev, addr, size, dir, attrs);
+ else if (ops->unmap_page)
ops->unmap_page(dev, addr, size, dir, attrs);
debug_dma_unmap_page(dev, addr, size, dir, true);
}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+}
+
/*
* dma_maps_sg_attrs returns 0 on error and > 0 on success.
* It should never return a value < 0.
@@ -266,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int ents;
BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ if (dma_is_direct(ops))
+ ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+ else
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
debug_dma_map_sg(dev, sg, nents, ents, dir);
@@ -281,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
+ if (dma_is_direct(ops))
+ dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+ else if (ops->unmap_sg)
ops->unmap_sg(dev, sg, nents, dir, attrs);
}
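For a driver, the dma_is_direct() indirect-call bypass in these inlines is transparent: mapping code keeps using the regular helpers and only checks the result. A minimal sketch; the function name is illustrative, and the error convention shown assumes the DMA_MAPPING_ERROR scheme this series introduces above.

#include <linux/dma-mapping.h>

/* hypothetical driver-side mapping; the direct-vs-ops dispatch happens below it */
static dma_addr_t map_rx_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single_attrs(dev, buf, len,
					       DMA_FROM_DEVICE, 0);

	if (dma_mapping_error(dev, addr))	/* failures are reported via DMA_MAPPING_ERROR */
		return DMA_MAPPING_ERROR;

	return addr;
}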
@@ -295,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ if (dma_is_direct(ops))
+ addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ else
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
}
-static inline void dma_unmap_page_attrs(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr,
size_t size,
@@ -329,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
addr = phys_addr;
- if (ops->map_resource)
+ if (ops && ops->map_resource)
addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -344,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_resource)
+ if (ops && ops->unmap_resource)
ops->unmap_resource(dev, addr, size, dir, attrs);
debug_dma_unmap_resource(dev, addr, size, dir);
}
@@ -356,11 +424,20 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+ else if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(dev, addr, size, dir);
debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -368,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_single_for_device(dev, addr, size, dir);
+ else if (ops->sync_single_for_device)
ops->sync_single_for_device(dev, addr, size, dir);
debug_dma_sync_single_for_device(dev, addr, size, dir);
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
+ dma_addr_t addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
static inline void
@@ -408,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
@@ -420,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
+ if (dma_is_direct(ops))
+ dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+ else if (ops->sync_sg_for_device)
ops->sync_sg_for_device(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
@@ -433,19 +495,12 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->cache_sync)
- ops->cache_sync(dev, vaddr, size, dir);
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir);
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
void *dma_common_contiguous_remap(struct page *page, size_t size,
unsigned long vm_flags,
@@ -456,111 +511,36 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
+int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
+bool dma_in_atomic_pool(void *start, size_t size);
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(void *start, size_t size);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
-}
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, unsigned long attrs);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
- WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
-
- if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- /* let the implementation decide on the zone to allocate from: */
- flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if (!arch_dma_alloc_attrs(&dev))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
-
- if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
- return;
- /*
- * On non-coherent platforms which implement DMA-coherent buffers via
- * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
- * this far in IRQ context is a) at risk of a BUG_ON() or trying to
- * sleep on some machines, and b) an indication that the driver is
- * probably misusing the coherent API anyway.
- */
- WARN_ON(irqs_disabled());
-
- if (!ops->free || !cpu_addr)
- return;
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
+
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -571,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
debug_dma_mapping_error(dev, dma_addr);
- if (ops->mapping_error)
- return ops->mapping_error(dev, dma_addr);
- return 0;
-}
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
- if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
- dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
-}
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- dma_check_mask(dev, mask);
-
- *dev->dma_mask = mask;
+ if (dma_addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
return 0;
}
-#endif
+
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
static inline u64 dma_get_mask(struct device *dev)
{
@@ -616,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev)
return DMA_BIT_MASK(32);
}
-#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-#else
-static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
- if (!dma_supported(dev, mask))
- return -EIO;
-
- dma_check_mask(dev, mask);
-
- dev->coherent_dma_mask = mask;
- return 0;
-}
-#endif
-
/*
* Set both the DMA mask and the coherent DMA mask to the same thing.
* Note that we don't check the return value from dma_set_coherent_mask()
@@ -674,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
@@ -707,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
}
#endif
+/*
+ * Please always use dma_alloc_coherent instead as it already zeroes the memory!
+ */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle,
- flag | __GFP_ZERO);
- return ret;
+ return dma_alloc_coherent(dev, size, dma_handle, flag);
}
static inline int dma_get_cache_alignment(void)
@@ -753,18 +691,6 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-#ifdef CONFIG_HAS_DMA
-int dma_configure(struct device *dev);
-void dma_deconfigure(struct device *dev);
-#else
-static inline int dma_configure(struct device *dev)
-{
- return 0;
-}
-
-static inline void dma_deconfigure(struct device *dev) {}
-#endif
-
/*
* Managed DMA API
*/
@@ -806,8 +732,12 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- return dma_alloc_attrs(dev, size, dma_addr, gfp,
- DMA_ATTR_WRITE_COMBINE);
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
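
With this rework dma_mapping_error() no longer consults ops->mapping_error; it simply compares the handle against the DMA_MAPPING_ERROR sentinel. A minimal driver-side sketch of the idiom, assuming a hypothetical helper my_send() with a valid device, buffer and length:

static int my_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))	/* addr == DMA_MAPPING_ERROR */
		return -ENOMEM;

	/* ... hand 'addr' to the device and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
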
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index a0aa00cc909d..69b36ed31a99 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -4,24 +4,44 @@
#include <linux/dma-mapping.h>
+#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
+#include <asm/dma-coherence.h>
+#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return dev->dma_coherent;
+}
+#else
+static inline bool dev_is_dma_coherent(struct device *dev)
+{
+ return true;
+}
+#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
+
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr);
-#ifdef CONFIG_DMA_NONCOHERENT_MMAP
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
+#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs);
#else
-#define arch_dma_mmap NULL
-#endif /* CONFIG_DMA_NONCOHERENT_MMAP */
+# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot)
+#endif
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+{
+}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -52,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+void arch_dma_prep_coherent(struct page *page, size_t size);
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
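
dev_is_dma_coherent() lets generic and architecture code skip cache maintenance for coherent devices. A minimal sketch of that dispatch pattern, assuming an architecture that selects ARCH_HAS_SYNC_DMA_FOR_DEVICE and therefore provides arch_sync_dma_for_device(); my_sync_for_device() is a hypothetical wrapper:

static void my_sync_for_device(struct device *dev, phys_addr_t paddr,
			       size_t size, enum dma_data_direction dir)
{
	/* Coherent devices need no explicit cache maintenance. */
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_device(dev, paddr, size, dir);
}
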
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index 9fc594f69eff..fceb5df07097 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -23,15 +23,4 @@ struct pxad_param {
enum pxad_chan_prio prio;
};
-struct dma_chan;
-
-#ifdef CONFIG_PXA_DMA
-bool pxad_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
#endif /* _PXA_DMA_H_ */
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index b0115e340fbc..ab82df64682a 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -3,9 +3,65 @@
#ifndef _SPRD_DMA_H_
#define _SPRD_DMA_H_
-#define SPRD_DMA_REQ_SHIFT 16
-#define SPRD_DMA_FLAGS(req_mode, int_type) \
- ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+#define SPRD_DMA_REQ_SHIFT 8
+#define SPRD_DMA_TRG_MODE_SHIFT 16
+#define SPRD_DMA_CHN_MODE_SHIFT 24
+#define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \
+ ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \
+ (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \
+ (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
+/*
+ * The Spreadtrum DMA controller supports channel 2-stage transfer, which means
+ * we can request 2 DMA channels, one as the source channel and another as the
+ * destination channel. Each channel is independent and has its own
+ * configuration. Once the source channel's transaction is done, it triggers
+ * the destination channel's transaction automatically by hardware signal.
+ *
+ * To support 2-stage transfer, we must configure the channel mode and trigger
+ * mode as defined below.
+ */
+
+/*
+ * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer
+ * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting, which means the channel
+ * does not support 2-stage transfer.
+ * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0.
+ * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1.
+ * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0.
+ * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1.
+ *
+ * The DMA controller currently supports two groups of 2-stage transfers.
+ */
+enum sprd_dma_chn_mode {
+ SPRD_DMA_CHN_MODE_NONE,
+ SPRD_DMA_SRC_CHN0,
+ SPRD_DMA_SRC_CHN1,
+ SPRD_DMA_DST_CHN0,
+ SPRD_DMA_DST_CHN1,
+};
+
+/*
+ * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage
+ * transfer
+ * @SPRD_DMA_NO_TRG: No trigger setting.
+ * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's fragment request is done.
+ * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's block request is done.
+ * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's transfer request is done.
+ * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel
+ * automatically once the source channel's link-list request is done.
+ */
+enum sprd_dma_trg_mode {
+ SPRD_DMA_NO_TRG,
+ SPRD_DMA_FRAG_DONE_TRG,
+ SPRD_DMA_BLOCK_DONE_TRG,
+ SPRD_DMA_TRANS_DONE_TRG,
+ SPRD_DMA_LIST_DONE_TRG,
+};
/*
* enum sprd_dma_req_mode: define the DMA request mode
@@ -58,4 +114,73 @@ enum sprd_dma_int_type {
SPRD_DMA_CFGERR_INT,
};
+/*
+ * struct sprd_dma_linklist - DMA link-list address structure
+ * @virt_addr: link-list virtual address to configure link-list node
+ * @phy_addr: link-list physical address to link DMA transfer
+ *
+ * The Spreadtrum DMA controller supports link-list mode, which means slaves
+ * can supply several groups of configurations (each configuration represents
+ * one DMA transfer) saved in memory, and the DMA controller will link these
+ * configurations by writing the physical address of each configuration into
+ * the link-list register.
+ *
+ * As shown below, the link-list pointer register points to the physical
+ * address of 'configuration 1', the link-list pointer of 'configuration 1'
+ * points to 'configuration 2', and so on.
+ * Once the DMA transfer is triggered, the DMA controller loads 'configuration
+ * 1' into its registers automatically; after the 'configuration 1' transaction
+ * is done, it loads 'configuration 2' automatically, and so on until all DMA
+ * transactions are done.
+ *
+ * Note: The last link-list pointer should point back to the physical address
+ * of 'configuration 1', which prevents the DMA controller from loading an
+ * incorrect configuration after the last configuration's transaction is done.
+ *
+ * DMA controller linklist memory
+ * ====================== -----------------------
+ *| | | configuration 1 |<---
+ *| DMA controller | ------->| | |
+ *| | | | | |
+ *| | | | | |
+ *| | | | | |
+ *| linklist pointer reg |---- ----| linklist pointer | |
+ * ====================== | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 2 | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * ----| linklist pointer | |
+ * | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 3 | |
+ * --->| | |
+ * | | |
+ * | . | |
+ * . |
+ * . |
+ * . |
+ * | . |
+ * | ----------------------- |
+ * | | configuration n | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * | linklist pointer |----
+ * -----------------------
+ *
+ * To support link-list mode, DMA slaves should allocate one segment of memory
+ * from always-on IRAM or DMA coherent memory to store these groups of DMA
+ * configurations, and pass their virtual and physical addresses to the DMA
+ * controller.
+ */
+struct sprd_dma_linklist {
+ unsigned long virt_addr;
+ phys_addr_t phy_addr;
+};
+
#endif
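
A short sketch of how a slave driver might compose the new four-part flags for a 2-stage transfer. SPRD_DMA_FRAG_REQ and SPRD_DMA_TRANS_INT are assumed to be the request-mode and interrupt-type enumerators from the unchanged parts of this header that are not shown in the diff:

static unsigned long my_2stage_src_flags(void)
{
	/* Source channel 0; trigger the destination channel once the whole
	 * transfer request is done; fragment request mode; transfer-done
	 * interrupt. */
	return SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN0, SPRD_DMA_TRANS_DONE_TRG,
			      SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
}
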
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
deleted file mode 100644
index 21b3e7d33d68..000000000000
--- a/include/linux/dma_remapping.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DMA_REMAPPING_H
-#define _DMA_REMAPPING_H
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-/* Extended context entry types */
-#define CONTEXT_TT_PT_PASID 4
-#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
-#define CONTEXT_TT_MASK (7ULL << 2)
-
-#define CONTEXT_DINVE (1ULL << 8)
-#define CONTEXT_PRS (1ULL << 9)
-#define CONTEXT_PASIDE (1ULL << 11)
-
-struct intel_iommu;
-struct dmar_domain;
-struct root_entry;
-
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-
-#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e2433bc50210..f8af1d770520 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -39,6 +39,7 @@ struct acpi_dmar_header;
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
+#define DMAR_PLATFORM_OPT_IN 0x4
struct intel_iommu;
@@ -170,6 +171,8 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
+extern bool dmar_platform_optin(void);
+
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
@@ -182,6 +185,11 @@ static inline int dmar_device_remove(void *handle)
return 0;
}
+static inline bool dmar_platform_optin(void)
+{
+ return false;
+}
+
#endif /* CONFIG_DMAR_TABLE */
struct irte {
@@ -265,11 +273,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
#define PDA_LOW_BIT 26
#define PDA_HIGH_BIT 32
-enum {
- IRQ_REMAP_XAPIC_MODE,
- IRQ_REMAP_X2APIC_MODE,
-};
-
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index 6ac3cad9aef1..34a744a1bafc 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -24,11 +24,9 @@
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
-#ifdef __KERNEL__
+#include <uapi/linux/dns_resolver.h>
extern int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time64_t *_expiry);
-#endif /* KERNEL */
-
#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 2d0259327721..a19d98367f08 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -51,7 +51,7 @@
#endif
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.10"
+#define REL_VERSION "8.4.11"
#define API_VERSION 1
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 101
diff --git a/include/linux/edac.h b/include/linux/edac.h
index bffb97828ed6..342dabda9c7e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -451,6 +452,8 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
unsigned csrow, cschannel; /* Points to the old API data */
+
+ u16 smbios_handle; /* Handle for SMBIOS type 17 */
};
/**
@@ -666,10 +669,4 @@ struct mem_ctl_info {
bool fake_inject_ue;
u16 fake_inject_count;
};
-
-/*
- * Maximum number of memory controllers in the coherent fabric.
- */
-#define EDAC_MAX_MCS 16
-
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 401e4b254e30..45ff763fba76 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -663,6 +663,10 @@ void efi_native_runtime_setup(void);
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
+#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
+#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
+#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
@@ -672,6 +676,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
+#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
typedef struct {
efi_guid_t guid;
@@ -933,6 +938,27 @@ typedef struct {
efi_memory_desc_t entry[0];
} efi_memory_attributes_table_t;
+typedef struct {
+ efi_guid_t signature_owner;
+ u8 signature_data[];
+} efi_signature_data_t;
+
+typedef struct {
+ efi_guid_t signature_type;
+ u32 signature_list_size;
+ u32 signature_header_size;
+ u32 signature_size;
+ u8 signature_header[];
+ /* efi_signature_data_t signatures[][] */
+} efi_signature_list_t;
+
+typedef u8 efi_sha256_hash_t[32];
+
+typedef struct {
+ efi_sha256_hash_t to_be_signed_hash;
+ efi_time_t time_of_revocation;
+} efi_cert_x509_sha256_t;
+
/*
* All runtime access to EFI goes through this structure:
*/
@@ -957,6 +983,7 @@ extern struct efi {
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long mem_reserve; /* Linux EFI memreserve table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -998,13 +1025,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -1041,9 +1066,9 @@ extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
+extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct kobject *efi_kobj;
@@ -1113,6 +1138,15 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
char * __init efi_md_typeattr_format(char *buf, size_t size,
const efi_memory_desc_t *md);
+
+typedef void (*efi_element_handler_t)(const char *source,
+ const void *element_data,
+ size_t element_size);
+extern int __init parse_efi_signature_list(
+ const char *source,
+ const void *data, size_t size,
+ efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *));
+
/**
* efi_range_is_wc - check the WC bit on an address range
* @start: starting kvirt address
@@ -1164,6 +1198,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1182,6 +1218,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+ return 0;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1659,7 +1700,65 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/*
+ * efi_runtime_service() function identifiers.
+ * "NONE" is used by efi_recover_from_page_fault() to check if the page
+ * fault happened while executing an efi runtime service.
+ */
+enum efi_rts_ids {
+ NONE,
+ GET_TIME,
+ SET_TIME,
+ GET_WAKEUP_TIME,
+ SET_WAKEUP_TIME,
+ GET_VARIABLE,
+ GET_NEXT_VARIABLE,
+ SET_VARIABLE,
+ QUERY_VARIABLE_INFO,
+ GET_NEXT_HIGH_MONO_COUNT,
+ RESET_SYSTEM,
+ UPDATE_CAPSULE,
+ QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work: Details of EFI Runtime Service work
+ * @arg<1-5>: EFI Runtime Service function arguments
+ * @status: Status of executing EFI Runtime Service
+ * @efi_rts_id: EFI Runtime Service function identifier
+ * @efi_rts_comp: Struct used for handling completions
+ */
+struct efi_runtime_work {
+ void *arg1;
+ void *arg2;
+ void *arg3;
+ void *arg4;
+ void *arg5;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+};
+
+extern struct efi_runtime_work efi_rts_work;
+
/* Workqueue to queue EFI Runtime Services */
extern struct workqueue_struct *efi_rts_wq;
+struct linux_efi_memreserve {
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[0];
+};
+
+#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
+ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
#endif /* _LINUX_EFI_H */
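
The two EFI_MEMRESERVE_* macros are inverses of each other: SIZE turns an entry count into a byte size including the header, and COUNT does the reverse. A minimal sketch of initialising an empty one-page table; init_memreserve_table() is a hypothetical helper and the backing page is assumed to be allocated elsewhere:

static void init_memreserve_table(struct linux_efi_memreserve *rsv)
{
	/* On 64-bit the header is 16 bytes and each entry 16 bytes, so a
	 * 4 KiB page holds (4096 - 16) / 16 = 255 entries. */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 0);
	rsv->next = 0;	/* physical address of the next table, if any */
}
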
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a02deea30185..2e9e2763bf47 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -23,74 +23,6 @@ enum elv_merge {
ELEVATOR_DISCARD_MERGE = 3,
};
-typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
- struct bio *);
-
-typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
-
-typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
- struct request *, struct request *);
-
-typedef void (elevator_bio_merged_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-
-typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
-typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
-
-typedef void (elevator_init_icq_fn) (struct io_cq *);
-typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
- struct bio *, gfp_t);
-typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
-typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-
-typedef int (elevator_init_fn) (struct request_queue *,
- struct elevator_type *e);
-typedef void (elevator_exit_fn) (struct elevator_queue *);
-typedef void (elevator_registered_fn) (struct request_queue *);
-
-struct elevator_ops
-{
- elevator_merge_fn *elevator_merge_fn;
- elevator_merged_fn *elevator_merged_fn;
- elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
- elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
- elevator_bio_merged_fn *elevator_bio_merged_fn;
-
- elevator_dispatch_fn *elevator_dispatch_fn;
- elevator_add_req_fn *elevator_add_req_fn;
- elevator_activate_req_fn *elevator_activate_req_fn;
- elevator_deactivate_req_fn *elevator_deactivate_req_fn;
-
- elevator_completed_req_fn *elevator_completed_req_fn;
-
- elevator_request_list_fn *elevator_former_req_fn;
- elevator_request_list_fn *elevator_latter_req_fn;
-
- elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
- elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
-
- elevator_set_req_fn *elevator_set_req_fn;
- elevator_put_req_fn *elevator_put_req_fn;
-
- elevator_may_queue_fn *elevator_may_queue_fn;
-
- elevator_init_fn *elevator_init_fn;
- elevator_exit_fn *elevator_exit_fn;
- elevator_registered_fn *elevator_registered_fn;
-};
-
struct blk_mq_alloc_data;
struct blk_mq_hw_ctx;
@@ -111,7 +43,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *);
+ void (*completed_request)(struct request *, u64);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
@@ -137,17 +69,14 @@ struct elevator_type
struct kmem_cache *icq_cache;
/* fields provided by elevator implementation */
- union {
- struct elevator_ops sq;
- struct elevator_mq_ops mq;
- } ops;
+ struct elevator_mq_ops ops;
+
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
const char *elevator_alias;
struct module *elevator_owner;
- bool uses_mq;
#ifdef CONFIG_BLK_DEBUG_FS
const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
@@ -175,40 +104,25 @@ struct elevator_queue
struct kobject kobj;
struct mutex sysfs_lock;
unsigned int registered:1;
- unsigned int uses_mq:1;
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
/*
* block elevator interface
*/
-extern void elv_dispatch_sort(struct request_queue *, struct request *);
-extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int);
extern enum elv_merge elv_merge(struct request_queue *, struct request **,
struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern void elv_bio_merged(struct request_queue *q, struct request *,
- struct bio *);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_may_queue(struct request_queue *, unsigned int);
-extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask);
-extern void elv_put_request(struct request_queue *, struct request *);
-extern void elv_drain_elevator(struct request_queue *);
/*
* io scheduler registration
*/
-extern void __init load_default_elevator_module(void);
extern int elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
@@ -260,9 +174,5 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-#else /* CONFIG_BLOCK */
-
-static inline void load_default_elevator_module(void) { }
-
#endif /* CONFIG_BLOCK */
#endif
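
With the legacy elevator_ops union gone, an I/O scheduler fills in struct elevator_mq_ops directly. A minimal registration sketch, assuming my_dispatch() and my_has_work() are scheduler-provided callbacks with the dispatch_request/has_work prototypes:

static struct elevator_type my_sched = {
	.ops = {
		.dispatch_request	= my_dispatch,
		.has_work		= my_has_work,
	},
	.elevator_name	= "my-sched",
	.elevator_owner	= THIS_MODULE,
};

static int __init my_sched_init(void)
{
	return elv_register(&my_sched);
}
module_init(my_sched_init);
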
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index b5f2efdd05e0..7a37f4ce9fd2 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -27,10 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
- struct compat_timeval pr_utime;
- struct compat_timeval pr_stime;
- struct compat_timeval pr_cutime;
- struct compat_timeval pr_cstime;
+ struct old_timeval32 pr_utime;
+ struct old_timeval32 pr_stime;
+ struct old_timeval32 pr_cutime;
+ struct old_timeval32 pr_cstime;
compat_elf_gregset_t pr_reg;
#ifdef CONFIG_BINFMT_ELF_FDPIC
compat_ulong_t pr_exec_fdpic_loadmap;
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
new file mode 100644
index 000000000000..aa027f7bcb3e
--- /dev/null
+++ b/include/linux/energy_model.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ENERGY_MODEL_H
+#define _LINUX_ENERGY_MODEL_H
+#include <linux/cpumask.h>
+#include <linux/jump_label.h>
+#include <linux/kobject.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/topology.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ENERGY_MODEL
+/**
+ * em_cap_state - Capacity state of a performance domain
+ * @frequency: The CPU frequency in kHz, for consistency with CPUFreq
+ * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * @cost: The cost coefficient associated with this level, used during
+ * energy calculation. Equal to: power * max_frequency / frequency
+ */
+struct em_cap_state {
+ unsigned long frequency;
+ unsigned long power;
+ unsigned long cost;
+};
+
+/**
+ * em_perf_domain - Performance domain
+ * @table: List of capacity states, in ascending order
+ * @nr_cap_states: Number of capacity states
+ * @cpus: Cpumask covering the CPUs of the domain
+ *
+ * A "performance domain" represents a group of CPUs whose performance is
+ * scaled together. All CPUs of a performance domain must have the same
+ * micro-architecture. Performance domains often have a 1-to-1 mapping with
+ * CPUFreq policies.
+ */
+struct em_perf_domain {
+ struct em_cap_state *table;
+ int nr_cap_states;
+ unsigned long cpus[0];
+};
+
+#define EM_CPU_MAX_POWER 0xFFFF
+
+struct em_data_callback {
+ /**
+ * active_power() - Provide power at the next capacity state of a CPU
+ * @power : Active power at the capacity state in mW (modified)
+ * @freq : Frequency at the capacity state in kHz (modified)
+ * @cpu : CPU for which we do this operation
+ *
+ * active_power() must find the lowest capacity state of 'cpu' above
+ * 'freq' and update 'power' and 'freq' to the matching active power
+ * and frequency.
+ *
+ * The power is the one of a single CPU in the domain, expressed in
+ * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
+ * range.
+ *
+ * Return 0 on success.
+ */
+ int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+};
+#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+
+struct em_perf_domain *em_cpu_get(int cpu);
+int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb);
+
+/**
+ * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * @pd : performance domain for which energy has to be estimated
+ * @max_util : highest utilization among CPUs of the domain
+ * @sum_util : sum of the utilization of all CPUs in the domain
+ *
+ * Return: the sum of the energy consumed by the CPUs of the domain assuming
+ * a capacity state satisfying the max utilization of the domain.
+ */
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ unsigned long freq, scale_cpu;
+ struct em_cap_state *cs;
+ int i, cpu;
+
+ /*
+ * In order to predict the capacity state, map the utilization of the
+ * most utilized CPU of the performance domain to a requested frequency,
+ * like schedutil.
+ */
+ cpu = cpumask_first(to_cpumask(pd->cpus));
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ cs = &pd->table[pd->nr_cap_states - 1];
+ freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+
+ /*
+ * Find the lowest capacity state of the Energy Model above the
+ * requested frequency.
+ */
+ for (i = 0; i < pd->nr_cap_states; i++) {
+ cs = &pd->table[i];
+ if (cs->frequency >= freq)
+ break;
+ }
+
+ /*
+ * The capacity of a CPU in the domain at that capacity state (cs)
+ * can be computed as:
+ *
+ * cs->freq * scale_cpu
+ * cs->cap = -------------------- (1)
+ * cpu_max_freq
+ *
+ * So, ignoring the costs of idle states (which are not available in
+ * the EM), the energy consumed by this CPU at that capacity state is
+ * estimated as:
+ *
+ * cs->power * cpu_util
+ * cpu_nrg = -------------------- (2)
+ * cs->cap
+ *
+ * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ *
+ * NOTE: Although the result of this computation actually is in
+ * units of power, it can be manipulated as an energy value
+ * over a scheduling period, since it is assumed to be
+ * constant during that interval.
+ *
+ * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
+ * of two terms:
+ *
+ * cs->power * cpu_max_freq cpu_util
+ * cpu_nrg = ------------------------ * --------- (3)
+ * cs->freq scale_cpu
+ *
+ * The first term is static, and is stored in the em_cap_state struct
+ * as 'cs->cost'.
+ *
+ * Since all CPUs of the domain have the same micro-architecture, they
+ * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * total energy of the domain (which is the simple sum of the energy of
+ * all of its CPUs) can be factorized as:
+ *
+ * cs->cost * \Sum cpu_util
+ * pd_nrg = ------------------------ (4)
+ * scale_cpu
+ */
+ return cs->cost * sum_util / scale_cpu;
+}
+
+/**
+ * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * @pd : performance domain for which this must be done
+ *
+ * Return: the number of capacity states in the performance domain table
+ */
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return pd->nr_cap_states;
+}
+
+#else
+struct em_perf_domain {};
+struct em_data_callback {};
+#define EM_DATA_CB(_active_power_cb) { }
+
+static inline int em_register_perf_domain(cpumask_t *span,
+ unsigned int nr_states, struct em_data_callback *cb)
+{
+ return -EINVAL;
+}
+static inline struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return NULL;
+}
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ return 0;
+}
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return 0;
+}
+#endif
+
+#endif
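
A short usage sketch of the new API: look up the performance domain of a CPU and estimate its energy for a given utilization snapshot. estimate_cpu0_energy() is a hypothetical helper; max_util and sum_util are assumed to come from the scheduler:

static unsigned long estimate_cpu0_energy(unsigned long max_util,
					  unsigned long sum_util)
{
	struct em_perf_domain *pd = em_cpu_get(0);

	if (!pd)	/* no energy model registered for CPU0 */
		return 0;

	return em_pd_energy(pd, max_util, sum_util);
}
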
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 572e11bb8696..2c0af7b00715 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -32,6 +32,7 @@
struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
unsigned char *arch_get_platform_mac_address(void);
+int nvmem_get_mac_address(struct device *dev, void *addrbuf);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f8a2245b70ac..afd9596ce636 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -183,14 +183,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
/**
* struct ethtool_ops - optional netdev operations
- * @get_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Get various device settings including Ethernet link
- * settings. The @cmd parameter is expected to have been cleared
- * before get_settings is called. Returns a negative error code
- * or zero.
- * @set_settings: DEPRECATED, use %get_link_ksettings/%set_link_ksettings
- * API. Set various device settings including Ethernet link
- * settings. Returns a negative error code or zero.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
* implemented, the @driver and @bus_info fields will be filled in
@@ -297,19 +289,16 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* a TX queue has this number, return -EINVAL. If only a RX queue or a TX
* queue has this number, ignore the inapplicable fields.
* Returns a negative error code or zero.
- * @get_link_ksettings: When defined, takes precedence over the
- * %get_settings method. Get various device settings
- * including Ethernet link settings. The %cmd and
- * %link_mode_masks_nwords fields should be ignored (use
- * %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter), any
- * change to them will be overwritten by kernel. Returns a
- * negative error code or zero.
- * @set_link_ksettings: When defined, takes precedence over the
- * %set_settings method. Set various device settings including
- * Ethernet link settings. The %cmd and %link_mode_masks_nwords
- * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
- * instead of the latter), any change to them will be overwritten
- * by kernel. Returns a negative error code or zero.
+ * @get_link_ksettings: Get various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
+ * @set_link_ksettings: Set various device settings including Ethernet link
+ * settings. The %cmd and %link_mode_masks_nwords fields should be
+ * ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS instead of the latter),
+ * any change to them will be overwritten by kernel. Returns a negative
+ * error code or zero.
* @get_fecparam: Get the network device Forward Error Correction parameters.
* @set_fecparam: Set the network device Forward Error Correction parameters.
* @get_ethtool_phy_stats: Return extended statistics about the PHY device.
@@ -329,8 +318,6 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* of the generic netdev features interface.
*/
struct ethtool_ops {
- int (*get_settings)(struct net_device *, struct ethtool_cmd *);
- int (*set_settings)(struct net_device *, struct ethtool_cmd *);
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
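
With get_settings/set_settings removed, drivers register only the ksettings variants. A minimal sketch, assuming my_get_ksettings() and my_set_ksettings() are driver callbacks with the get_link_ksettings/set_link_ksettings prototypes:

static const struct ethtool_ops my_ethtool_ops = {
	.get_link_ksettings	= my_get_ksettings,
	.set_link_ksettings	= my_set_ksettings,
};
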
diff --git a/include/linux/export.h b/include/linux/export.h
index b768d6dd3c90..fd8711ed9ac4 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -10,20 +10,7 @@
* hackers place grumpy comments in header files.
*/
-#define __VMLINUX_SYMBOL(x) x
-#define __VMLINUX_SYMBOL_STR(x) #x
-
-/* Indirect, so macros are expanded before pasting. */
-#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
-#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
-
#ifndef __ASSEMBLY__
-struct kernel_symbol
-{
- unsigned long value;
- const char *name;
-};
-
#ifdef MODULE
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
@@ -54,34 +41,73 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#include <linux/compiler.h>
+/*
+ * Emit the ksymtab entry as a pair of relative references: this reduces
+ * the size by half on 64-bit architectures, and eliminates the need for
+ * absolute relocations that require runtime processing on relocatable
+ * kernels.
+ */
+#define __KSYMTAB_ENTRY(sym, sec) \
+ __ADDRESSABLE(sym) \
+ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
+ " .balign 8 \n" \
+ "__ksymtab_" #sym ": \n" \
+ " .long " #sym "- . \n" \
+ " .long __kstrtab_" #sym "- . \n" \
+ " .previous \n")
+
+struct kernel_symbol {
+ int value_offset;
+ int name_offset;
+};
+#else
+#define __KSYMTAB_ENTRY(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+ = { (unsigned long)&sym, __kstrtab_##sym }
+
+struct kernel_symbol {
+ unsigned long value;
+ const char *name;
+};
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #sym; \
- static const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+ __KSYMTAB_ENTRY(sym, sec)
-#if defined(__KSYM_DEPS__)
+#if defined(__DISABLE_EXPORTS)
/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a string pattern that is unlikely to be valid code that the build
- * system filters out from the preprocessor output (see ksym_dep_filter
- * in scripts/Kbuild.include).
+ * Allow symbol exports to be disabled completely so that C code may
+ * be reused in other execution contexts such as the UEFI stub or the
+ * decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym ===
+#define __EXPORT_SYMBOL(sym, sec)
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
#include <generated/autoksyms.h>
+/*
+ * For fine grained build dependencies, we want to tell the build system
+ * about each possible exported symbol even if they're not actually exported.
+ * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
+ * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
+ * discarded in the final link stage.
+ */
+#define __ksym_marker(sym) \
+ static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
+
#define __EXPORT_SYMBOL(sym, sec) \
+ __ksym_marker(sym); \
__cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, sec, conf) \
___cond_export_sym(sym, sec, conf)
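
For reference, a relative ksymtab entry is decoded by adding the stored 32-bit offset to the address of the field itself, mirroring the offset_to_ptr() helper used by the symbol lookup code. ksym_value() is a hypothetical illustration:

#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
static inline unsigned long ksym_value(const struct kernel_symbol *sym)
{
	/* PREL32: the field stores "target - &field", so adding the field's
	 * own address recovers the target address. */
	return (unsigned long)&sym->value_offset + sym->value_offset;
}
#else
static inline unsigned long ksym_value(const struct kernel_symbol *sym)
{
	return sym->value;
}
#endif
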
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index aa5db8b5521a..d7711048ef93 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* include/linux/f2fs_fs.h
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_F2FS_FS_H
#define _LINUX_F2FS_FS_H
@@ -112,12 +109,15 @@ struct f2fs_super_block {
struct f2fs_device devs[MAX_DEVICES]; /* device list */
__le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */
__u8 hot_ext_count; /* # of hot file extension */
- __u8 reserved[314]; /* valid reserved region */
+ __u8 reserved[310]; /* valid reserved region */
+ __le32 crc; /* checksum of superblock */
} __packed;
/*
* For checkpoint
*/
+#define CP_DISABLED_FLAG 0x00001000
+#define CP_QUOTA_NEED_FSCK_FLAG 0x00000800
#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400
#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
@@ -304,11 +304,6 @@ struct f2fs_node {
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
- ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
- BITS_PER_LONG * BITS_PER_LONG)
-
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 096c96f4f16a..9e2142795335 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -4,6 +4,62 @@
#include <uapi/linux/fanotify.h>
-/* not valid from userspace, only kernel internal */
-#define FAN_MARK_ONDIR 0x00000100
+#define FAN_GROUP_FLAG(group, flag) \
+ ((group)->fanotify_data.flags & (flag))
+
+/*
+ * Flags allowed to be passed from/to userspace.
+ *
+ * We intentionally do not add new bits to the old FAN_ALL_* constants, because
+ * they are uapi-exposed constants. If there are programs out there using
+ * these constants, they may break if recompiled with new uapi headers and
+ * then run on an old kernel.
+ */
+#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
+ FAN_CLASS_PRE_CONTENT)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
+ FAN_REPORT_TID | \
+ FAN_CLOEXEC | FAN_NONBLOCK | \
+ FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
+
+#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
+ FAN_MARK_FILESYSTEM)
+
+#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
+ FAN_MARK_ADD | \
+ FAN_MARK_REMOVE | \
+ FAN_MARK_DONT_FOLLOW | \
+ FAN_MARK_ONLYDIR | \
+ FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORED_SURV_MODIFY | \
+ FAN_MARK_FLUSH)
+
+/* Events that user can request to be notified on */
+#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
+ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
+ FAN_OPEN_EXEC_PERM)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
+/* Events that may be reported to user */
+#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
+ FANOTIFY_PERM_EVENTS | \
+ FAN_Q_OVERFLOW)
+
+#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
+ FANOTIFY_EVENT_FLAGS)
+
+/* Do not use these old uapi constants internally */
+#undef FAN_ALL_CLASS_BITS
+#undef FAN_ALL_INIT_FLAGS
+#undef FAN_ALL_MARK_FLAGS
+#undef FAN_ALL_EVENTS
+#undef FAN_ALL_PERM_EVENTS
+#undef FAN_ALL_OUTGOING_EVENTS
+
#endif /* _LINUX_FANOTIFY_H */
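
These masks exist so the fanotify syscall entry points can validate user-supplied flags against a single definition. A minimal sketch of that check; my_fanotify_init_check() is a hypothetical wrapper:

static int my_fanotify_init_check(unsigned int flags)
{
	/* Reject any bit not covered by the kernel-defined init mask. */
	if (flags & ~FANOTIFY_INIT_FLAGS)
		return -EINVAL;
	return 0;
}
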
diff --git a/include/linux/fb.h b/include/linux/fb.h
index aa74a228bb92..a3cab6dc9b44 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -126,7 +126,7 @@ struct fb_cursor_user {
/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE 0x01
-/* The display on this fb_info is beeing suspended, no access to the
+/* The display on this fb_info is being suspended, no access to the
* framebuffer is allowed any more after that call returns
*/
#define FB_EVENT_SUSPEND 0x02
@@ -159,9 +159,9 @@ struct fb_cursor_user {
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
-/* A hardware display blank early change occured */
+/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
-/* A hardware display blank revert early change occured */
+/* A hardware display blank revert early change occurred */
#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
@@ -456,10 +456,13 @@ struct fb_tile_ops {
* and host endianness. Drivers should not use this flag.
*/
#define FBINFO_BE_MATH 0x100000
+/*
+ * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
+ * drivers to stop userspace from trying to share buffers behind the kernel's
+ * back. Instead dma-buf based buffer sharing should be used.
+ */
+#define FBINFO_HIDE_SMEM_START 0x200000
-/* report to the VT layer that this fb driver can accept forced console
- output like oopses */
-#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
atomic_t count;
@@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+ const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
@@ -650,6 +655,10 @@ extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern struct class *fb_class;
+#define for_each_registered_fb(i) \
+ for (i = 0; i < FB_MAX; i++) \
+ if (!registered_fb[i]) {} else
+
extern int lock_fb_info(struct fb_info *info);
static inline void unlock_fb_info(struct fb_info *info)
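
The new for_each_registered_fb() iterator skips unregistered slots in registered_fb[]. A minimal usage sketch; my_list_framebuffers() and its pr_info() body are illustrative only:

static void my_list_framebuffers(void)
{
	int i;

	for_each_registered_fb(i) {
		struct fb_info *info = registered_fb[i];

		pr_info("fb%d: %s\n", i, info->fix.id);
	}
}
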
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 41615f38bcff..f07c55ea0c22 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
unsigned int fd, struct file *file);
extern int __close_fd(struct files_struct *files,
unsigned int fd);
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6791a0ac0139..ad106d845b22 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -53,14 +53,10 @@ struct sock_reuseport;
#define BPF_REG_D BPF_REG_8 /* data, callee-saved */
#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
@@ -449,6 +445,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
@@ -520,24 +523,6 @@ struct bpf_skb_data_end {
void *data_end;
};
-struct sk_msg_buff {
- void *data;
- void *data_end;
- __u32 apply_bytes;
- __u32 cork_bytes;
- int sg_copybreak;
- int sg_start;
- int sg_curr;
- int sg_end;
- struct scatterlist sg_data[MAX_SKB_FRAGS];
- bool sg_copy[MAX_SKB_FRAGS];
- __u32 flags;
- struct sock *sk_redir;
- struct sock *sk;
- struct sk_buff *skb;
- struct list_head list;
-};
-
struct bpf_redirect_info {
u32 ifindex;
u32 flags;
@@ -566,6 +551,27 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+/* Similar to bpf_compute_data_pointers(), except that it saves the original
+ * cb->data_end in *saved_data_end so that it can be restored later.
+ */
+static inline void bpf_compute_and_save_data_end(
+ struct sk_buff *skb, void **saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ *saved_data_end = cb->data_end;
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
+/* Restore the data_end value saved by bpf_compute_and_save_data_end(). */
+static inline void bpf_restore_data_end(
+ struct sk_buff *skb, void *saved_data_end)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ cb->data_end = saved_data_end;
+}
+
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
@@ -665,24 +671,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
return size;
}
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
- u32 size_default)
-{
- size_default = bpf_ctx_off_adjust_machine(size_default);
- size_access = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
- return (off & (size_default - 1)) == 0;
-#else
- return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
- size <= size_default && (size & (size - 1)) == 0;
+ return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
@@ -729,6 +721,13 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
+void bpf_prog_free_linfo(struct bpf_prog *prog);
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -833,9 +832,6 @@ void xdp_do_flush_map(void);
void bpf_warn_invalid_xdp_action(u32 act);
-struct sock *do_sk_redirect_map(struct sk_buff *skb);
-struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
-
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
@@ -854,6 +850,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
+extern long bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -865,6 +862,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
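A minimal sketch, assuming a caller that runs a BPF program over an skb, of how the bpf_compute_and_save_data_end()/bpf_restore_data_end() pair added above is meant to bracket the program run; the wrapper function itself is hypothetical.

static u32 run_prog_save_data_end(const struct bpf_prog *prog,
				  struct sk_buff *skb)
{
	void *saved_data_end;
	u32 ret;

	/* publish data_end in skb->cb, remembering the previous value */
	bpf_compute_and_save_data_end(skb, &saved_data_end);
	ret = BPF_PROG_RUN(prog, skb);
	/* put back whatever the owner of skb->cb had stored there */
	bpf_restore_data_end(skb, saved_data_end);

	return ret;
}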
diff --git a/include/linux/firmware/imx/ipc.h b/include/linux/firmware/imx/ipc.h
new file mode 100644
index 000000000000..6312c8cb084a
--- /dev/null
+++ b/include/linux/firmware/imx/ipc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _SC_IPC_H
+#define _SC_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define IMX_SC_RPC_VERSION 1
+#define IMX_SC_RPC_MAX_MSG 8
+
+struct imx_sc_ipc;
+
+enum imx_sc_rpc_svc {
+ IMX_SC_RPC_SVC_UNKNOWN = 0,
+ IMX_SC_RPC_SVC_RETURN = 1,
+ IMX_SC_RPC_SVC_PM = 2,
+ IMX_SC_RPC_SVC_RM = 3,
+ IMX_SC_RPC_SVC_TIMER = 5,
+ IMX_SC_RPC_SVC_PAD = 6,
+ IMX_SC_RPC_SVC_MISC = 7,
+ IMX_SC_RPC_SVC_IRQ = 8,
+ IMX_SC_RPC_SVC_ABORT = 9
+};
+
+struct imx_sc_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t svc;
+ uint8_t func;
+};
+
+/*
+ * This function sends an RPC message over an IPC channel.
+ * It is called by client-side SCFW API function shims.
+ *
+ * @param[in] ipc IPC handle
+ * @param[in,out] msg handle to a message
+ * @param[in] have_resp response flag
+ *
+ * If have_resp is true then this function waits for a response
+ * and returns the result in msg.
+ */
+int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
+
+/*
+ * This function gets the default IPC handle used by the SCU.
+ *
+ * @param[out] ipc sc ipc handle
+ *
+ * @return Returns 0 on success, or a negative error code on failure.
+ */
+int imx_scu_get_handle(struct imx_sc_ipc **ipc);
+#endif /* _SC_IPC_H */
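A hedged sketch of a client-side shim built on imx_scu_call_rpc(); the message layout beyond struct imx_sc_rpc_msg, the assumption that hdr.size is counted in 32-bit words, and the use of the MISC build-info function code (from svc/misc.h added below) are illustrative assumptions, not a defined wire format.

struct imx_sc_msg_build_info {
	struct imx_sc_rpc_msg hdr;
	u32 build;			/* assumed response payload */
} __packed;

static int example_get_build_info(u32 *build)
{
	struct imx_sc_msg_build_info msg = {};
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	msg.hdr.ver = IMX_SC_RPC_VERSION;
	msg.hdr.svc = IMX_SC_RPC_SVC_MISC;
	msg.hdr.func = IMX_SC_MISC_FUNC_BUILD_INFO;	/* from svc/misc.h below */
	msg.hdr.size = sizeof(msg) / 4;			/* assumed: size in 32-bit words */

	/* blocks until the SCU answers because have_resp is true */
	ret = imx_scu_call_rpc(ipc, &msg, true);
	if (ret)
		return ret;

	*build = msg.build;
	return 0;
}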
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
new file mode 100644
index 000000000000..ebc55098faee
--- /dev/null
+++ b/include/linux/firmware/imx/sci.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public System Controller Interface (SCI)
+ * definitions.
+ */
+
+#ifndef _SC_SCI_H
+#define _SC_SCI_H
+
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/types.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/pm.h>
+#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h
new file mode 100644
index 000000000000..e21c49aba92f
--- /dev/null
+++ b/include/linux/firmware/imx/svc/misc.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Miscellaneous (MISC) function.
+ *
+ * MISC_SVC (SVC) Miscellaneous Service
+ *
+ * Module for the Miscellaneous (MISC) service.
+ */
+
+#ifndef _SC_MISC_API_H
+#define _SC_MISC_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC MISC function calls.
+ */
+enum imx_misc_func {
+ IMX_SC_MISC_FUNC_UNKNOWN = 0,
+ IMX_SC_MISC_FUNC_SET_CONTROL = 1,
+ IMX_SC_MISC_FUNC_GET_CONTROL = 2,
+ IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4,
+ IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5,
+ IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8,
+ IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9,
+ IMX_SC_MISC_FUNC_DEBUG_OUT = 10,
+ IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6,
+ IMX_SC_MISC_FUNC_BUILD_INFO = 15,
+ IMX_SC_MISC_FUNC_UNIQUE_ID = 19,
+ IMX_SC_MISC_FUNC_SET_ARI = 3,
+ IMX_SC_MISC_FUNC_BOOT_STATUS = 7,
+ IMX_SC_MISC_FUNC_BOOT_DONE = 14,
+ IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11,
+ IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17,
+ IMX_SC_MISC_FUNC_SET_TEMP = 12,
+ IMX_SC_MISC_FUNC_GET_TEMP = 13,
+ IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16,
+ IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18,
+};
+
+/*
+ * Control Functions
+ */
+
+int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 val);
+
+int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
+ u8 ctrl, u32 *val);
+
+#endif /* _SC_MISC_API_H */
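A short, hypothetical example of toggling a control through the MISC service; the resource ID is a placeholder and IMX_SC_C_PHY_RESET comes from firmware/imx/types.h added below.

static int example_toggle_phy_reset(u32 resource)
{
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* assert, then release, the PHY reset control of the resource */
	ret = imx_sc_misc_set_control(ipc, resource, IMX_SC_C_PHY_RESET, 1);
	if (ret)
		return ret;

	return imx_sc_misc_set_control(ipc, resource, IMX_SC_C_PHY_RESET, 0);
}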
diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h
new file mode 100644
index 000000000000..1f6975dd37b0
--- /dev/null
+++ b/include/linux/firmware/imx/svc/pm.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Power Management (PM) function. This includes functions for power state
+ * control, clock control, reset control, and wake-up event control.
+ *
+ * PM_SVC (SVC) Power Management Service
+ *
+ * Module for the Power Management (PM) service.
+ */
+
+#ifndef _SC_PM_API_H
+#define _SC_PM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC PM function calls.
+ */
+enum imx_sc_pm_func {
+ IMX_SC_PM_FUNC_UNKNOWN = 0,
+ IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19,
+ IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1,
+ IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2,
+ IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4,
+ IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16,
+ IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17,
+ IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18,
+ IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5,
+ IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6,
+ IMX_SC_PM_FUNC_CLOCK_ENABLE = 7,
+ IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14,
+ IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15,
+ IMX_SC_PM_FUNC_RESET = 13,
+ IMX_SC_PM_FUNC_RESET_REASON = 10,
+ IMX_SC_PM_FUNC_BOOT = 8,
+ IMX_SC_PM_FUNC_REBOOT = 9,
+ IMX_SC_PM_FUNC_REBOOT_PARTITION = 12,
+ IMX_SC_PM_FUNC_CPU_START = 11,
+};
+
+/*
+ * Defines for ALL parameters
+ */
+#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */
+#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */
+#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */
+#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC PM CLK Parent
+ */
+#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */
+#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */
+#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */
+#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */
+#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */
+
+#endif /* _SC_PM_API_H */
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
new file mode 100644
index 000000000000..80821100e85f
--- /dev/null
+++ b/include/linux/firmware/imx/types.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Header file containing types used across multiple service APIs.
+ */
+
+#ifndef _SC_TYPES_H
+#define _SC_TYPES_H
+
+/*
+ * This type is used to indicate a control.
+ */
+enum imx_sc_ctrl {
+ IMX_SC_C_TEMP = 0,
+ IMX_SC_C_TEMP_HI = 1,
+ IMX_SC_C_TEMP_LOW = 2,
+ IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
+ IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
+ IMX_SC_C_PXL_LINK_MST_ENB = 5,
+ IMX_SC_C_PXL_LINK_MST1_ENB = 6,
+ IMX_SC_C_PXL_LINK_MST2_ENB = 7,
+ IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
+ IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
+ IMX_SC_C_PXL_LINK_MST_VLD = 10,
+ IMX_SC_C_PXL_LINK_MST1_VLD = 11,
+ IMX_SC_C_PXL_LINK_MST2_VLD = 12,
+ IMX_SC_C_SINGLE_MODE = 13,
+ IMX_SC_C_ID = 14,
+ IMX_SC_C_PXL_CLK_POLARITY = 15,
+ IMX_SC_C_LINESTATE = 16,
+ IMX_SC_C_PCIE_G_RST = 17,
+ IMX_SC_C_PCIE_BUTTON_RST = 18,
+ IMX_SC_C_PCIE_PERST = 19,
+ IMX_SC_C_PHY_RESET = 20,
+ IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
+ IMX_SC_C_PANIC = 22,
+ IMX_SC_C_PRIORITY_GROUP = 23,
+ IMX_SC_C_TXCLK = 24,
+ IMX_SC_C_CLKDIV = 25,
+ IMX_SC_C_DISABLE_50 = 26,
+ IMX_SC_C_DISABLE_125 = 27,
+ IMX_SC_C_SEL_125 = 28,
+ IMX_SC_C_MODE = 29,
+ IMX_SC_C_SYNC_CTRL0 = 30,
+ IMX_SC_C_KACHUNK_CNT = 31,
+ IMX_SC_C_KACHUNK_SEL = 32,
+ IMX_SC_C_SYNC_CTRL1 = 33,
+ IMX_SC_C_DPI_RESET = 34,
+ IMX_SC_C_MIPI_RESET = 35,
+ IMX_SC_C_DUAL_MODE = 36,
+ IMX_SC_C_VOLTAGE = 37,
+ IMX_SC_C_PXL_LINK_SEL = 38,
+ IMX_SC_C_OFS_SEL = 39,
+ IMX_SC_C_OFS_AUDIO = 40,
+ IMX_SC_C_OFS_PERIPH = 41,
+ IMX_SC_C_OFS_IRQ = 42,
+ IMX_SC_C_RST0 = 43,
+ IMX_SC_C_RST1 = 44,
+ IMX_SC_C_SEL0 = 45,
+ IMX_SC_C_LAST
+};
+
+#endif /* _SC_TYPES_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
new file mode 100644
index 000000000000..5be5dab50b13
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#ifndef __STRATIX10_SMC_H
+#define __STRATIX10_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+/**
+ * This file defines the Secure Monitor Call (SMC) message protocol used by
+ * the service layer driver in the normal world (EL1) to communicate with the
+ * secure monitor software running at Exception Level 3 (EL3).
+ *
+ * This file is shared with the secure firmware (FW), which is maintained
+ * outside the kernel tree.
+ *
+ * An ARM SMC instruction takes a function identifier and up to 6 64-bit
+ * register values as arguments, and can return up to 4 64-bit register
+ * values. The operation of the secure monitor is determined by the parameter
+ * values passed in through registers.
+ *
+ * EL1 and EL3 exchange pointers as physical addresses rather than virtual
+ * addresses.
+ *
+ * Functions specified by the ARM SMC Calling Convention:
+ *
+ * A FAST call executes an atomic operation and returns when the requested
+ * operation has completed.
+ * A STD call starts an operation which can be preempted by a non-secure
+ * interrupt. The call can return before the requested operation has
+ * completed.
+ *
+ * a0..a7 are used as register names in the descriptions below; on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7.
+ */
+
+/**
+ * @func_num: function ID
+ */
+#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_num))
+
+/**
+ * Return values in INTEL_SIP_SMC_* call
+ *
+ * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION:
+ * Secure monitor software doesn't recognize the request.
+ *
+ * INTEL_SIP_SMC_STATUS_OK:
+ * The FPGA configuration completed successfully. In the case of an FPGA
+ * configuration write operation, it means the secure monitor software can
+ * accept the next chunk of FPGA configuration data.
+ *
+ * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+ * In the case of an FPGA configuration write operation, it means the secure
+ * monitor software is still processing the previous data and cannot accept
+ * the next chunk yet. The service driver needs to issue an
+ * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the
+ * completed block(s).
+ *
+ * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ * An error occurred during the FPGA configuration process.
+ *
+ * INTEL_SIP_SMC_REG_ERROR:
+ * An error occurred during a read or write operation on the protected
+ * registers.
+ *
+ * INTEL_SIP_SMC_RSU_ERROR:
+ * An error occurred during a remote status update.
+ */
+#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+#define INTEL_SIP_SMC_STATUS_OK 0x0
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4
+#define INTEL_SIP_SMC_REG_ERROR 0x5
+#define INTEL_SIP_SMC_RSU_ERROR 0x7
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_START
+ *
+ * Sync call used by the service driver at EL1 to ask the secure firmware at
+ * EL3 to prepare the FPGA to receive a new configuration.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_START.
+ * a1: flag for full or partial configuration. 0 for full and 1 for partial
+ * configuration.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
+#define INTEL_SIP_SMC_FPGA_CONFIG_START \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ *
+ * Async call used by service driver at EL1 to provide FPGA configuration data
+ * to secure world.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_WRITE.
+ * a1: 64bit physical address of the configuration data memory block
+ * a2: Size of configuration data block.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
+ * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block if any completed
+ * block, otherwise zero value.
+ * a2: 64bit physical address of 2nd completed memory block if any completed
+ * block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if any completed
+ * block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE
+ *
+ * Sync call used by service driver at EL1 to track the completed write
+ * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
+ * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
+ * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1: 64bit physical address of 1st completed memory block.
+ * a2: 64bit physical address of 2nd completed memory block if
+ * any completed block, otherwise zero value.
+ * a3: 64bit physical address of 3rd completed memory block if
+ * any completed block, otherwise zero value.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \
+INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_ISDONE
+ *
+ * Sync call used by the service driver at EL1 to inform the secure world that
+ * all data has been sent and to check whether the secure world has completed
+ * the FPGA configuration process.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_ISDONE.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
+ * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM
+ *
+ * Sync call used by service driver at EL1 to query the physical address of
+ * memory block reserved by secure monitor software.
+ *
+ * Call register usage:
+ * a0:INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1: start of physical address of reserved memory block.
+ * a2: size of reserved memory block.
+ * a3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM)
+
+/**
+ * Request INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK
+ *
+ * For SMC loop-back mode only, used for internal integration, debugging
+ * or troubleshooting.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK.
+ * a1-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
+#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK)
+
+/*
+ * Request INTEL_SIP_SMC_REG_READ
+ *
+ * Read a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_READ.
+ * a1: register address.
+ * a2-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1: value in the register
+ * a2-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_READ 7
+#define INTEL_SIP_SMC_REG_READ \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
+
+/*
+ * Request INTEL_SIP_SMC_REG_WRITE
+ *
+ * Write a protected register at EL3
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_WRITE.
+ * a1: register address
+ * a2: value to program into register.
+ * a3-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
+#define INTEL_SIP_SMC_REG_WRITE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
+
+/*
+ * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE
+ *
+ * Update one or more bits in a protected register at EL3 using a
+ * read-modify-write operation.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_REG_UPDATE.
+ * a1: register address
+ * a2: write mask.
+ * a3: value to write.
+ * a4-7: not used.
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR.
+ * a1-3: Not used.
+ */
+#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9
+#define INTEL_SIP_SMC_REG_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE)
+
+/*
+ * Request INTEL_SIP_SMC_RSU_STATUS
+ *
+ * Request the remote status update boot log; the call is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0: Current Image
+ * a1: Last Failing Image
+ * a2: Version | State
+ * a3: Error details | Error location
+ *
+ * Or
+ *
+ * a0: INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11
+#define INTEL_SIP_SMC_RSU_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS)
+
+/*
+ * Request INTEL_SIP_SMC_RSU_UPDATE
+ *
+ * Request to set the offset of the bitstream to boot after reboot; the call
+ * is synchronous.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_UPDATE
+ * a1 64bit physical address of the configuration data memory in flash
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
+#define INTEL_SIP_SMC_RSU_UPDATE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+#endif
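For illustration only, a hedged sketch of how EL1 code might issue the FPGA_CONFIG_START fast call with the standard SMCCC helper; the wrapper function and its error mapping are assumptions, not part of this interface.

static int example_fpga_config_start(bool partial)
{
	struct arm_smccc_res res;

	/* a0 = function ID, a1 = 0 for full / 1 for partial, a2..a7 unused */
	arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_START, partial ? 1 : 0,
		      0, 0, 0, 0, 0, 0, &res);

	return res.a0 == INTEL_SIP_SMC_STATUS_OK ? 0 : -EIO;
}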
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
new file mode 100644
index 000000000000..e521f172a47a
--- /dev/null
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017-2018, Intel Corporation
+ */
+
+#ifndef __STRATIX10_SVC_CLIENT_H
+#define __STRATIX10_SVC_CLIENT_H
+
+/**
+ * Client names supported by the service layer driver:
+ *
+ * fpga: for FPGA configuration
+ * rsu: for remote status update
+ */
+#define SVC_CLIENT_FPGA "fpga"
+#define SVC_CLIENT_RSU "rsu"
+
+/**
+ * Status of the sent command, given as a bit number
+ *
+ * SVC_STATUS_RECONFIG_REQUEST_OK:
+ * Secure firmware accepts the request of FPGA reconfiguration.
+ *
+ * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED:
+ * Service client successfully submits FPGA configuration
+ * data buffer to secure firmware.
+ *
+ * SVC_STATUS_RECONFIG_BUFFER_DONE:
+ * Secure firmware has finished processing the data and is ready to accept
+ * the next WRITE transaction.
+ *
+ * SVC_STATUS_RECONFIG_COMPLETED:
+ * Secure firmware has completed the FPGA configuration successfully; the
+ * FPGA should be in user mode.
+ *
+ * SVC_STATUS_RECONFIG_BUSY:
+ * FPGA configuration is still in process.
+ *
+ * SVC_STATUS_RECONFIG_ERROR:
+ * Error encountered during FPGA configuration.
+ *
+ * SVC_STATUS_RSU_OK:
+ * Secure firmware accepts the request of remote status update (RSU).
+ */
+#define SVC_STATUS_RECONFIG_REQUEST_OK 0
+#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
+#define SVC_STATUS_RECONFIG_BUFFER_DONE 2
+#define SVC_STATUS_RECONFIG_COMPLETED 3
+#define SVC_STATUS_RECONFIG_BUSY 4
+#define SVC_STATUS_RECONFIG_ERROR 5
+#define SVC_STATUS_RSU_OK 6
+#define SVC_STATUS_RSU_ERROR 7
+/**
+ * Flag bit for COMMAND_RECONFIG
+ *
+ * COMMAND_RECONFIG_FLAG_PARTIAL:
+ * Selects the FPGA configuration type: set for a partial reconfiguration,
+ * clear for a full reconfiguration (the default).
+ */
+#define COMMAND_RECONFIG_FLAG_PARTIAL 0
+
+/**
+ * Timeout settings for service clients:
+ * SVC_RECONFIG_REQUEST_TIMEOUT_MS and SVC_RECONFIG_BUFFER_TIMEOUT_MS are
+ * used by the Stratix10 FPGA manager driver; SVC_RSU_REQUEST_TIMEOUT_MS is
+ * used by the RSU driver.
+ */
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240
+#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+
+struct stratix10_svc_chan;
+
+/**
+ * enum stratix10_svc_command_code - supported service commands
+ *
+ * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
+ *
+ * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
+ * is SVC_STATUS_RECONFIG_REQUEST_OK
+ *
+ * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
+ * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+ * or SVC_STATUS_RECONFIG_ERROR
+ *
+ * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
+ * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
+ * SVC_STATUS_RECONFIG_ERROR
+ *
+ * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
+ * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
+ * SVC_STATUS_RECONFIG_ERROR
+ *
+ * @COMMAND_RSU_STATUS: request remote system update boot log, return status
+ * is log data or SVC_STATUS_RSU_ERROR
+ *
+ * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
+ * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ */
+enum stratix10_svc_command_code {
+ COMMAND_NOOP = 0,
+ COMMAND_RECONFIG,
+ COMMAND_RECONFIG_DATA_SUBMIT,
+ COMMAND_RECONFIG_DATA_CLAIM,
+ COMMAND_RECONFIG_STATUS,
+ COMMAND_RSU_STATUS,
+ COMMAND_RSU_UPDATE
+};
+
+/**
+ * struct stratix10_svc_client_msg - message sent by client to service
+ * @payload: starting address of the data to be processed
+ * @payload_length: data size in bytes
+ * @command: service command
+ * @arg: args to be passed via registers and not physically mapped buffers
+ */
+struct stratix10_svc_client_msg {
+ void *payload;
+ size_t payload_length;
+ enum stratix10_svc_command_code command;
+ u64 arg[3];
+};
+
+/**
+ * struct stratix10_svc_command_config_type - config type
+ * @flags: flag bit for the type of FPGA configuration
+ */
+struct stratix10_svc_command_config_type {
+ u32 flags;
+};
+
+/**
+ * struct stratix10_svc_cb_data - callback data structure from service layer
+ * @status: the status of the sent command
+ * @kaddr1: address of 1st completed data block
+ * @kaddr2: address of 2nd completed data block
+ * @kaddr3: address of 3rd completed data block
+ */
+struct stratix10_svc_cb_data {
+ u32 status;
+ void *kaddr1;
+ void *kaddr2;
+ void *kaddr3;
+};
+
+/**
+ * struct stratix10_svc_client - service client structure
+ * @dev: the client device
+ * @receive_cb: callback to provide service client the received data
+ * @priv: client private data
+ */
+struct stratix10_svc_client {
+ struct device *dev;
+ void (*receive_cb)(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *cb_data);
+ void *priv;
+};
+
+/**
+ * stratix10_svc_request_channel_byname() - request service channel
+ * @client: identity of the client requesting the channel
+ * @name: supporting client name defined above
+ *
+ * Return: a pointer to channel assigned to the client on success,
+ * or ERR_PTR() on error.
+ */
+struct stratix10_svc_chan
+*stratix10_svc_request_channel_byname(struct stratix10_svc_client *client,
+ const char *name);
+
+/**
+ * stratix10_svc_free_channel() - free service channel.
+ * @chan: service channel to be freed
+ */
+void stratix10_svc_free_channel(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_allocate_memory() - allocate memory
+ * @chan: service channel assigned to the client
+ * @size: number of bytes client requests
+ *
+ * Service layer allocates the requested number of bytes from the memory
+ * pool for the client.
+ *
+ * Return: the starting address of allocated memory on success, or
+ * ERR_PTR() on error.
+ */
+void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
+ size_t size);
+
+/**
+ * stratix10_svc_free_memory() - free allocated memory
+ * @chan: service channel assigned to the client
+ * @kaddr: starting address of the memory to be freed back to the pool
+ */
+void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
+
+/**
+ * stratix10_svc_send() - send a message to the remote
+ * @chan: service channel assigned to the client
+ * @msg: message data to be sent, in the format of
+ * struct stratix10_svc_client_msg
+ *
+ * Return: 0 for success, -ENOMEM or -ENOBUFS on error.
+ */
+int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
+
+/**
+ * stratix10_svc_done() - complete service request
+ * @chan: service channel assigned to the client
+ *
+ * This function is used by a service client to inform the service layer that
+ * the client's service requests are completed, or that an error occurred
+ * during the request process.
+ */
+void stratix10_svc_done(struct stratix10_svc_chan *chan);
+#endif
+
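A condensed, hypothetical sketch of the intended client flow for the FPGA service; real code would first send COMMAND_RECONFIG, wait for the callback between steps, and handle errors, all of which is elided here.

static void example_receive_cb(struct stratix10_svc_client *client,
			       struct stratix10_svc_cb_data *data)
{
	/* inspect data->status (SVC_STATUS_*) and data->kaddr1..kaddr3 here */
}

static int example_submit_bitstream(struct device *dev,
				    const void *bitstream, size_t size)
{
	struct stratix10_svc_client client = {
		.dev = dev,
		.receive_cb = example_receive_cb,
	};
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_RECONFIG_DATA_SUBMIT,
		.payload_length = size,
	};
	struct stratix10_svc_chan *chan;
	void *buf;
	int ret;

	chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	buf = stratix10_svc_allocate_memory(chan, size);
	if (IS_ERR(buf)) {
		stratix10_svc_free_channel(chan);
		return PTR_ERR(buf);
	}

	memcpy(buf, bitstream, size);
	msg.payload = buf;

	ret = stratix10_svc_send(chan, &msg);

	/* ... wait for example_receive_cb() to report completion ... */

	stratix10_svc_done(chan);
	stratix10_svc_free_memory(chan, buf);
	stratix10_svc_free_channel(chan);
	return ret;
}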
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 37a5eaea69dd..f98c20dd266e 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -17,6 +17,7 @@ enum {
SM_EFUSE_READ,
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
+ SM_GET_CHIP_ID,
};
struct meson_sm_firmware;
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
new file mode 100644
index 000000000000..3c3c28eff56a
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#ifndef __FIRMWARE_ZYNQMP_H__
+#define __FIRMWARE_ZYNQMP_H__
+
+#define ZYNQMP_PM_VERSION_MAJOR 1
+#define ZYNQMP_PM_VERSION_MINOR 0
+
+#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \
+ ZYNQMP_PM_VERSION_MINOR)
+
+#define ZYNQMP_TZ_VERSION_MAJOR 1
+#define ZYNQMP_TZ_VERSION_MINOR 0
+
+#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \
+ ZYNQMP_TZ_VERSION_MINOR)
+
+/* SMC SIP service Call Function Identifier Prefix */
+#define PM_SIP_SVC 0xC2000000
+#define PM_GET_TRUSTZONE_VERSION 0xa03
+
+/* Number of 32bits values in payload */
+#define PAYLOAD_ARG_CNT 4U
+
+enum pm_api_id {
+ PM_GET_API_VERSION = 1,
+ PM_IOCTL = 34,
+ PM_QUERY_DATA,
+ PM_CLOCK_ENABLE,
+ PM_CLOCK_DISABLE,
+ PM_CLOCK_GETSTATE,
+ PM_CLOCK_SETDIVIDER,
+ PM_CLOCK_GETDIVIDER,
+ PM_CLOCK_SETRATE,
+ PM_CLOCK_GETRATE,
+ PM_CLOCK_SETPARENT,
+ PM_CLOCK_GETPARENT,
+};
+
+/* PMU-FW return status codes */
+enum pm_ret_status {
+ XST_PM_SUCCESS = 0,
+ XST_PM_INTERNAL = 2000,
+ XST_PM_CONFLICT,
+ XST_PM_NO_ACCESS,
+ XST_PM_INVALID_NODE,
+ XST_PM_DOUBLE_REQ,
+ XST_PM_ABORT_SUSPEND,
+};
+
+enum pm_ioctl_id {
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE,
+ IOCTL_SET_PLL_FRAC_DATA,
+ IOCTL_GET_PLL_FRAC_DATA,
+};
+
+enum pm_query_id {
+ PM_QID_INVALID,
+ PM_QID_CLOCK_GET_NAME,
+ PM_QID_CLOCK_GET_TOPOLOGY,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
+ PM_QID_CLOCK_GET_PARENTS,
+ PM_QID_CLOCK_GET_ATTRIBUTES,
+ PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+};
+
+/**
+ * struct zynqmp_pm_query_data - PM query data
+ * @qid: query ID
+ * @arg1: Argument 1 of query data
+ * @arg2: Argument 2 of query data
+ * @arg3: Argument 3 of query data
+ */
+struct zynqmp_pm_query_data {
+ u32 qid;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct zynqmp_eemi_ops {
+ int (*get_api_version)(u32 *version);
+ int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
+ int (*clock_enable)(u32 clock_id);
+ int (*clock_disable)(u32 clock_id);
+ int (*clock_getstate)(u32 clock_id, u32 *state);
+ int (*clock_setdivider)(u32 clock_id, u32 divider);
+ int (*clock_getdivider)(u32 clock_id, u32 *divider);
+ int (*clock_setrate)(u32 clock_id, u64 rate);
+ int (*clock_getrate)(u32 clock_id, u64 *rate);
+ int (*clock_setparent)(u32 clock_id, u32 parent_id);
+ int (*clock_getparent)(u32 clock_id, u32 *parent_id);
+ int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
+};
+
+#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
+const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
+#else
+static inline const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __FIRMWARE_ZYNQMP_H__ */
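A minimal sketch of a consumer of the EEMI ops table; the wrapper function and clock ID are placeholders.

static int example_enable_clock(u32 clock_id)
{
	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	/* NULL when the ZynqMP firmware driver is not available */
	if (!eemi_ops || !eemi_ops->clock_enable)
		return -ENODEV;

	return eemi_ops->clock_enable(clock_id);
}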
diff --git a/include/linux/font.h b/include/linux/font.h
index d6821769dd1e..51b91c8b69d5 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -32,6 +32,7 @@ struct font_desc {
#define ACORN8x8_IDX 8
#define MINI4x6_IDX 9
#define FONT6x10_IDX 10
+#define TER16x32_IDX 11
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -43,7 +44,8 @@ extern const struct font_desc font_vga_8x8,
font_sun_12x22,
font_acorn_8x8,
font_mini_4x6,
- font_6x10;
+ font_6x10,
+ font_ter_16x32;
/* Find a font with a specific name */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index ce550fcf6360..817600a32c93 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br);
int fpga_bridge_register(struct fpga_bridge *br);
void fpga_bridge_unregister(struct fpga_bridge *br);
+struct fpga_bridge
+*devm_fpga_bridge_create(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv);
+
#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..e8ca62b2cb5b 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
@@ -190,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
int fpga_mgr_register(struct fpga_manager *mgr);
void fpga_mgr_unregister(struct fpga_manager *mgr);
+struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops,
+ void *priv);
+
#endif /*_LINUX_FPGA_MGR_H */
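A hedged sketch of how a driver's probe might use the new devm_fpga_mgr_create() constructor; my_fpga_ops, the NULL priv pointer, and the platform-driver framing are placeholders, not a prescribed pattern.

static const struct fpga_manager_ops my_fpga_ops;	/* .write_init, .write, ... */

static int example_fpga_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_create(&pdev->dev, "Example FPGA Manager",
				   &my_fpga_ops, NULL /* driver priv data */);
	if (!mgr)
		return -ENOMEM;

	platform_set_drvdata(pdev, mgr);

	/* unregister in .remove(); the allocation itself is devres-managed */
	return fpga_mgr_register(mgr);
}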
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 0521b7f577a4..27cb706275db 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region);
int fpga_region_register(struct fpga_region *region);
void fpga_region_unregister(struct fpga_region *region);
+struct fpga_region
+*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+
#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a9242f336f02..811c77743dad 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,6 +74,8 @@ extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+extern int sysctl_protected_fifos;
+extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -157,6 +159,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File is capable of returning -EAGAIN if I/O will block */
#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+/* File does not contribute to nr_files count */
+#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
* that indicates that they should check the contents of the iovec are
@@ -398,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
+/**
+ * struct address_space - Contents of a cacheable, mappable object.
+ * @host: Owner, either the inode or the block_device.
+ * @i_pages: Cached pages.
+ * @gfp_mask: Memory allocation flags to use for allocating pages.
+ * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap: Tree of private and shared mappings.
+ * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
+ * @nrpages: Number of page entries, protected by the i_pages lock.
+ * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
+ * @writeback_index: Writeback starts here.
+ * @a_ops: Methods.
+ * @flags: Error bits and flags (AS_*).
+ * @wb_err: The most recent error which has occurred.
+ * @private_lock: For use by the owner of the address_space.
+ * @private_list: For use by the owner of the address_space.
+ * @private_data: For use by the owner of the address_space.
+ */
struct address_space {
- struct inode *host; /* owner: inode, block_device */
- struct radix_tree_root i_pages; /* cached pages */
- atomic_t i_mmap_writable;/* count VM_SHARED mappings */
- struct rb_root_cached i_mmap; /* tree of private and shared mappings */
- struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
- /* Protected by the i_pages lock */
- unsigned long nrpages; /* number of total pages */
- /* number of shadow or DAX exceptional entries */
+ struct inode *host;
+ struct xarray i_pages;
+ gfp_t gfp_mask;
+ atomic_t i_mmap_writable;
+ struct rb_root_cached i_mmap;
+ struct rw_semaphore i_mmap_rwsem;
+ unsigned long nrpages;
unsigned long nrexceptional;
- pgoff_t writeback_index;/* writeback starts here */
- const struct address_space_operations *a_ops; /* methods */
- unsigned long flags; /* error bits */
- spinlock_t private_lock; /* for use by the address_space */
- gfp_t gfp_mask; /* implicit gfp mask for allocations */
- struct list_head private_list; /* for use by the address_space */
- void *private_data; /* ditto */
+ pgoff_t writeback_index;
+ const struct address_space_operations *a_ops;
+ unsigned long flags;
errseq_t wb_err;
+ spinlock_t private_lock;
+ struct list_head private_list;
+ void *private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -462,15 +483,18 @@ struct block_device {
struct mutex bd_fsfreeze_mutex;
} __randomize_layout;
+/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
+#define PAGECACHE_TAG_DIRTY XA_MARK_0
+#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
+#define PAGECACHE_TAG_TOWRITE XA_MARK_2
+
/*
- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
- * radix trees
+ * Returns true if any of the pages in the mapping are marked with the tag.
*/
-#define PAGECACHE_TAG_DIRTY 0
-#define PAGECACHE_TAG_WRITEBACK 1
-#define PAGECACHE_TAG_TOWRITE 2
-
-int mapping_tagged(struct address_space *mapping, int tag);
+static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+{
+ return xa_marked(&mapping->i_pages, tag);
+}
static inline void i_mmap_lock_write(struct address_space *mapping)
{
@@ -1020,10 +1044,15 @@ bool opens_in_grace(struct net *);
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_blocker; /* The lock, that is blocking us */
struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_block; /* circular list of blocked processes */
+ struct list_head fl_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head fl_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
fl_owner_t fl_owner;
unsigned int fl_flags;
unsigned char fl_type;
@@ -1067,17 +1096,7 @@ struct file_lock_context {
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/*
- * Return the inode to use for locking
- *
- * For overlayfs this should be the overlay inode, not the real inode returned
- * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
- * equal.
- */
-static inline struct inode *locks_inode(const struct file *f)
-{
- return f->f_path.dentry->d_inode;
-}
+#define locks_inode(f) file_inode(f)
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
@@ -1105,7 +1124,7 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_unblock_lock(struct file_lock *);
+extern int locks_delete_block(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
@@ -1195,7 +1214,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_unblock_lock(struct file_lock *waiter)
+static inline int locks_delete_block(struct file_lock *waiter)
{
return -ENOENT;
}
@@ -1262,7 +1281,7 @@ static inline struct inode *file_inode(const struct file *f)
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file), 0, 0);
+ return d_real(file->f_path.dentry, file_inode(file));
}
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
@@ -1318,7 +1337,6 @@ extern int send_sigurg(struct fown_struct *fown);
/* These sb flags are internal to the kernel */
#define SB_SUBMOUNT (1<<26)
-#define SB_NOREMOTELOCK (1<<27)
#define SB_NOSEC (1<<28)
#define SB_BORN (1<<29)
#define SB_ACTIVE (1<<30)
@@ -1399,17 +1417,26 @@ struct super_block {
struct sb_writers s_writers;
+ /*
+ * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
+ * s_fsnotify_marks together for cache efficiency. They are frequently
+ * accessed and rarely modified.
+ */
+ void *s_fs_info; /* Filesystem private info */
+
+ /* Granularity of c/m/atime in ns (cannot be worse than a second) */
+ u32 s_time_gran;
+#ifdef CONFIG_FSNOTIFY
+ __u32 s_fsnotify_mask;
+ struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
+#endif
+
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
- void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;
fmode_t s_mode;
- /* Granularity of c/m/atime in ns.
- Cannot be worse than a second */
- u32 s_time_gran;
-
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
@@ -1434,6 +1461,9 @@ struct super_block {
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
+ /* Pending fsnotify inode refs */
+ atomic_long_t s_fsnotify_inode_refs;
+
/* Being remounted read-only */
int s_readonly_remount;
@@ -1647,6 +1677,8 @@ int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);
+extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
/*
* VFS file helper functions.
*/
@@ -1725,6 +1757,25 @@ struct block_device_operations;
#define NOMMU_VMFLAGS \
(NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+/*
+ * These flags control the behavior of the remap_file_range function pointer.
+ * If it is called with len == 0 that means "remap to end of source file".
+ * See Documentation/filesystems/vfs.txt for more details about this call.
+ *
+ * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
+ * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
+ */
+#define REMAP_FILE_DEDUP (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN (1 << 1)
+
+/*
+ * These flags signal that the caller is ok with altering various aspects of
+ * the behavior of the remap operation. The changes must be made by the
+ * implementation; the vfs remap helper functions can take advantage of them.
+ * Flags in this category exist to preserve the quirky behavior of the hoisted
+ * btrfs clone/dedupe ioctls.
+ */
+#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
struct iov_iter;
@@ -1763,10 +1814,10 @@ struct file_operations {
#endif
ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
loff_t, size_t, unsigned int);
- int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
- u64);
- ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
- u64);
+ loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
} __randomize_layout;
struct inode_operations {
@@ -1828,16 +1879,22 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
- struct inode *inode_out, loff_t pos_out,
- u64 *len, bool is_dedupe);
-extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
- loff_t len, bool *is_same);
+extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count,
+ unsigned int remap_flags);
+extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
+extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t len, unsigned int remap_flags);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
+extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ loff_t len, unsigned int remap_flags);
+
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@@ -1969,7 +2026,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
.ki_hint = ki_hint_validate(file_write_hint(filp)),
- .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
+ .ki_ioprio = get_current_ioprio(),
};
}
@@ -2096,6 +2153,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2441,6 +2499,8 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file * open_with_fake_path(const struct path *, int,
+ struct inode*, const struct cred *);
static inline struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
@@ -2769,19 +2829,6 @@ static inline void file_end_write(struct file *file)
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- u64 len)
-{
- int ret;
-
- file_start_write(file_out);
- ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
- file_end_write(file_out);
-
- return ret;
-}
-
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
@@ -2974,6 +3021,9 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *count, unsigned int remap_flags);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
@@ -3219,8 +3269,12 @@ extern int generic_check_addressable(unsigned, u64);
extern int buffer_migrate_page(struct address_space *,
struct page *, struct page *,
enum migrate_mode);
+extern int buffer_migrate_page_norefs(struct address_space *,
+ struct page *, struct page *,
+ enum migrate_mode);
#else
#define buffer_migrate_page NULL
+#define buffer_migrate_page_norefs NULL
#endif
extern int setattr_prepare(struct dentry *, struct iattr *);
@@ -3456,4 +3510,8 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+
#endif /* _LINUX_FS_H */
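To illustrate the new remap flags and the loff_t-returning clone helper above, a hedged caller-side sketch; the wrapper is hypothetical and assumes both files are already open with the appropriate access.

static loff_t example_clone_prefix(struct file *src, struct file *dst,
				   loff_t len)
{
	/*
	 * REMAP_FILE_CAN_SHORTEN tells the filesystem the caller can cope
	 * with fewer than len bytes being remapped; the return value is the
	 * number of bytes actually cloned, or a negative errno.
	 */
	return vfs_clone_file_range(src, 0, dst, 0, len,
				    REMAP_FILE_CAN_SHORTEN);
}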
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
int n_pages)
{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
+ if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
fscache_op_complete(&op->op, false);
}
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
new file mode 100644
index 000000000000..d4cdc2aa6e33
--- /dev/null
+++ b/include/linux/fsi-occ.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef LINUX_FSI_OCC_H
+#define LINUX_FSI_OCC_H
+
+struct device;
+
+#define OCC_RESP_CMD_IN_PRG 0xFF
+#define OCC_RESP_SUCCESS 0
+#define OCC_RESP_CMD_INVAL 0x11
+#define OCC_RESP_CMD_LEN_INVAL 0x12
+#define OCC_RESP_DATA_INVAL 0x13
+#define OCC_RESP_CHKSUM_ERR 0x14
+#define OCC_RESP_INT_ERR 0x15
+#define OCC_RESP_BAD_STATE 0x16
+#define OCC_RESP_CRIT_EXCEPT 0xE0
+#define OCC_RESP_CRIT_INIT 0xE1
+#define OCC_RESP_CRIT_WATCHDOG 0xE2
+#define OCC_RESP_CRIT_OCB 0xE3
+#define OCC_RESP_CRIT_HW 0xE4
+
+int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
+ void *response, size_t *resp_len);
+
+#endif /* LINUX_FSI_OCC_H */
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index f27cb14088a4..741f567253ef 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -210,8 +210,8 @@ struct mc_cmd_header {
};
struct fsl_mc_command {
- u64 header;
- u64 params[MC_CMD_NUM_OF_PARAMS];
+ __le64 header;
+ __le64 params[MC_CMD_NUM_OF_PARAMS];
};
enum mc_cmd_status {
@@ -238,11 +238,11 @@ enum mc_cmd_status {
/* Command completion flag */
#define MC_CMD_FLAG_INTR_DIS 0x01
-static inline u64 mc_encode_cmd_header(u16 cmd_id,
- u32 cmd_flags,
- u16 token)
+static inline __le64 mc_encode_cmd_header(u16 cmd_id,
+ u32 cmd_flags,
+ u16 token)
{
- u64 header = 0;
+ __le64 header = 0;
struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
hdr->cmd_id = cpu_to_le16(cmd_id);
@@ -351,6 +351,14 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
#define dev_is_fsl_mc(_dev) (0)
#endif
+/* Macro to check if a device is a container device */
+#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
+ FSL_MC_IS_DPRC)
+
+/* Macro to get the container device of a MC device */
+#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
+ (_dev) : (_dev)->parent)
+
/*
* module_fsl_mc_driver() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
@@ -405,6 +413,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
+extern struct device_type fsl_mc_bus_dpseci_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -451,6 +460,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
}
+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
+}
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index 3fdfede2f0f3..5f343b796ad9 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -274,6 +274,8 @@
*/
/* Auto Boot Mode */
#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* SRAM Initialization */
+#define IFC_NAND_NCFGR_SRAM_INIT_EN 0x20000000
/* Addressing Mode-ROW0+n/COL0 */
#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
/* Addressing Mode-ROW0+n/COL0+n */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bdaf22582f6e..2ccb08cb5d6a 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -26,34 +26,46 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry
return __fsnotify_parent(path, dentry, mask);
}
-/* simple call site for access decisions */
+/*
+ * Simple wrapper to consolidate calls to fsnotify_parent()/fsnotify() when
+ * an event is on a path.
+ */
+static inline int fsnotify_path(struct inode *inode, const struct path *path,
+ __u32 mask)
+{
+ int ret = fsnotify_parent(path, NULL, mask);
+
+ if (ret)
+ return ret;
+ return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+}
+
+/* Simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
+ int ret;
const struct path *path = &file->f_path;
- /*
- * Do not use file_inode() here or anywhere in this file to get the
- * inode. That would break *notity on overlayfs.
- */
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
- int ret;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
- if (mask & MAY_OPEN)
+ if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
- else if (mask & MAY_READ)
- fsnotify_mask = FS_ACCESS_PERM;
- else
- BUG();
- ret = fsnotify_parent(path, NULL, fsnotify_mask);
- if (ret)
- return ret;
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM);
- return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ if (ret)
+ return ret;
+ }
+ } else if (mask & MAY_READ) {
+ fsnotify_mask = FS_ACCESS_PERM;
+ }
+
+ return fsnotify_path(inode, path, fsnotify_mask);
}
/*
@@ -178,16 +190,14 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -196,16 +206,14 @@ static inline void fsnotify_access(struct file *file)
static inline void fsnotify_modify(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -214,14 +222,15 @@ static inline void fsnotify_modify(struct file *file)
static inline void fsnotify_open(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
+ if (file->f_flags & __FMODE_EXEC)
+ mask |= FS_OPEN_EXEC;
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ fsnotify_path(inode, path, mask);
}
/*
@@ -230,17 +239,15 @@ static inline void fsnotify_open(struct file *file)
static inline void fsnotify_close(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
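A minimal sketch, assuming a caller that already holds a struct file, of how the consolidated fsnotify_path() helper above is used for a permission event; the function name is hypothetical and the policy is illustrative:

static int example_check_access(struct file *file)
{
	struct inode *inode = file_inode(file);

	if (file->f_mode & FMODE_NONOTIFY)
		return 0;

	/* a non-zero return means a permission-event listener denied access */
	return fsnotify_path(inode, &file->f_path, FS_ACCESS_PERM);
}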
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index b8f4182f42f1..7639774e7475 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -38,6 +38,7 @@
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
@@ -45,6 +46,7 @@
#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
@@ -62,21 +64,29 @@
#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+ FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC_PERM)
+/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
- FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
+ FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
+
+/* Extra flags that may be reported with event or control handling of events */
+#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
+
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark;
@@ -189,10 +199,10 @@ struct fsnotify_group {
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
- int f_flags;
+ int flags; /* flags from fanotify_init() */
+ int f_flags; /* event_f_flags from fanotify_init() */
unsigned int max_marks;
struct user_struct *user;
- bool audit;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -206,12 +216,14 @@ struct fsnotify_group {
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
+ FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
static inline bool fsnotify_valid_obj_type(unsigned int type)
@@ -255,6 +267,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
FSNOTIFY_ITER_FUNCS(inode, INODE)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
+FSNOTIFY_ITER_FUNCS(sb, SB)
#define fsnotify_foreach_obj_type(type) \
for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
@@ -267,8 +280,8 @@ struct fsnotify_mark_connector;
typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
/*
- * Inode / vfsmount point to this structure which tracks all marks attached to
- * the inode / vfsmount. The reference to inode / vfsmount is held by this
+ * Inode/vfsmount/sb point to this structure which tracks all marks attached to
+ * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
@@ -335,6 +348,7 @@ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int dat
extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
+extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -455,9 +469,13 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
}
+/* run all the marks in a group, and clear all of the sb marks */
+static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
+}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
-extern void fsnotify_unmount_inodes(struct super_block *sb);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
@@ -484,6 +502,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
+static inline void fsnotify_sb_delete(struct super_block *sb)
+{}
+
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
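A short sketch of how the event/flag split above can be used by a backend that accepts an event mask from user space; the function name and the exact policy are assumptions of the example:

#include <linux/errno.h>
#include <linux/fsnotify_backend.h>

static int example_validate_mask(__u32 mask)
{
	/* reject bits that are neither reportable events nor behaviour flags */
	if (mask & ~ALL_FSNOTIFY_BITS)
		return -EINVAL;

	/* require at least one reportable event once the flags are stripped */
	if (!(mask & ALL_FSNOTIFY_EVENTS))
		return -EINVAL;

	return 0;
}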
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ebb77674be90..730876187344 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -234,10 +234,6 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
*/
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
-static inline int ftrace_nr_registered_ops(void)
-{
- return 0;
-}
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
@@ -328,8 +324,6 @@ struct seq_file;
extern int ftrace_text_reserved(const void *start, const void *end);
-extern int ftrace_nr_registered_ops(void);
-
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
@@ -395,6 +389,7 @@ enum {
FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
FTRACE_START_FUNC_RET = (1 << 3),
FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
};
/*
@@ -426,6 +421,9 @@ enum {
};
void arch_ftrace_update_code(int command);
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
+void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
struct ftrace_rec_iter;
@@ -707,16 +705,7 @@ static inline unsigned long get_lock_parent_ip(void)
return CALLER_ADDR2;
}
-#ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
- static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#if defined(CONFIG_PREEMPT_TRACER) || \
- (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
@@ -764,6 +753,11 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ops {
+ trace_func_graph_ent_t entryfunc;
+ trace_func_graph_ret_t retfunc;
+};
+
/*
* Stack of return addresses for functions
* of a thread.
@@ -792,8 +786,11 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
+
+struct ftrace_ret_stack *
+ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
@@ -805,11 +802,11 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
*/
#define __notrace_funcgraph notrace
-#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
-extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc);
+
+extern int register_ftrace_graph(struct fgraph_ops *ops);
+extern void unregister_ftrace_graph(struct fgraph_ops *ops);
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);
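A minimal sketch of the new registration interface above: the entry and return callbacks are now bundled in an fgraph_ops instead of being passed as two function pointers. The callback bodies and names are placeholders:

static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: trace this function */
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* consume the return record here */
}

static struct fgraph_ops example_fgraph_ops = {
	.entryfunc	= example_graph_entry,
	.retfunc	= example_graph_return,
};

/* register_ftrace_graph(&example_fgraph_ops) starts tracing,
 * unregister_ftrace_graph(&example_fgraph_ops) stops it.
 */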
@@ -818,17 +815,10 @@ extern void ftrace_graph_stop(void);
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;
-extern void unregister_ftrace_graph(void);
-
extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
-static inline int task_curr_ret_stack(struct task_struct *t)
-{
- return t->curr_ret_stack;
-}
-
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
@@ -846,17 +836,9 @@ static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
-static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc)
-{
- return -1;
-}
-static inline void unregister_ftrace_graph(void) { }
-
-static inline int task_curr_ret_stack(struct task_struct *tsk)
-{
- return -1;
-}
+/* Define as macros as fgraph_ops may not be defined */
+#define register_ftrace_graph(ops) ({ -1; })
+#define unregister_ftrace_graph(ops) do { } while (0)
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 821ae502d3d8..ccaef0097785 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -9,9 +9,6 @@ struct inode;
struct mm_struct;
struct task_struct;
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -55,11 +52,6 @@ extern void exit_robust_list(struct task_struct *curr);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
#else
static inline void exit_robust_list(struct task_struct *curr)
{
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 872f930f1b06..dd0a452373e7 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -51,7 +51,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool,
+ unsigned long start_addr);
/*
* General purpose special memory pool descriptor.
@@ -131,24 +132,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
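A minimal sketch of an allocation callback written against the extended genpool_algo_t signature above; it simply defers to the stock first-fit helper, and the wrapper name is illustrative:

#include <linux/genalloc.h>

static unsigned long example_algo(unsigned long *map, unsigned long size,
				  unsigned long start, unsigned int nr,
				  void *data, struct gen_pool *pool,
				  unsigned long start_addr)
{
	/* start_addr is the chunk's start address, new in this signature */
	return gen_pool_first_fit(map, size, start, nr, data, pool, start_addr);
}

/* Installed with gen_pool_set_algo(pool, example_algo, NULL); */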
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..06c0fd594097 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -17,6 +17,7 @@
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
#include <linux/blk_types.h>
+#include <asm/local.h>
#ifdef CONFIG_BLOCK
@@ -83,12 +84,13 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
unsigned long sectors[NR_STAT_GROUPS];
unsigned long ios[NR_STAT_GROUPS];
unsigned long merges[NR_STAT_GROUPS];
- unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
+ local_t in_flight[2];
};
#define PARTITION_META_INFO_VOLNAMELTH 64
@@ -122,14 +124,13 @@ struct hd_struct {
int make_it_fail;
#endif
unsigned long stamp;
- atomic_t in_flight[2];
#ifdef CONFIG_SMP
struct disk_stats __percpu *dkstats;
#else
struct disk_stats dkstats;
#endif
struct percpu_ref ref;
- struct rcu_head rcu_head;
+ struct rcu_work rcu_work;
};
#define GENHD_FL_REMOVABLE 1
@@ -295,8 +296,11 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-#define __part_stat_add(cpu, part, field, addnd) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
#define part_stat_read(part, field) \
({ \
@@ -333,10 +337,9 @@ static inline void free_part_stats(struct hd_struct *part)
#define part_stat_lock() ({ rcu_read_lock(); 0; })
#define part_stat_unlock() rcu_read_unlock()
-#define __part_stat_add(cpu, part, field, addnd) \
- ((part)->dkstats.field += addnd)
-
-#define part_stat_read(part, field) ((part)->dkstats.field)
+#define part_stat_get(part, field) ((part)->dkstats.field)
+#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
+#define part_stat_read(part, field) part_stat_get(part, field)
static inline void part_stat_set_all(struct hd_struct *part, int value)
{
@@ -354,27 +357,41 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_msecs(part, which) \
+ div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
+
#define part_stat_read_accum(part, field) \
(part_stat_read(part, field[STAT_READ]) + \
part_stat_read(part, field[STAT_WRITE]) + \
part_stat_read(part, field[STAT_DISCARD]))
-#define part_stat_add(cpu, part, field, addnd) do { \
- __part_stat_add((cpu), (part), field, addnd); \
+#define __part_stat_add(part, field, addnd) \
+ (part_stat_get(part, field) += (addnd))
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
if ((part)->partno) \
- __part_stat_add((cpu), &part_to_disk((part))->part0, \
+ __part_stat_add(&part_to_disk((part))->part0, \
field, addnd); \
} while (0)
-#define part_stat_dec(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, -1)
-#define part_stat_inc(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, 1)
-#define part_stat_sub(cpu, gendiskp, field, subnd) \
- part_stat_add(cpu, gendiskp, field, -subnd)
-
-void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+#define part_stat_dec(gendiskp, field) \
+ part_stat_add(gendiskp, field, -1)
+#define part_stat_inc(gendiskp, field) \
+ part_stat_add(gendiskp, field, 1)
+#define part_stat_sub(gendiskp, field, subnd) \
+ part_stat_add(gendiskp, field, -subnd)
+
+#define part_stat_local_dec(gendiskp, field) \
+ local_dec(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_inc(gendiskp, field) \
+ local_inc(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read(gendiskp, field) \
+ local_read(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read_cpu(gendiskp, field, cpu) \
+ local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
+
+unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part);
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -395,14 +412,14 @@ static inline void free_part_info(struct hd_struct *part)
kfree(part->info);
}
-/* block/blk-core.c */
-extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
+void update_io_ticks(struct hd_struct *part, unsigned long now);
/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
{
- device_add_disk(NULL, disk);
+ device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
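A short sketch of the accounting pattern with the reworked part_stat_*() helpers above: the explicit cpu argument is gone and in-flight counts use the local_t based part_stat_local_*() macros. The function name is hypothetical and index 0 of in_flight[] is assumed to mean reads:

static void example_account_read(struct hd_struct *part,
				 unsigned int nr_sectors)
{
	part_stat_lock();
	part_stat_inc(part, ios[STAT_READ]);
	part_stat_add(part, sectors[STAT_READ], nr_sectors);
	part_stat_local_inc(part, in_flight[0]);	/* assumed: 0 == read */
	part_stat_unlock();
}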
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 5972e4969197..eeae59d3ceb7 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -191,6 +191,7 @@ static inline void ct_assert_unique_operations(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -209,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -218,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
{ \
switch (0) { \
- s_fields \
+ s_fields \
+ case 0: \
; \
} \
}
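A plain-C sketch of the compile-time uniqueness trick the ct_assert_unique_*() helpers above rely on: duplicate case labels in a single switch are a hard compile error, so expanding every generated constant as a case catches accidental reuse. The constants here are illustrative:

static inline void example_assert_unique(void)
{
	enum { EXAMPLE_OP_A = 1, EXAMPLE_OP_B = 2 };

	switch (0) {
	case EXAMPLE_OP_A:	/* duplicating either value breaks the build */
	case EXAMPLE_OP_B:
	case 0:			/* mirrors the 'case 0:' added above */
		;
	}
}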
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a6afcec53795..5f5e25fd6149 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -59,29 +59,32 @@ struct vm_area_struct;
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-/*
+/**
+ * DOC: Page mobility and placement hints
+ *
* Page mobility and placement hints
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* These flags provide hints about how mobile the page is. Pages with similar
* mobility are placed within the same pageblocks to minimise problems due
* to external fragmentation.
*
- * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
*
- * __GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
*
- * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
*
- * __GFP_HARDWALL enforces the cpuset memory allocation policy.
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
*
- * __GFP_THISNODE forces the allocation to be satisified from the requested
- * node with no fallbacks or placement policy enforcements.
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
*
- * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
@@ -89,54 +92,60 @@ struct vm_area_struct;
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-/*
+/**
+ * DOC: Watermark modifiers
+ *
* Watermark modifiers -- controls access to emergency reserves
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * __GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
*
- * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with __GFP_HIGH
+ * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with %__GFP_HIGH
*
- * __GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users either should
+ * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
*
- * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the __GFP_MEMALLOC flag if both are set.
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-/*
+/**
+ * DOC: Reclaim modifiers
+ *
* Reclaim modifiers
+ * ~~~~~~~~~~~~~~~~~
*
- * __GFP_IO can start physical IO.
+ * %__GFP_IO can start physical IO.
*
- * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
*
- * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
*
- * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
*
- * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
+ * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
@@ -144,40 +153,40 @@ struct vm_area_struct;
* The following three modifiers might be used to override some of these
* implicit rules
*
- * __GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with __GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made else where. It can wait for other
+ * tasks to attempt high level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
@@ -188,14 +197,17 @@ struct vm_area_struct;
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-/*
+/**
+ * DOC: Action modifiers
+ *
* Action modifiers
+ * ~~~~~~~~~~~~~~~~
*
- * __GFP_NOWARN suppresses allocation failure reports.
+ * %__GFP_NOWARN suppresses allocation failure reports.
*
- * __GFP_COMP address compound page metadata.
+ * %__GFP_COMP address compound page metadata.
*
- * __GFP_ZERO returns a zeroed page on success.
+ * %__GFP_ZERO returns a zeroed page on success.
*/
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
@@ -208,66 +220,71 @@ struct vm_area_struct;
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-/*
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
* Useful GFP flag combinations that are commonly used. It is recommended
* that subsystems start with one of these combinations and then set/clear
- * __GFP_FOO flags as necessary.
- *
- * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves"
- *
- * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
- *
- * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
- * compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves"
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address.
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is by default used
+ * in page fault path, while the non-light is used by khugepaged.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
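A minimal sketch illustrating the guidance in the DOC blocks above: start from one of the standard combinations and add or clear individual __GFP bits as needed. The sizes and the context flag are illustrative:

#include <linux/slab.h>

static void *example_alloc(bool atomic_ctx)
{
	if (atomic_ctx)
		/* cannot sleep: dip into atomic reserves, never direct-reclaim */
		return kmalloc(256, GFP_ATOMIC);

	/* ordinary kernel allocation, but suppress the failure warning */
	return kmalloc(4096, GFP_KERNEL | __GFP_NOWARN);
}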
@@ -494,14 +511,14 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 21ddbe440030..9ddcf50a3c59 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -18,10 +18,19 @@ struct device;
struct gpio_desc;
/**
+ * Opaque descriptor for a structure of GPIO array attributes. This structure
+ * is attached to struct gpio_descs obtained from gpiod_get_array() and can be
+ * passed back to get/set array functions in order to activate fast processing
+ * path if applicable.
+ */
+struct gpio_array;
+
+/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
+ struct gpio_array *info;
unsigned int ndescs;
struct gpio_desc *desc[];
};
@@ -30,6 +39,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
@@ -94,6 +104,7 @@ struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
@@ -104,36 +115,46 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
int gpiod_get_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value(struct gpio_desc *desc, int value);
-void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array, int *value_array);
+int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value(const struct gpio_desc *desc);
int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
-void gpiod_set_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array);
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
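A minimal sketch of the bitmap-based array API above, assuming the caller already obtained descs from gpiod_get_array(); values now travel as one bit per line and descs->info carries the fast-path information:

#include <linux/bitmap.h>
#include <linux/gpio/consumer.h>

static int example_set_all_high(struct gpio_descs *descs)
{
	DECLARE_BITMAP(values, 64);	/* 64 lines is an arbitrary upper bound */

	if (descs->ndescs > 64)
		return -EINVAL;

	bitmap_fill(values, descs->ndescs);

	return gpiod_set_array_value(descs->ndescs, descs->desc,
				     descs->info, values);
}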
@@ -142,7 +163,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -152,6 +173,10 @@ int desc_to_gpio(const struct gpio_desc *desc);
struct device_node;
struct fwnode_handle;
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
@@ -225,6 +250,15 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_unhinge(struct device *dev,
+ struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
@@ -330,7 +364,8 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -341,12 +376,14 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+static inline int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
@@ -356,7 +393,8 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -368,8 +406,9 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
WARN_ON(1);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_desc **desc_array,
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -384,7 +423,8 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -395,12 +435,14 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
/* GPIO can never have been requested */
WARN_ON(1);
}
-static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
+static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
@@ -410,7 +452,8 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -424,7 +467,8 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
- int *value_array)
+ struct gpio_array *array_info,
+ unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
WARN_ON(1);
@@ -465,15 +509,17 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
return -EINVAL;
}
-static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+ const char *name)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return -EINVAL;
}
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- return ERR_PTR(-EINVAL);
+ return NULL;
}
static inline int desc_to_gpio(const struct gpio_desc *desc)
@@ -488,6 +534,15 @@ struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0ea328e71ec9..07cddbf45186 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -17,6 +17,7 @@ struct device_node;
struct seq_file;
struct gpio_device;
struct module;
+enum gpiod_flags;
#ifdef CONFIG_GPIOLIB
@@ -66,9 +67,15 @@ struct gpio_irq_chip {
/**
* @lock_key:
*
- * Per GPIO IRQ chip lockdep classes.
+ * Per GPIO IRQ chip lockdep class for IRQ lock.
*/
struct lock_class_key *lock_key;
+
+ /**
+ * @request_key:
+ *
+ * Per GPIO IRQ chip lockdep class for IRQ request.
+ */
struct lock_class_key *request_key;
/**
@@ -95,6 +102,13 @@ struct gpio_irq_chip {
unsigned int num_parents;
/**
+ * @parent_irq:
+ *
+ * For use by gpiochip_set_cascaded_irqchip()
+ */
+ unsigned int parent_irq;
+
+ /**
* @parents:
*
* A list of interrupt parents of a GPIO chip. This is owned by the
@@ -138,12 +152,21 @@ struct gpio_irq_chip {
* will allocate and map all IRQs during initialization.
*/
unsigned int first;
-};
-static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
-{
- return container_of(chip, struct gpio_irq_chip, chip);
-}
+ /**
+ * @irq_enable:
+ *
+ * Store old irq_chip irq_enable callback
+ */
+ void (*irq_enable)(struct irq_data *data);
+
+ /**
+ * @irq_disable:
+ *
+ * Store old irq_chip irq_disable callback
+ */
+ void (*irq_disable)(struct irq_data *data);
+};
#endif
/**
@@ -158,9 +181,13 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error
+ * (same as GPIOF_DIR_XXX), or negative error.
+ * It is recommended to always implement this function, even on
+ * input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
+ * This can be omitted on input-only or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
@@ -256,6 +283,9 @@ struct gpio_chip {
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *chip);
+
+ int (*init_valid_mask)(struct gpio_chip *chip);
+
int base;
u16 ngpio;
const char *const *names;
@@ -294,7 +324,9 @@ struct gpio_chip {
/**
* @need_valid_mask:
*
- * If set core allocates @valid_mask with all bits set to one.
+ * If set, the core allocates @valid_mask with all its values initialized
+ * by init_valid_mask(), or set to one if init_valid_mask() is not
+ * defined
*/
bool need_valid_mask;
@@ -386,7 +418,6 @@ static inline int gpiochip_add(struct gpio_chip *chip)
extern void gpiochip_remove(struct gpio_chip *chip);
extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
void *data);
-extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip);
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip, void *data));
@@ -395,6 +426,10 @@ extern struct gpio_chip *gpiochip_find(void *data,
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
+int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset);
/* Line status inquiry for drivers */
bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
@@ -570,7 +605,8 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip)
#endif /* CONFIG_PINCTRL */
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
- const char *label);
+ const char *label,
+ enum gpiod_flags flags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB */
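A short sketch of the updated gpiochip_request_own_desc() above, which now takes gpiod_flags so the initial direction and level can be set at request time; the hwnum, label and surrounding driver context are assumptions of the example:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>

static struct gpio_desc *example_grab_line(struct gpio_chip *chip)
{
	struct gpio_desc *desc;

	desc = gpiochip_request_own_desc(chip, 0, "example-reset",
					 GPIOD_OUT_HIGH);
	if (IS_ERR(desc))
		return desc;

	/* ... use the line, then gpiochip_free_own_desc(desc) ... */
	return desc;
}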
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index d271ff23984f..d2bacf502429 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
- HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+ HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
+ HDMI_EXTENDED_COLORIMETRY_OPRGB,
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
struct hdmi_audio_infoframe audio;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
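
A hedged sketch of the new check/pack_only split for AVI infoframes (the wrapper, buffer and size come from the caller and are illustrative, not from this patch):

static ssize_t my_pack_avi(struct hdmi_avi_infoframe *frame, void *buf,
			   size_t len)
{
	int ret;

	/* Validate once ... */
	ret = hdmi_avi_infoframe_check(frame);
	if (ret)
		return ret;

	/* ... then pack without re-running the sanity checks inside pack(). */
	return hdmi_avi_infoframe_pack_only(frame, buf, len);
}
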
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
* Returns data up to 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
/**
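
A hedged example of a call updated for the new is_signed parameter (the usage ids are placeholders):

static int my_read_raw(struct hid_sensor_hub_device *hsdev, u32 usage_id,
		       u32 attr_usage_id, u32 report_id)
{
	/* Synchronous read; sign-extend fields narrower than 32 bits. */
	return sensor_hub_input_attr_get_raw_value(hsdev, usage_id,
						   attr_usage_id, report_id,
						   SENSOR_HUB_SYNC, true);
}
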
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 834e6461a690..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -526,6 +526,7 @@ struct hid_input {
const char *name;
bool registered;
struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
};
enum hid_type {
@@ -721,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return a negative value on error; any other
+ * value passes the event on to .event(). Typically return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
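
A hedged sketch of the clarified .raw_event() return convention; the driver name and size check are illustrative:

static int my_raw_event(struct hid_device *hdev, struct hid_report *report,
			u8 *data, int size)
{
	if (size < 2)
		return -EINVAL;		/* error: abort processing */

	return 0;			/* non-negative: keep processing, pass on to .event() */
}
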
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 0690679832d4..ea5cdbd8c2c3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -36,7 +36,31 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
-extern unsigned long totalhigh_pages;
+extern atomic_long_t _totalhigh_pages;
+static inline unsigned long totalhigh_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_inc(void)
+{
+ atomic_long_inc(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_dec(void)
+{
+ atomic_long_dec(&_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_add(long count)
+{
+ atomic_long_add(count, &_totalhigh_pages);
+}
+
+static inline void totalhigh_pages_set(long val)
+{
+ atomic_long_set(&_totalhigh_pages, val);
+}
void kmap_flush_unused(void);
@@ -51,7 +75,7 @@ static inline struct page *kmap_to_page(void *addr)
return virt_to_page(addr);
}
-#define totalhigh_pages 0UL
+static inline unsigned long totalhigh_pages(void) { return 0UL; }
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
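
A hedged sketch of reading the counter through the new accessor instead of the removed totalhigh_pages variable; the reporting helper is illustrative:

static unsigned long my_highmem_kib(void)
{
	/* The CONFIG_HIGHMEM=n stub simply returns 0, so this works everywhere. */
	return totalhigh_pages() << (PAGE_SHIFT - 10);
}
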
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 4c92e3ba3e16..66f9ebbb1df3 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * Authors: Jérôme Glisse <jglisse@redhat.com>
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Heterogeneous Memory Management (HMM)
@@ -69,6 +69,7 @@
#define LINUX_HMM_H
#include <linux/kconfig.h>
+#include <asm/pgtable.h>
#if IS_ENABLED(CONFIG_HMM)
@@ -107,7 +108,7 @@ enum hmm_pfn_flag_e {
* HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
* HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
* HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
- * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
+ * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
* be mirrored by a device, because the entry will never have HMM_PFN_VALID
* set and the pfn value is undefined.
*
@@ -274,14 +275,29 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
- * enum hmm_update_type - type of update
+ * enum hmm_update_event - type of update
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
*/
-enum hmm_update_type {
+enum hmm_update_event {
HMM_UPDATE_INVALIDATE,
};
/*
+ * struct hmm_update - HMM update information for callback
+ *
+ * @start: virtual start address of the range to update
+ * @end: virtual end address of the range to update
+ * @event: event triggering the update (what is happening)
+ * @blockable: can the callback block/sleep?
+ */
+struct hmm_update {
+ unsigned long start;
+ unsigned long end;
+ enum hmm_update_event event;
+ bool blockable;
+};
+
+/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
* @update: callback to update range on a device
@@ -300,9 +316,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
- * @update_type: type of update that occurred to the CPU page table
- * @start: virtual start address of the range to update
- * @end: virtual end address of the range to update
+ * @update: update information (see struct hmm_update)
+ * Returns: -EAGAIN if update.blockable is false and the callback needs to
+ * block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@@ -313,10 +329,8 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
- void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- enum hmm_update_type update_type,
- unsigned long start,
- unsigned long end);
+ int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+ const struct hmm_update *update);
};
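
A hedged sketch of a mirror implementation converted to the new callback prototype; the wrapper struct, lock and invalidation helper are assumptions, not part of this patch:

struct my_mirror {
	struct hmm_mirror mirror;
	struct mutex lock;
};

static void my_invalidate(struct my_mirror *m, unsigned long start,
			  unsigned long end)
{
	/* device-specific page-table invalidation would go here */
}

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

	/* Honour update->blockable: never sleep when blocking is not allowed. */
	if (!update->blockable) {
		if (!mutex_trylock(&m->lock))
			return -EAGAIN;
	} else {
		mutex_lock(&m->lock);
	}

	my_invalidate(m, update->start, update->end);
	mutex_unlock(&m->lock);
	return 0;
}
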
/*
@@ -473,6 +487,7 @@ struct hmm_devmem_ops {
* @device: device to bind resource to
* @ops: memory operations callback
* @ref: per CPU refcount
+ * @page_fault: callback when the CPU faults on an unaddressable device page
*
* This is a helper structure for device drivers that do not wish to implement
* the gory details related to hotplugging new memory and allocating struct
@@ -480,7 +495,28 @@ struct hmm_devmem_ops {
*
* Device drivers can directly use ZONE_DEVICE memory on their own if they
* wish to do so.
+ *
+ * The page_fault() callback must migrate the page back, from device memory to
+ * system memory, so that the CPU can access it. This might fail for various
+ * reasons (device issues, device has been unplugged, ...). When such error
+ * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and
+ * set the CPU page table entry to "poisoned".
+ *
+ * Note that because memory cgroup charges are transferred to the device memory,
+ * this should never fail due to memory restrictions. However, allocation
+ * of a regular system page might still fail because we are out of memory. If
+ * that happens, the page_fault() callback must return VM_FAULT_OOM.
+ *
+ * The page_fault() callback can also try to migrate back multiple pages in one
+ * chunk, as an optimization. It must, however, prioritize the faulting address
+ * over all the others.
*/
+typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp);
+
struct hmm_devmem {
struct completion completion;
unsigned long pfn_first;
@@ -490,6 +526,7 @@ struct hmm_devmem {
struct dev_pagemap pagemap;
const struct hmm_devmem_ops *ops;
struct percpu_ref ref;
+ dev_page_fault_t page_fault;
};
/*
@@ -499,8 +536,7 @@ struct hmm_devmem {
* enough and allocate struct page for it.
*
* The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
*/
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
struct device *device,
@@ -508,7 +544,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
struct device *device,
struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
/*
* hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3892e9c8b2de..2e8957eac4d4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/hrtimer.h
- *
* hrtimers - High-resolution kernel timers
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,8 +8,6 @@
* data type definitions, declarations, prototypes
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a8a126259bc4..381e872bfde0 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -3,10 +3,11 @@
#define _LINUX_HUGE_MM_H
#include <linux/sched/coredump.h>
+#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
-extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
@@ -23,7 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
}
#endif
-extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -42,13 +43,13 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+ pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, pfn_t pfn, bool write);
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
@@ -92,7 +93,11 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
extern unsigned long transparent_hugepage_flags;
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+/*
+ * To be used on VMAs which are known to support THP.
+ * Use transparent_hugepage_enabled() otherwise.
+ */
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_NOHUGEPAGE)
return false;
@@ -116,6 +121,8 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
return false;
}
+bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
@@ -212,11 +219,11 @@ static inline int hpage_nr_pages(struct page *page)
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags);
+ pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags);
+ pud_t *pud, int flags, struct dev_pagemap **pgmap);
-extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -256,6 +263,11 @@ static inline bool thp_migration_supported(void)
#define hpage_nr_pages(x) 1
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+{
+ return false;
+}
+
static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
return false;
@@ -321,7 +333,8 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
+ pmd_t orig_pmd)
{
return 0;
}
@@ -342,13 +355,13 @@ static inline void mm_put_huge_zero_page(struct mm_struct *mm)
}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd, int flags)
+ unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
- unsigned long addr, pud_t *pud, int flags)
+ unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
return NULL;
}
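
A hedged sketch tying the changes above together: the cheap __transparent_hugepage_enabled() check for VMAs already known to support THP, and the vm_fault_t return type; the helper itself is illustrative:

static vm_fault_t my_try_huge_anon_fault(struct vm_fault *vmf)
{
	if (!__transparent_hugepage_enabled(vmf->vma))
		return VM_FAULT_FALLBACK;

	return do_huge_pmd_anonymous_page(vmf);
}
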
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c39d9170a8a0..087fd5f48c91 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -105,7 +105,7 @@ void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
struct vm_area_struct *dst_vma,
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+ pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
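
A hedged sketch of how a caller is expected to use the new range-adjustment helper before flushing; the wrapper is illustrative:

static void my_flush_hugetlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/* May widen [start, end) so it also covers a shared PMD. */
	adjust_range_if_pmd_sharing_possible(vma, &start, &end);
	flush_tlb_range(vma, start, end);
}
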
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..c0b93e0ff0c0 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -33,7 +33,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
- * (per mill).
+ * (in bits of entropy per 1024 bits of input;
+ * valid values: 1 to 1024, or 0 for unknown).
*/
struct hwrng {
const char *name;
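
A hedged example of a driver advertising its entropy estimate with the clarified unit; the read callback and the value 900 are illustrative:

static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* fill data with up to max bytes of hardware randomness */
	return 0;
}

static struct hwrng my_rng = {
	.name		= "my-rng",
	.read		= my_rng_read,
	.quality	= 900,	/* ~900 bits of entropy per 1024 bits of output */
};
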
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
index 1c7b89ae6bdc..473897bbd898 100644
--- a/include/linux/hwmon-sysfs.h
+++ b/include/linux/hwmon-sysfs.h
@@ -33,10 +33,28 @@ struct sensor_device_attribute{
{ .dev_attr = __ATTR(_name, _mode, _show, _store), \
.index = _index }
+#define SENSOR_ATTR_RO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_ATTR_RW(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_ATTR_WO(_name, _func, _index) \
+ SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index)
+
#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
struct sensor_device_attribute sensor_dev_attr_##_name \
= SENSOR_ATTR(_name, _mode, _show, _store, _index)
+#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
+
+#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)
+
+#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \
+ SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index)
+
struct sensor_device_attribute_2 {
struct device_attribute dev_attr;
u8 index;
@@ -50,8 +68,29 @@ struct sensor_device_attribute_2 {
.index = _index, \
.nr = _nr }
+#define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index)
+
+#define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index)
+
+#define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index)
+
#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
struct sensor_device_attribute_2 sensor_dev_attr_##_name \
= SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
+#define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \
+ _nr, _index)
+
+#define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \
+ SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \
+ _nr, _index)
+
#endif /* _LINUX_HWMON_SYSFS_H */
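
A hedged example of the new permission-encoding helpers; the show callback and value are illustrative:

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);

	return sprintf(buf, "%d\n", 25000 + sattr->index);
}

/* Expands to SENSOR_DEVICE_ATTR(temp1_input, 0444, temp1_input_show, NULL, 0) */
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp1_input, 0);
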
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9493d4a388db..99e0c1b0b5fb 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -118,6 +118,7 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
+ hwmon_in_enable,
};
#define HWMON_I_INPUT BIT(hwmon_in_input)
@@ -135,6 +136,7 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
enum hwmon_curr_attributes {
hwmon_curr_input,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index efda23cf32c7..f0885cc01db6 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -739,8 +739,9 @@ struct vmbus_channel {
u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
- void *ringbuffer_pages;
+ struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
@@ -830,15 +831,6 @@ struct vmbus_channel {
*/
struct list_head sc_list;
/*
- * Current number of sub-channels.
- */
- int num_sc;
- /*
- * Number of a sub-channel (position within sc_list) which is supposed
- * to be used as the next outgoing channel.
- */
- int next_oc;
- /*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
*/
@@ -904,6 +896,13 @@ struct vmbus_channel {
bool probe_done;
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueue, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -965,14 +964,6 @@ void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
/*
- * Retrieve the (sub) channel on which to send an outgoing request.
- * When a primary channel has multiple sub-channels, we choose a
- * channel whose VCPU binding is closest to the VCPU on which
- * this call is being made.
- */
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
-
-/*
* Check if sub-channels have already been offered. This API will be useful
* when the driver is unloaded after establishing sub-channels. In this case,
* when the driver is re-loaded, the driver would have to check if the
@@ -1021,6 +1012,14 @@ struct vmbus_packet_mpb_array {
struct hv_mpb_array range;
} __packed;
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+ u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+ void (*onchannel_callback)(void *context),
+ void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1125,6 +1124,7 @@ struct hv_device {
u16 device_id;
struct device device;
+ char *driver_override; /* Driver name to force a match */
struct vmbus_channel *channel;
struct kset *channels_kset;
@@ -1442,7 +1442,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*
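
A hedged sketch of the new split open path, which separates ring-buffer allocation from connecting the channel; sizes and the callback are illustrative:

static void my_onchannel(void *context)
{
	/* drain the inbound ring buffer here */
}

static int my_open_channel(struct vmbus_channel *chan, void *ctx)
{
	int ret;

	ret = vmbus_alloc_ring(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
	if (ret)
		return ret;

	ret = vmbus_connect_ring(chan, my_onchannel, ctx);
	if (ret)
		vmbus_free_ring(chan);
	return ret;
}
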
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 465afb092fa7..65b4eaed1d96 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -231,7 +231,6 @@ enum i2c_alert_protocol {
/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @attach_adapter: Callback for bus addition (deprecated)
* @probe: Callback for device binding - soon to be deprecated
* @probe_new: New callback for device binding
* @remove: Callback for device unbinding
@@ -268,11 +267,6 @@ enum i2c_alert_protocol {
struct i2c_driver {
unsigned int class;
- /* Notifies the driver that a new bus has appeared. You should avoid
- * using this, it will be removed in a near future.
- */
- int (*attach_adapter)(struct i2c_adapter *) __deprecated;
-
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
int (*remove)(struct i2c_client *);
@@ -564,6 +558,7 @@ struct i2c_lock_operations {
* @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
+ * @sda_hold_ns: time IP core additionally needs to hold SDA in ns
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -571,6 +566,7 @@ struct i2c_timings {
u32 scl_fall_ns;
u32 scl_int_delay_ns;
u32 sda_fall_ns;
+ u32 sda_hold_ns;
};
/**
@@ -581,12 +577,14 @@ struct i2c_timings {
* recovery. Populated internally for generic GPIO recovery.
* @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
* Populated internally for generic GPIO recovery.
- * @get_sda: This gets current value of SDA line. Optional for generic SCL
- * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic
- * GPIO recovery.
- * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery.
- * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO
- * recovery.
+ * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory
+ * for generic SCL recovery. Populated internally, if sda_gpio is a valid
+ * GPIO, for generic GPIO recovery.
+ * @set_sda: This sets/clears the SDA line. This or get_sda() is mandatory for
+ * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO,
+ * for generic GPIO recovery.
+ * @get_bus_free: Returns the bus free state as seen from the IP core in case it
+ * has a more complex internal logic than just reading SDA. Optional.
* @prepare_recovery: This will be called before starting recovery. Platform may
* configure padmux here for SDA/SCL line or something else they want.
* @unprepare_recovery: This will be called after completing recovery. Platform
@@ -601,6 +599,7 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *adap, int val);
int (*get_sda)(struct i2c_adapter *adap);
void (*set_sda)(struct i2c_adapter *adap, int val);
+ int (*get_bus_free)(struct i2c_adapter *adap);
void (*prepare_recovery)(struct i2c_adapter *adap);
void (*unprepare_recovery)(struct i2c_adapter *adap);
@@ -658,6 +657,10 @@ struct i2c_adapter_quirks {
I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
/* clock stretching is not supported */
#define I2C_AQ_NO_CLK_STRETCH BIT(4)
+/* message cannot have length of 0 */
+#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
+#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
+#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -759,18 +762,6 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
adapter->lock_ops->unlock_bus(adapter, flags);
}
-static inline void
-i2c_lock_adapter(struct i2c_adapter *adapter)
-{
- i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
-}
-
-static inline void
-i2c_unlock_adapter(struct i2c_adapter *adapter)
-{
- i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
-}
-
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
@@ -864,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
}
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
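
A hedged example of the renamed DMA-safe buffer helper: the put side now takes the buffer first plus an xferred flag so data is only copied back when the transfer actually happened (threshold and error mapping are illustrative):

static int my_xfer_dma_safe(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	u8 *buf;
	int ret;

	buf = i2c_get_dma_safe_msg_buf(msg, 8);
	if (!buf)
		return -EAGAIN;		/* no bounce buffer: caller falls back to PIO */

	ret = i2c_transfer(adap, msg, 1);
	i2c_put_dma_safe_msg_buf(buf, msg, ret == 1);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
}
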
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 000000000000..73b0982cc519
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[0];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48 bits PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[0];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds and
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[0];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
+
+#endif /* I3C_CCC_H */
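
A hedged sketch of describing a directed GETSTATUS CCC with the structures above; the filling helper is illustrative (the real code lives in the I3C core):

static void my_fill_getstatus(struct i3c_ccc_cmd *cmd,
			      struct i3c_ccc_cmd_dest *dest,
			      struct i3c_ccc_getstatus *status, u8 addr)
{
	dest->addr = addr;
	dest->payload.len = sizeof(*status);
	dest->payload.data = status;		/* must be DMA-able */

	cmd->rnw = true;			/* read the status back */
	cmd->id = I3C_CCC_GETSTATUS;
	cmd->dests = dest;
	cmd->ndests = 1;
	cmd->err = I3C_ERROR_UNKNOWN;
}
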
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 000000000000..5ecb055fd375
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes of the transfer
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
+
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * These are all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_DCR, \
+ .dcr = _dcr, \
+ }
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ int (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled, in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler
+ * is called in a workqueue context. It is allowed to sleep and send
+ * new messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+
+#endif /* I3C_DEV_H */
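
A hedged sketch of a minimal I3C device driver built on this API: a match table, a probe doing one private SDR write-then-read, and registration via module_i3c_driver(); all names and IDs are made up:

static int my_probe(struct i3c_device *i3cdev)
{
	u8 reg = 0x00, val[2];
	struct i3c_priv_xfer xfers[2] = {
		{ .rnw = false, .len = 1, .data.out = &reg },
		{ .rnw = true,  .len = 2, .data.in  = val  },
	};

	/* Write the register address, then read two bytes back. */
	return i3c_device_do_priv_xfers(i3cdev, xfers, 2);
}

static int my_remove(struct i3c_device *i3cdev)
{
	return 0;
}

static const struct i3c_device_id my_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),
	{ /* sentinel */ },
};

static struct i3c_driver my_driver = {
	.driver = {
		.name = "my-i3c-driver",
	},
	.probe		= my_probe,
	.remove		= my_remove,
	.id_table	= my_ids,
};
module_i3c_driver(my_driver);
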
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 000000000000..f13fd8b1dd79
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure is describing common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(9, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ const struct i2c_dev_boardinfo *boardinfo;
+ struct i2c_client *dev;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * This value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it has been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of linking
+ * this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interleaved
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call i3c_master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @i2c_funcs: expose the supported I2C functionalities.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support fewer IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ u32 (*i2c_funcs)(struct i3c_master_controller *master);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode and can therefore only
+ * be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+
+#endif /* I3C_MASTER_H */
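To make the hooks above concrete, here is a minimal, hypothetical controller skeleton built only from the ops and helpers declared in this header. All dummy_* names are invented, the hardware programming is elided, and both the <linux/i3c/master.h> include path and the dyn_addr field of struct i3c_device_info are assumptions based on the rest of the series; wiring dummy_probe() into a platform_driver is left out.

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i3c/master.h>	/* assumed final location of the header above */

struct dummy_i3c_master {
	struct i3c_master_controller base;
};

static int dummy_bus_init(struct i3c_master_controller *m)
{
	/* Assumption: i3c_device_info exposes a dyn_addr field; 0x08 is made up. */
	struct i3c_device_info info = { .dyn_addr = 0x08 };

	/* Mandatory: describe this master to the core. */
	return i3c_master_set_info(m, &info);
}

static int dummy_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	u32 *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	/* Controller-private data, retrieved later with i3c_dev_get_master_data(). */
	i3c_dev_set_master_data(dev, priv);
	return 0;
}

static void dummy_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	kfree(i3c_dev_get_master_data(dev));
	i3c_dev_set_master_data(dev, NULL);
}

static int dummy_do_daa(struct i3c_master_controller *m)
{
	/* Send ENTDAA, then register each discovered device; 0x09 stands in
	 * for an address returned by the (omitted) hardware handshake.
	 */
	return i3c_master_add_i3c_dev_locked(m, 0x09);
}

static int dummy_priv_xfers(struct i3c_dev_desc *dev,
			    struct i3c_priv_xfer *xfers, int nxfers)
{
	return -EOPNOTSUPP;	/* hardware programming omitted */
}

static int dummy_send_ccc_cmd(struct i3c_master_controller *m,
			      struct i3c_ccc_cmd *cmd)
{
	return -EOPNOTSUPP;	/* hardware programming omitted */
}

static int dummy_i2c_xfers(struct i2c_dev_desc *dev,
			   const struct i2c_msg *xfers, int nxfers)
{
	return -EOPNOTSUPP;	/* hardware programming omitted */
}

static u32 dummy_i2c_funcs(struct i3c_master_controller *m)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i3c_master_controller_ops dummy_ops = {
	.bus_init	= dummy_bus_init,
	.attach_i3c_dev	= dummy_attach_i3c_dev,
	.detach_i3c_dev	= dummy_detach_i3c_dev,
	.do_daa		= dummy_do_daa,
	.send_ccc_cmd	= dummy_send_ccc_cmd,
	.priv_xfers	= dummy_priv_xfers,
	.i2c_xfers	= dummy_i2c_xfers,
	.i2c_funcs	= dummy_i2c_funcs,
};

static int dummy_probe(struct platform_device *pdev)
{
	struct dummy_i3c_master *master;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	/* Register as the main (non-secondary) master on the bus. */
	return i3c_master_register(&master->base, &pdev->dev, &dummy_ops, false);
}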
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c74b0321922a..e7d29ae633cd 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -50,6 +50,7 @@ struct ide_request {
struct scsi_request sreq;
u8 sense[SCSI_SENSE_BUFFERSIZE];
u8 type;
+ void *special;
};
static inline struct ide_request *ide_req(struct request *rq)
@@ -529,6 +530,10 @@ struct ide_drive_s {
struct request_queue *queue; /* request queue */
+ bool (*prep_rq)(struct ide_drive_s *, struct request *);
+
+ struct blk_mq_tag_set tag_set;
+
struct request *rq; /* current request */
void *driver_data; /* extra driver data */
u16 *id; /* identification info */
@@ -612,6 +617,10 @@ struct ide_drive_s {
bool sense_rq_armed;
struct request *sense_rq;
struct request_sense sense_data;
+
+ /* async sense insertion */
+ struct work_struct rq_work;
+ struct list_head rq_list;
};
typedef struct ide_drive_s ide_drive_t;
@@ -1089,6 +1098,7 @@ extern int ide_pci_clk;
int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
+void ide_insert_request_head(ide_drive_t *, struct request *);
void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1208,7 +1218,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
+extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3e8215b2c371..60daf34b625d 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -214,8 +214,7 @@ static inline void idr_preload_end(void)
++id, (entry) = idr_get_next((idr), &(id)))
/*
- * IDA - IDR based id allocator, use when translation from id to
- * pointer isn't necessary.
+ * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@@ -225,45 +224,82 @@ struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
-DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
struct ida {
- struct radix_tree_root ida_rt;
+ struct xarray xa;
};
+#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
+
#define IDA_INIT(name) { \
- .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \
+ .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-void ida_remove(struct ida *ida, int id);
+int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
+void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
- gfp_t gfp_mask);
-void ida_simple_remove(struct ida *ida, unsigned int id);
+/**
+ * ida_alloc() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and %INT_MAX, inclusive.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+ return ida_alloc_range(ida, 0, ~0, gfp);
+}
-static inline void ida_init(struct ida *ida)
+/**
+ * ida_alloc_min() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and %INT_MAX, inclusive.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
- INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+ return ida_alloc_range(ida, min, ~0, gfp);
}
/**
- * ida_get_new - allocate new ID
- * @ida: idr handle
- * @p_id: pointer to the allocated handle
+ * ida_alloc_max() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
*
- * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ * Allocate an ID between 0 and @max, inclusive.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
*/
-static inline int ida_get_new(struct ida *ida, int *p_id)
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
- return ida_get_new_above(ida, 0, p_id);
+ return ida_alloc_range(ida, 0, max, gfp);
}
+static inline void ida_init(struct ida *ida)
+{
+ xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
+}
+
+#define ida_simple_get(ida, start, end, gfp) \
+ ida_alloc_range(ida, start, (end) - 1, gfp)
+#define ida_simple_remove(ida, id) ida_free(ida, id)
+
static inline bool ida_is_empty(const struct ida *ida)
{
- return radix_tree_empty(&ida->ida_rt);
+ return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */
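As a usage sketch of the new allocation API (the minor-number scheme below is hypothetical), a caller that previously paired ida_simple_get()/ida_simple_remove() can now do:

#include <linux/idr.h>

static DEFINE_IDA(example_minor_ida);

/* Grab the lowest free minor in [0, 255]; may sleep with GFP_KERNEL. */
static int example_get_minor(void)
{
	return ida_alloc_max(&example_minor_ida, 255, GFP_KERNEL);
}

/* Return a minor obtained above to the pool. */
static void example_put_minor(int minor)
{
	ida_free(&example_minor_ida, minor);
}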
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9c03a7d5e400..3b04e72315e1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -812,6 +812,8 @@ enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
};
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
/**
* mesh channel switch parameters element's flag indicator
*
@@ -1460,13 +1462,16 @@ struct ieee80211_ht_operation {
* STA can receive. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 3 bits of this field indicate the Maximum NSTS,total
+ * (a beamformee capability).
* @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
* @tx_highest: Indicates highest long GI VHT PPDU data rate
* STA can transmit. Rate expressed in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest TX data rate supported.
- * The top 3 bits of this field are reserved.
+ * The top 2 bits of this field are reserved, the
+ * 3rd bit from the top indicates VHT Extended NSS BW
+ * Capability.
*/
struct ieee80211_vht_mcs_info {
__le16 rx_mcs_map;
@@ -1475,6 +1480,13 @@ struct ieee80211_vht_mcs_info {
__le16 tx_highest;
} __packed;
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
/**
* enum ieee80211_vht_mcs_support - VHT MCS support definitions
* @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
@@ -1545,11 +1557,11 @@ struct ieee80211_vht_operation {
* struct ieee80211_he_cap_elem - HE capabilities element
*
* This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
+ * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3
*/
struct ieee80211_he_cap_elem {
- u8 mac_cap_info[5];
- u8 phy_cap_info[9];
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
} __packed;
#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
@@ -1607,7 +1619,7 @@ struct ieee80211_he_mcs_nss_supp {
* struct ieee80211_he_operation - HE capabilities element
*
* This structure is the "HE operation element" fields as
- * described in P802.11ax_D2.0 section 9.4.2.238
+ * described in P802.11ax_D3.0 section 9.4.2.238
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
@@ -1650,6 +1662,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
@@ -1659,6 +1672,7 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
@@ -1678,6 +1692,26 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function parses the data.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable);
/* 802.11ax HE MAC capabilities */
#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
@@ -1707,15 +1741,15 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
/* Link adaptation is split between byte HE_MAC_CAP1 and
* HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
@@ -1729,14 +1763,13 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
-#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
#define IEEE80211_HE_MAC_CAP2_BSR 0x08
#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
-#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
@@ -1744,25 +1777,34 @@ struct ieee80211_mu_edca_param_set {
* A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
* same field in the HE capabilities.
*/
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
-#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
-#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_USE_VHT 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
#define IEEE80211_HE_MAC_CAP4_QTP 0x02
#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_SRP_RESP 0x08
#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
#define IEEE80211_HE_MAC_CAP4_OPS 0x20
#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between bytes #4 and #5.
+ * The value is a combination of B39, B40 and B41.
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
/* 802.11ax HE PHY capabilities */
-#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
@@ -1779,10 +1821,10 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
-/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
-#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
-#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
@@ -1883,7 +1925,19 @@ struct ieee80211_mu_edca_param_set {
#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
-#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
/* 802.11ax HE TX/RX MCS NSS Support */
#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
@@ -1957,17 +2011,17 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
}
/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
-#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
-#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
-#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
@@ -1992,7 +2046,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS)
oper_len++;
/* Add the first byte (extension ID) to the total length */
@@ -2633,6 +2687,10 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+/* Defines support for TWT Requester and TWT Responder */
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
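As a sketch of how the new ieee80211_get_vht_max_nss() helper might be called by rate-control code (the peer capability pointer is hypothetical; the chanwidth enum values come from the existing header):

#include <linux/ieee80211.h>

/* Max NSS the peer supports for a 160 MHz, MCS 9 transmission, assuming the
 * local rate scaling code understands VHT Extended NSS BW signaling. */
static int example_peer_max_nss(struct ieee80211_vht_cap *peer_vht_cap)
{
	return ieee80211_get_vht_max_nss(peer_vht_cap,
					 IEEE80211_VHT_CHANWIDTH_160MHZ,
					 9, true);
}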
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c20c7e197d07..627b788ba0ff 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -119,6 +119,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid);
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -127,6 +129,16 @@ br_fdb_find_port(const struct net_device *br_dev,
{
return NULL;
}
+
+static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+}
+
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 3d2996dc7d85..12e3eebf0ce6 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -16,9 +16,23 @@
#define __IF_TUN_H
#include <uapi/linux/if_tun.h>
+#include <uapi/linux/virtio_net.h>
#define TUN_XDP_FLAG 0x1UL
+#define TUN_MSG_UBUF 1
+#define TUN_MSG_PTR 2
+struct tun_msg_ctl {
+ unsigned short type;
+ unsigned short num;
+ void *ptr;
+};
+
+struct tun_xdp_hdr {
+ int buflen;
+ struct virtio_net_hdr gso;
+};
+
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 83ea4df6ab81..4cca4da7a6de 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
@@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
@@ -133,6 +133,9 @@ struct vlan_pcpu_stats {
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid,
+ void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
return NULL;
}
+static inline int
+vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ return 0;
+}
+
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
@@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
return skb;
}
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+ skb->vlan_present = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+ dst->vlan_present = src->vlan_present;
+ dst->vlan_proto = src->vlan_proto;
+ dst->vlan_tci = src->vlan_tci;
+}
+
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
@@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
return skb;
}
@@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb->vlan_tci = vlan_tci;
+ skb->vlan_present = 1;
}
/**
@@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
}
}
-#define HAVE_VLAN_GET_TAG
-
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
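With VLAN_TAG_PRESENT gone from vlan_tci, drivers now go exclusively through the helpers; a minimal, hypothetical sketch:

#include <linux/if_vlan.h>

static struct sk_buff *example_handle_tx_vlan(struct sk_buff *skb, u16 vid)
{
	/* Request hardware-accelerated tag insertion on transmit. */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	if (skb_vlan_tag_present(skb))
		pr_debug("vid %d prio %d cfi %d\n",
			 skb_vlan_tag_get_id(skb),
			 skb_vlan_tag_get_prio(skb),
			 skb_vlan_tag_get_cfi(skb));

	/* If hardware cannot offload the tag after all, push it into the
	 * payload; this also clears the accelerated tag. May return NULL. */
	return __vlan_hwaccel_push_inside(skb);
}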
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 730ead1a46df..7e84351fa2c0 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -39,6 +39,8 @@ struct iio_dev;
* if there is just one read-only sample data shift register.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @data_reg: Address of the data register; if 0, the default address of 0x3 will
+ * be used.
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
@@ -47,6 +49,7 @@ struct ad_sigma_delta_info {
bool has_registers;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int data_reg;
};
/**
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index f9bd6e8ab138..8092b8e7f37e 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -40,7 +40,7 @@
#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
#define ST_SENSORS_MAX_NAME 17
-#define ST_SENSORS_MAX_4WAI 7
+#define ST_SENSORS_MAX_4WAI 8
#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
ch2, s, endian, rbits, sbits, addr) \
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 97914a2833d1..b5e16b8c50b7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -30,6 +30,21 @@ extern void ima_post_path_mknod(struct dentry *dentry);
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
+#if defined(CONFIG_X86) && defined(CONFIG_EFI)
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
+{
+ return false;
+}
+
+static inline const char * const *arch_get_ima_policy(void)
+{
+ return NULL;
+}
+#endif
+
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..00d7e8e919c6
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
+ * @f: function pointer
+ * @f$NR: builtin functions names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoid retpoline overhead for known builtins, checking @f vs each of them and
+ * eventually invoking the builtin function directly. The functions are checked
+ * in the given order. Fall back to the indirect call otherwise.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is
+ * builtin; this macro simplifies dealing with indirect calls that have only
+ * ipv4/ipv6 alternatives
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+
+#endif
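A usage sketch: the two handlers below are hypothetical stand-ins for the usual IPv4/IPv6 pair a caller knows at build time, assumed to be defined elsewhere.

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>

INDIRECT_CALLABLE_DECLARE(int example_v6_rcv(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int example_v4_rcv(struct sk_buff *skb));

static int example_dispatch(int (*cb)(struct sk_buff *skb), struct sk_buff *skb)
{
	/*
	 * Check @cb against the known builtins first and call them directly;
	 * fall back to a retpoline-protected indirect call otherwise.
	 */
	return INDIRECT_CALL_INET(cb, example_v6_rcv, example_v4_rcv, skb);
}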
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index c759d1cbcedd..a64f21a97369 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -37,7 +37,9 @@ struct in_device {
unsigned long mr_v1_seen;
unsigned long mr_v2_seen;
unsigned long mr_maxdelay;
- unsigned char mr_qrv;
+ unsigned long mr_qi; /* Query Interval */
+ unsigned long mr_qri; /* Query Response Interval */
+ unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
unsigned char mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
diff --git a/include/linux/init.h b/include/linux/init.h
index bc27cf03c41e..5255069f5a9f 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -116,8 +116,23 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-extern initcall_t __con_initcall_start[], __con_initcall_end[];
-extern initcall_t __security_initcall_start[], __security_initcall_end[];
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef int initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return offset_to_ptr(entry);
+}
+#else
+typedef initcall_t initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return *entry;
+}
+#endif
+
+extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
/* Used for contructor calls. */
typedef void (*ctor_fn_t)(void);
@@ -131,7 +146,6 @@ extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-void __init load_default_modules(void);
int __init init_rootfs(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
@@ -167,9 +181,20 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
-#define __define_initcall(fn, id) \
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define ___define_initcall(fn, id, __sec) \
+ __ADDRESSABLE(fn) \
+ asm(".section \"" #__sec ".init\", \"a\" \n" \
+ "__initcall_" #fn #id ": \n" \
+ ".long " #fn " - . \n" \
+ ".previous \n");
+#else
+#define ___define_initcall(fn, id, __sec) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn;
+ __attribute__((__section__(#__sec ".init"))) = fn;
+#endif
+
+#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
* Early initcalls run before initializing SMP.
@@ -208,13 +233,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.con_initcall.init) = fn
-
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.security_initcall.init) = fn
+#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
struct obs_kernel_param {
const char *str;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a454b8aeb938..a7083a45a26c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -46,15 +46,6 @@ extern struct cred init_cred;
#define INIT_CPU_TIMERS(s)
#endif
-#define INIT_PID_LINK(type) \
-{ \
- .node = { \
- .next = NULL, \
- .pprev = NULL, \
- }, \
- .pid = &init_struct_pid, \
-}
-
#define INIT_TASK_COMM "swapper"
/* Attach to the init_task data structure for proper alignment */
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index 84b423044088..14beaff9b445 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -21,4 +21,7 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
+extern phys_addr_t phys_initrd_start;
+extern unsigned long phys_initrd_size;
+
extern unsigned int real_root_dev;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index ef169d67df92..0605f3bf6e79 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -26,19 +26,39 @@
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
-#include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmar.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
/*
- * Intel IOMMU register specification per version 1.0 public spec.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
*/
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+#define DMA_PTE_LARGE_PAGE (1 << 7)
+#define DMA_PTE_SNP (1 << 11)
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+#define CONTEXT_PASIDE BIT_ULL(3)
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
@@ -71,6 +91,42 @@
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
+#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
+#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
#define OFFSET_STRIDE (9)
@@ -114,6 +170,11 @@
* Extended Capability Register
*/
+#define ecap_smpwc(e) (((e) >> 48) & 0x1)
+#define ecap_flts(e) (((e) >> 47) & 0x1)
+#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_smts(e) (((e) >> 43) & 0x1)
+#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
@@ -191,6 +252,7 @@
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
@@ -284,6 +346,7 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -308,6 +371,7 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -334,13 +398,18 @@ enum {
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
+#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
struct qi_desc {
- u64 low, high;
+ u64 qw0;
+ u64 qw1;
+ u64 qw2;
+ u64 qw3;
};
struct q_inval {
raw_spinlock_t q_lock;
- struct qi_desc *desc; /* invalidation queue */
+ void *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
int free_tail; /* last free entry */
@@ -385,6 +454,69 @@ struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+
+struct dmar_domain {
+ int nid; /* node id */
+
+ unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
+ /* Refcount of devices per iommu */
+
+
+ u16 iommu_did[DMAR_UNITS_SUPPORTED];
+ /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
+
+ bool has_iotlb_device;
+ struct list_head devices; /* all devices' list */
+ struct iova_domain iovad; /* iova's that belong to this domain */
+
+ struct dma_pte *pgd; /* virtual address */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+ int flags; /* flags to find out type of domain */
+
+ int iommu_coherency;/* indicate coherency of iommu access */
+ int iommu_snooping; /* indicate snooping control feature*/
+ int iommu_count; /* reference count of iommu */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
+ u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
+};
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -409,17 +541,8 @@ struct intel_iommu {
struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
- /* These are large and need to be contiguous, so we allocate just
- * one for now. We'll maybe want to rethink that if we truly give
- * devices away to userspace processes (e.g. for DPDK) and don't
- * want to trust that userspace will use *only* the PASID it was
- * told to. But while it's all driver-arbitrated, we're fine. */
- struct pasid_entry *pasid_table;
- struct pasid_state_entry *pasid_state_table;
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
- u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -434,6 +557,27 @@ struct intel_iommu {
u32 flags; /* Software defined flags */
};
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ struct list_head global; /* link to global list */
+ struct list_head table; /* link to pasid table */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
+ struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
+ struct dmar_domain *domain; /* pointer to domain */
+ struct pasid_table *pasid_table; /* pasid table */
+};
+
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
@@ -441,6 +585,49 @@ static inline void __iommu_flush_cache(
clflush_cache_range(addr, size);
}
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+ pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+ return pte->val & VTD_PAGE_MASK;
+#else
+ /* Must have a full atomic 64-bit read */
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+ return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+ return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+ return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
@@ -453,16 +640,22 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
+struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
+void *alloc_pgtable_page(int node);
+void free_pgtable_page(void *vaddr);
+struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
+int for_each_device_domain(int (*fn)(struct device_domain_info *info,
+ void *data), void *data);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
-extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
+int intel_svm_init(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
@@ -486,12 +679,41 @@ struct intel_svm {
int flags;
int pasid;
struct list_head devs;
+ struct list_head list;
};
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
extern const struct attribute_group *intel_iommu_groups[];
+bool context_present(struct context_entry *context);
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc);
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+extern int intel_iommu_tboot_noforce;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
+#endif
#endif
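The dma_pte helpers moved into this header pack everything into a single u64, as the bit-layout comment above describes. The following stand-alone sketch mirrors that layout with locally defined masks (read/write in bits 0-1, the superpage bit at 7, a 4KiB page mask); the constant names are illustrative assumptions, not the header's own DMA_PTE_*/VTD_PAGE_* macros.

/*
 * Stand-alone sketch of the dma_pte bit layout described above. The
 * SKETCH_* constants are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_READ		(1ULL << 0)
#define SKETCH_PTE_WRITE	(1ULL << 1)
#define SKETCH_PTE_LARGE_PAGE	(1ULL << 7)
#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_MASK	(~((1ULL << SKETCH_PAGE_SHIFT) - 1))

struct sketch_pte { uint64_t val; };

static bool sketch_pte_present(const struct sketch_pte *pte)
{
	/* present if either the read or the write permission bit is set */
	return (pte->val & (SKETCH_PTE_READ | SKETCH_PTE_WRITE)) != 0;
}

static uint64_t sketch_pte_addr(const struct sketch_pte *pte)
{
	/* bits 12-63 hold the host physical page address */
	return pte->val & SKETCH_PAGE_MASK;
}

int main(void)
{
	struct sketch_pte pte = { .val = 0x12345000ULL | SKETCH_PTE_READ |
					 SKETCH_PTE_WRITE | SKETCH_PTE_LARGE_PAGE };

	printf("present=%d super=%d addr=0x%llx\n",
	       sketch_pte_present(&pte),
	       (pte.val & SKETCH_PTE_LARGE_PAGE) != 0,
	       (unsigned long long)sketch_pte_addr(&pte));
	return 0;
}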
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index eeceac3376fc..c672f34235e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -45,7 +45,7 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
@@ -247,10 +247,23 @@ struct irq_affinity_notify {
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets:		Number of affinitized sets (the length of the *sets array)
+ * @sets:		Array holding the number of vectors in each set
*/
struct irq_affinity {
int pre_vectors;
int post_vectors;
+ int nr_sets;
+ int *sets;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask:	cpumask to hold the affinity assignment
+ * @is_managed:	1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
#if defined(CONFIG_SMP)
@@ -299,7 +312,9 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -333,7 +348,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
return 0;
}
-static inline struct cpumask *
+static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
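struct irq_affinity now carries nr_sets and a sets[] array so a driver can split the spreadable vectors (everything between pre_vectors and post_vectors) into separately affinitized groups. The sketch below only illustrates that partitioning arithmetic; the helper name and the spreading policy are assumptions, not the kernel's implementation.

/*
 * Illustrative sketch only: how pre_vectors/post_vectors and the new
 * nr_sets/sets fields partition an MSI-X vector count.
 */
#include <stdio.h>

struct sketch_affinity {
	int pre_vectors;	/* first vectors, no spreading */
	int post_vectors;	/* last vectors, no spreading */
	int nr_sets;		/* number of entries in sets[] */
	int *sets;		/* size of each affinitized set */
};

static void sketch_describe(int nvec, const struct sketch_affinity *affd)
{
	int spread = nvec - affd->pre_vectors - affd->post_vectors;
	int i, base = affd->pre_vectors;

	printf("%d vectors: %d pre, %d spread, %d post\n",
	       nvec, affd->pre_vectors, spread, affd->post_vectors);
	for (i = 0; i < affd->nr_sets; i++) {
		printf("  set %d: vectors %d..%d\n",
		       i, base, base + affd->sets[i] - 1);
		base += affd->sets[i];
	}
}

int main(void)
{
	int sets[2] = { 4, 2 };			/* e.g. 4 I/O queues + 2 poll queues */
	struct sketch_affinity affd = { 1, 1, 2, sets };

	sketch_describe(8, &affd);		/* 1 pre + 4 + 2 + 1 post = 8 */
	return 0;
}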
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3555d54bf79a..9a4258154b25 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -6,6 +6,7 @@
#include <linux/bitmap.h>
#include <linux/mm.h>
#include <linux/types.h>
+#include <linux/mm_types.h>
struct address_space;
struct fiemap_extent_info;
@@ -141,7 +142,8 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
-int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
+ const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 19938ee6eb31..e90da6b6f3d1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -124,6 +124,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
DOMAIN_ATTR_MAX,
};
@@ -166,11 +167,9 @@ struct iommu_resv_region {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
- * @map_sg: map a scatter-gather list of physically contiguous memory chunks
- * to an iommu domain
* @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
- * @tlb_range_add: Add a given iova range to the flush queue for this domain
- * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
@@ -183,8 +182,6 @@ struct iommu_resv_region {
* @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
- * @domain_set_windows: Set the number of windows for a domain
- * @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
@@ -201,8 +198,6 @@ struct iommu_ops {
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size);
- size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot);
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_range_add)(struct iommu_domain *domain,
unsigned long iova, size_t size);
@@ -227,10 +222,6 @@ struct iommu_ops {
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
- /* Set the number of windows per domain */
- int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
- /* Get the number of windows per domain */
- u32 (*domain_get_windows)(struct iommu_domain *domain);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
@@ -297,15 +288,15 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size);
-extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents,
- int prot);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			struct scatterlist *sg, unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -378,17 +369,12 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
domain->ops->iotlb_sync(domain);
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
-
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/* FSL-MC device grouping function */
+struct iommu_group *fsl_mc_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
@@ -412,6 +398,20 @@ void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+ return dev->iommu_fwspec;
+}
+
+static inline void dev_iommu_fwspec_set(struct device *dev,
+ struct iommu_fwspec *fwspec)
+{
+ dev->iommu_fwspec = fwspec;
+}
+
+int iommu_probe_device(struct device *dev);
+void iommu_release_device(struct device *dev);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -698,4 +698,11 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
#endif /* CONFIG_IOMMU_API */
+#ifdef CONFIG_IOMMU_DEBUGFS
+extern struct dentry *iommu_debugfs_dir;
+void iommu_debugfs_setup(void);
+#else
+static inline void iommu_debugfs_setup(void) {}
+#endif
+
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 9e30ed6443db..e9bfe6972aed 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -71,6 +71,19 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
/*
+ * If the calling process has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ */
+static inline int get_current_ioprio(void)
+{
+ struct io_context *ioc = current->io_context;
+
+ if (ioc)
+ return ioc->ioprio;
+ return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+}
+
+/*
* For inheritance, return the highest of the two given priorities
*/
extern int ioprio_best(unsigned short aprio, unsigned short bprio);
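get_current_ioprio() falls back to IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0) when the task has no io_context. The sketch below shows how such a priority value packs a class and per-class data into one integer; the 13-bit class shift and the class numbering are assumptions taken from the uapi ioprio definitions rather than from this hunk.

/*
 * Sketch of the ioprio value packing used by IOPRIO_PRIO_VALUE().
 * The shift and class values below are assumptions for illustration.
 */
#include <stdio.h>

#define SKETCH_IOPRIO_CLASS_SHIFT 13
#define SKETCH_IOPRIO_PRIO_VALUE(class, data) \
	(((class) << SKETCH_IOPRIO_CLASS_SHIFT) | (data))

enum { SKETCH_CLASS_NONE, SKETCH_CLASS_RT, SKETCH_CLASS_BE, SKETCH_CLASS_IDLE };

int main(void)
{
	int def = SKETCH_IOPRIO_PRIO_VALUE(SKETCH_CLASS_NONE, 0);
	int be4 = SKETCH_IOPRIO_PRIO_VALUE(SKETCH_CLASS_BE, 4);

	printf("default=0x%x best-effort/4=0x%x class=%d data=%d\n",
	       def, be4, be4 >> SKETCH_IOPRIO_CLASS_SHIFT,
	       be4 & ((1 << SKETCH_IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}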
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 928442dda565..0b93bf96693e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -75,6 +75,7 @@ struct iova_domain {
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
+ unsigned long max32_alloc_size; /* Size of last failed allocation */
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 6cea726612b7..6ab8c1bada3f 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -16,10 +16,9 @@ struct user_namespace;
struct ipc_ids {
int in_use;
unsigned short seq;
- bool tables_initialized;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
- int max_id;
+ int max_idx;
#ifdef CONFIG_CHECKPOINT_RESTORE
int next_id;
#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 41f5c086f670..ef61676cfe05 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -27,7 +27,7 @@ struct device;
 * Opaque type for an IPMI message user. One of these is needed to
* send and receive messages.
*/
-typedef struct ipmi_user *ipmi_user_t;
+struct ipmi_user;
/*
* Stuff coming from the receive interface comes as one of these.
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 7d5fd38d5282..8c4e2ab696c3 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -28,7 +28,7 @@ struct device;
*/
/* Structure for the low-level drivers. */
-typedef struct ipmi_smi *ipmi_smi_t;
+struct ipmi_smi;
/*
* Messages to/from the lower layer. The smi interface will take one
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8415bf1a9776..495e834c1367 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -274,7 +274,8 @@ struct ipv6_pinfo {
*/
dontfrag:1,
autoflowlabel:1,
- autoflowlabel_set:1;
+ autoflowlabel_set:1,
+ mc_all:1;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..def2b2aac8b1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -27,6 +27,7 @@
struct seq_file;
struct module;
struct msi_msg;
+struct irq_affinity_desc;
enum irqchip_irq_state;
/*
@@ -834,11 +835,12 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity);
+ struct module *owner,
+ const struct irq_affinity_desc *affinity);
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
@@ -1151,7 +1153,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 630a57e55db6..4500d453a63e 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,7 +16,7 @@
struct irq_sim_work_ctx {
struct irq_work work;
- int irq;
+ unsigned long *pending;
};
struct irq_sim_irq_ctx {
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 89c34b200671..950e4b2458f0 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -19,7 +19,7 @@
* the association between their DT compatible string and their
* initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
* @compstr: compatible string of the irqchip driver
* @fn: initialization function
@@ -30,7 +30,7 @@
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
* same file.
* @subtable: Subtable to be identified in MADT
* @validate: Function to be called on that subtable to check its validity.
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 0a83b4379f34..9a1a479a2bf4 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -13,6 +13,12 @@
#include <linux/types.h>
#include <linux/ioport.h>
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
enum gic_type {
GIC_V2,
GIC_V3,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9d2ea3e907d0..071b4cbdf010 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -61,6 +61,16 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
/*
* In systems with a single security state (what we emulate in KVM)
* the meaning of the interrupt group enable bits is slightly different
@@ -347,6 +357,8 @@
#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
+#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
+
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1ULL << 63)
@@ -378,6 +390,9 @@
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
#define GITS_BASER_PHYS_52_to_48(phys) \
(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -575,8 +590,10 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ bool lpi_enabled;
} __percpu *rdist;
- struct page *prop_page;
+ phys_addr_t prop_table_pa;
+ void *prop_table_va;
u64 flags;
u32 gicd_typer;
bool has_vlpis;
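GITS_BASER_ADDR_48_to_52() is the decode counterpart of the existing GITS_BASER_PHYS_52_to_48() encode: bits [51:48] of a 52-bit table address are parked in BASER bits [15:12]. A user-space round-trip check of the two macros, with GENMASK_ULL() re-implemented locally so the sketch builds on its own:

/*
 * Round-trip sketch for the 52-bit ITS table address encoding above.
 * Everything except SK_GENMASK_ULL() mirrors the two macros in the hunk.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define SK_PHYS_52_to_48(phys) \
	(((phys) & SK_GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
#define SK_ADDR_48_to_52(baser) \
	(((baser) & SK_GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)

int main(void)
{
	uint64_t phys  = 0x000a123456780000ULL;	/* 52-bit address, 64KiB aligned */
	uint64_t baser = SK_PHYS_52_to_48(phys);
	uint64_t back  = SK_ADDR_48_to_52(baser);

	printf("phys=0x%llx baser=0x%llx decoded=0x%llx match=%d\n",
	       (unsigned long long)phys, (unsigned long long)baser,
	       (unsigned long long)back, back == phys);
	return 0;
}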
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 68d8b1f73682..626179077bb0 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -65,11 +65,16 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
+
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -94,6 +99,7 @@
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_GROUP1 (1 << 30)
#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
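With GICD_INT_DEF_PRI moved to the common header and the GICD_IIDR field masks added here, decoding the distributor's IIDR and building the replicated default-priority word are plain shift-and-mask operations. A small illustrative sketch (the sample IIDR value is hypothetical):

/*
 * Sketch: decoding the GICD_IIDR fields defined above and showing what
 * GICD_INT_DEF_PRI_X4 expands to.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_IMPLEMENTER(iidr)	(((iidr) >> 0)  & 0xfff)
#define SK_REVISION(iidr)	(((iidr) >> 12) & 0xf)
#define SK_VARIANT(iidr)	(((iidr) >> 16) & 0xf)
#define SK_PRODUCT_ID(iidr)	(((iidr) >> 24) & 0xff)

#define SK_DEF_PRI	0xa0U
#define SK_DEF_PRI_X4	((SK_DEF_PRI << 24) | (SK_DEF_PRI << 16) | \
			 (SK_DEF_PRI << 8)  | SK_DEF_PRI)

int main(void)
{
	uint32_t iidr = 0x0201043b;	/* hypothetical value for illustration */

	printf("impl=0x%03x rev=%u variant=%u product=0x%02x\n",
	       SK_IMPLEMENTER(iidr), SK_REVISION(iidr),
	       SK_VARIANT(iidr), SK_PRODUCT_ID(iidr));
	printf("default priority x4 = 0x%08x\n", SK_DEF_PRI_X4);	/* 0xa0a0a0a0 */
	return 0;
}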
diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h
new file mode 100644
index 000000000000..1160fa3769ae
--- /dev/null
+++ b/include/linux/irqchip/irq-madera.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef IRQCHIP_MADERA_H
+#define IRQCHIP_MADERA_H
+
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/core.h>
+
+#define MADERA_IRQ_FLL1_LOCK 0
+#define MADERA_IRQ_FLL2_LOCK 1
+#define MADERA_IRQ_FLL3_LOCK 2
+#define MADERA_IRQ_FLLAO_LOCK 3
+#define MADERA_IRQ_CLK_SYS_ERR 4
+#define MADERA_IRQ_CLK_ASYNC_ERR 5
+#define MADERA_IRQ_CLK_DSP_ERR 6
+#define MADERA_IRQ_HPDET 7
+#define MADERA_IRQ_MICDET1 8
+#define MADERA_IRQ_MICDET2 9
+#define MADERA_IRQ_JD1_RISE 10
+#define MADERA_IRQ_JD1_FALL 11
+#define MADERA_IRQ_JD2_RISE 12
+#define MADERA_IRQ_JD2_FALL 13
+#define MADERA_IRQ_MICD_CLAMP_RISE 14
+#define MADERA_IRQ_MICD_CLAMP_FALL 15
+#define MADERA_IRQ_DRC2_SIG_DET 16
+#define MADERA_IRQ_DRC1_SIG_DET 17
+#define MADERA_IRQ_ASRC1_IN1_LOCK 18
+#define MADERA_IRQ_ASRC1_IN2_LOCK 19
+#define MADERA_IRQ_ASRC2_IN1_LOCK 20
+#define MADERA_IRQ_ASRC2_IN2_LOCK 21
+#define MADERA_IRQ_DSP_IRQ1 22
+#define MADERA_IRQ_DSP_IRQ2 23
+#define MADERA_IRQ_DSP_IRQ3 24
+#define MADERA_IRQ_DSP_IRQ4 25
+#define MADERA_IRQ_DSP_IRQ5 26
+#define MADERA_IRQ_DSP_IRQ6 27
+#define MADERA_IRQ_DSP_IRQ7 28
+#define MADERA_IRQ_DSP_IRQ8 29
+#define MADERA_IRQ_DSP_IRQ9 30
+#define MADERA_IRQ_DSP_IRQ10 31
+#define MADERA_IRQ_DSP_IRQ11 32
+#define MADERA_IRQ_DSP_IRQ12 33
+#define MADERA_IRQ_DSP_IRQ13 34
+#define MADERA_IRQ_DSP_IRQ14 35
+#define MADERA_IRQ_DSP_IRQ15 36
+#define MADERA_IRQ_DSP_IRQ16 37
+#define MADERA_IRQ_HP1L_SC 38
+#define MADERA_IRQ_HP1R_SC 39
+#define MADERA_IRQ_HP2L_SC 40
+#define MADERA_IRQ_HP2R_SC 41
+#define MADERA_IRQ_HP3L_SC 42
+#define MADERA_IRQ_HP3R_SC 43
+#define MADERA_IRQ_SPKOUTL_SC 44
+#define MADERA_IRQ_SPKOUTR_SC 45
+#define MADERA_IRQ_HP1L_ENABLE_DONE 46
+#define MADERA_IRQ_HP1R_ENABLE_DONE 47
+#define MADERA_IRQ_HP2L_ENABLE_DONE 48
+#define MADERA_IRQ_HP2R_ENABLE_DONE 49
+#define MADERA_IRQ_HP3L_ENABLE_DONE 50
+#define MADERA_IRQ_HP3R_ENABLE_DONE 51
+#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52
+#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53
+#define MADERA_IRQ_SPK_SHUTDOWN 54
+#define MADERA_IRQ_SPK_OVERHEAT 55
+#define MADERA_IRQ_SPK_OVERHEAT_WARN 56
+#define MADERA_IRQ_GPIO1 57
+#define MADERA_IRQ_GPIO2 58
+#define MADERA_IRQ_GPIO3 59
+#define MADERA_IRQ_GPIO4 60
+#define MADERA_IRQ_GPIO5 61
+#define MADERA_IRQ_GPIO6 62
+#define MADERA_IRQ_GPIO7 63
+#define MADERA_IRQ_GPIO8 64
+#define MADERA_IRQ_DSP1_BUS_ERR 65
+#define MADERA_IRQ_DSP2_BUS_ERR 66
+#define MADERA_IRQ_DSP3_BUS_ERR 67
+#define MADERA_IRQ_DSP4_BUS_ERR 68
+#define MADERA_IRQ_DSP5_BUS_ERR 69
+#define MADERA_IRQ_DSP6_BUS_ERR 70
+#define MADERA_IRQ_DSP7_BUS_ERR 71
+
+#define MADERA_NUM_IRQ 72
+
+/*
+ * These wrapper functions are for use by other child drivers of the
+ * same parent MFD.
+ */
+static inline int madera_get_irq_mapping(struct madera *madera, int irq)
+{
+ if (!madera->irq_dev)
+ return -ENODEV;
+
+ return regmap_irq_get_virq(madera->irq_data, irq);
+}
+
+static inline int madera_request_irq(struct madera *madera, int irq,
+ const char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name,
+ data);
+}
+
+static inline void madera_free_irq(struct madera *madera, int irq, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+static inline int madera_set_irq_wake(struct madera *madera, int irq, int on)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+
+#endif
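A hypothetical child driver of the same Madera MFD would use these wrappers roughly as below; foo_probe(), foo_hpdet_handler() and the interrupt name are invented for illustration, and the surrounding driver plumbing is omitted.

#include <linux/interrupt.h>
#include <linux/irqchip/irq-madera.h>

static irqreturn_t foo_hpdet_handler(int irq, void *data)
{
	/* jack-detect handling would go here */
	return IRQ_HANDLED;
}

static int foo_probe(struct madera *madera, void *priv)
{
	int ret;

	/* resolves the chip-level HPDET line to a virq and requests it threaded */
	ret = madera_request_irq(madera, MADERA_IRQ_HPDET, "foo HPDET",
				 foo_hpdet_handler, priv);
	if (ret)
		return ret;

	madera_set_irq_wake(madera, MADERA_IRQ_HPDET, 1);
	return 0;
}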
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index dccfa65aee96..35965f41d7be 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -43,6 +43,7 @@ struct irq_chip;
struct irq_data;
struct cpumask;
struct seq_file;
+struct irq_affinity_desc;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -75,6 +76,7 @@ struct irq_fwspec {
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
@@ -265,7 +267,7 @@ extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -448,7 +450,8 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity);
+ bool realloc,
+ const struct irq_affinity_desc *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9700f00bbc04..21619c92c377 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,9 +15,20 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+/* Currently trace_softirqs_on/off is used only by lockdep */
+#ifdef CONFIG_PROVE_LOCKING
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
+ extern void lockdep_hardirqs_on(unsigned long ip);
+ extern void lockdep_hardirqs_off(unsigned long ip);
+#else
+ static inline void trace_softirqs_on(unsigned long ip) { }
+ static inline void trace_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on(unsigned long ip) { }
+ static inline void lockdep_hardirqs_off(unsigned long ip) { }
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define trace_hardirq_context(p) ((p)->hardirq_context)
@@ -43,8 +54,6 @@ do { \
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
-# define trace_softirqs_on(ip) do { } while (0)
-# define trace_softirqs_off(ip) do { } while (0)
# define trace_hardirq_context(p) 0
# define trace_softirq_context(p) 0
# define trace_hardirqs_enabled(p) 0
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b708e5169d1d..0f919d5fe84f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -575,6 +575,7 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
+ T_SWITCH,
T_FLUSH,
T_COMMIT,
T_COMMIT_DFLUSH,
@@ -662,13 +663,13 @@ struct transaction_s
/*
* Number of outstanding updates running on this transaction
- * [t_handle_lock]
+ * [none]
*/
atomic_t t_updates;
/*
* Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
+ * handle but not yet modified. [none]
*/
atomic_t t_outstanding_credits;
@@ -690,7 +691,7 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [t_handle_lock]
+ * How many handles used this transaction? [none]
*/
atomic_t t_handle_count;
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 1a0b6f17a5d6..5df6a621e464 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -119,6 +119,68 @@ struct static_key {
#ifdef HAVE_JUMP_LABEL
#include <asm/jump_label.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
+
+struct jump_entry {
+ s32 code;
+ s32 target;
+ long key; // key may be far away from the core kernel under KASLR
+};
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->code + entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return (unsigned long)&entry->target + entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ long offset = entry->key & ~3L;
+
+ return (struct static_key *)((unsigned long)&entry->key + offset);
+}
+
+#else
+
+static inline unsigned long jump_entry_code(const struct jump_entry *entry)
+{
+ return entry->code;
+}
+
+static inline unsigned long jump_entry_target(const struct jump_entry *entry)
+{
+ return entry->target;
+}
+
+static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
+{
+ return (struct static_key *)((unsigned long)entry->key & ~3UL);
+}
+
+#endif
+
+static inline bool jump_entry_is_branch(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 1UL;
+}
+
+static inline bool jump_entry_is_init(const struct jump_entry *entry)
+{
+ return (unsigned long)entry->key & 2UL;
+}
+
+static inline void jump_entry_set_init(struct jump_entry *entry)
+{
+ entry->key |= 2;
+}
+
+#endif
#endif
#ifndef __ASSEMBLY__
@@ -151,7 +213,6 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
-extern void jump_label_invalidate_initmem(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -199,8 +260,6 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
-static inline void jump_label_invalidate_initmem(void) {}
-
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
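Under CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE the new jump_entry stores code and target as 32-bit self-relative offsets and keeps the branch/init flags in the low two bits of the key offset, exactly as the accessors above decode them. A user-space sketch of that encoding (struct and stand-in symbols local to the sketch):

/*
 * Sketch of the relative jump_entry encoding: offsets are relative to the
 * field that holds them; the low two bits of the key offset are flags.
 */
#include <stdint.h>
#include <stdio.h>

struct sk_jump_entry {
	int32_t code;
	int32_t target;
	long    key;		/* self-relative offset, low 2 bits = flags */
};

static struct sk_jump_entry entry;
static int sk_key;		/* stands in for a struct static_key */
static char sk_site[16];	/* stands in for the patched code location */

int main(void)
{
	/* encode: store each value relative to the field holding it */
	entry.code = (int32_t)((intptr_t)sk_site - (intptr_t)&entry.code);
	entry.key  = ((intptr_t)&sk_key - (intptr_t)&entry.key) | 2L; /* init flag */

	/* decode, mirroring jump_entry_code() and jump_entry_key() above */
	void *code = (void *)((uintptr_t)&entry.code + entry.code);
	void *key  = (void *)((uintptr_t)&entry.key + (entry.key & ~3L));

	printf("code ok=%d key ok=%d is_init=%d is_branch=%d\n",
	       code == (void *)sk_site, key == (void *)&sk_key,
	       !!(entry.key & 2), !!(entry.key & 1));
	return 0;
}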
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 46aae129917c..b40ea104dd36 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -14,13 +14,13 @@ struct task_struct;
#include <asm/kasan.h>
#include <asm/pgtable.h>
-extern unsigned char kasan_zero_page[PAGE_SIZE];
-extern pte_t kasan_zero_pte[PTRS_PER_PTE];
-extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
-extern pud_t kasan_zero_pud[PTRS_PER_PUD];
-extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
+extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
-int kasan_populate_zero_shadow(const void *shadow_start,
+int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
static inline void *kasan_mem_to_shadow(const void *addr)
@@ -45,22 +45,24 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
-void kasan_cache_shrink(struct kmem_cache *cache);
-void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
-void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
+void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object);
-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
-void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
- gfp_t flags);
-void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
+void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags);
+void * __must_check kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags);
-void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
struct kasan_cache {
@@ -97,27 +99,40 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
-static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
-static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_init_slab_obj(struct kmem_cache *cache,
- const void *object) {}
+static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object)
+{
+ return (void *)object;
+}
-static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
+{
+ return ptr;
+}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
-static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
- size_t size, gfp_t flags) {}
-static inline void kasan_krealloc(const void *object, size_t new_size,
- gfp_t flags) {}
+static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size, gfp_t flags)
+{
+ return (void *)object;
+}
+static inline void *kasan_krealloc(const void *object, size_t new_size,
+ gfp_t flags)
+{
+ return (void *)object;
+}
-static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
- gfp_t flags) {}
+static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+ return object;
+}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
@@ -140,4 +155,40 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
#endif /* CONFIG_KASAN */
+#ifdef CONFIG_KASAN_GENERIC
+
+#define KASAN_SHADOW_INIT 0
+
+void kasan_cache_shrink(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
+
+#else /* CONFIG_KASAN_GENERIC */
+
+static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
+static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+
+#endif /* CONFIG_KASAN_GENERIC */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_SHADOW_INIT 0xFF
+
+void kasan_init_tags(void);
+
+void *kasan_reset_tag(const void *addr);
+
+void kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+
+#else /* CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_init_tags(void) { }
+
+static inline void *kasan_reset_tag(const void *addr)
+{
+ return (void *)addr;
+}
+
+#endif /* CONFIG_KASAN_SW_TAGS */
+
#endif /* LINUX_KASAN_H */
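The CONFIG_KASAN_SW_TAGS additions revolve around storing a tag in the otherwise-unused top byte of a pointer, which kasan_reset_tag() strips before the pointer is used as a plain address. A deliberately simplified user-space illustration of that idea follows; the real arm64 code sign-extends from bit 55 rather than masking, and the tag byte here is arbitrary.

/*
 * Simplified sketch of software pointer tagging on a 64-bit host.
 * Only the untagged pointer is dereferenceable in this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_TAG_SHIFT	56
#define SK_TAG_MASK	(0xffULL << SK_TAG_SHIFT)

static void *sk_set_tag(void *addr, uint8_t tag)
{
	uint64_t v = ((uint64_t)(uintptr_t)addr & ~SK_TAG_MASK) |
		     ((uint64_t)tag << SK_TAG_SHIFT);

	return (void *)(uintptr_t)v;
}

static void *sk_reset_tag(const void *addr)
{
	/* counterpart of kasan_reset_tag(): drop the tag byte again */
	return (void *)(uintptr_t)((uint64_t)(uintptr_t)addr & ~SK_TAG_MASK);
}

int main(void)
{
	int x;
	void *tagged = sk_set_tag(&x, 0x2a);

	printf("tag=0x%02x untagged ok=%d\n",
	       (unsigned int)((uint64_t)(uintptr_t)tagged >> SK_TAG_SHIFT),
	       sk_reset_tag(tagged) == (void *)&x);
	return 0;
}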
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 8de55e4b5ee9..8c3f8c14eeaa 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -12,11 +12,13 @@ enum kcore_type {
KCORE_VMEMMAP,
KCORE_USER,
KCORE_OTHER,
+ KCORE_REMAP,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
+ unsigned long vaddr;
size_t size;
int type;
};
@@ -35,12 +37,23 @@ struct vmcoredd_node {
};
#ifdef CONFIG_PROC_KCORE
-extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+void __init kclist_add(struct kcore_list *, void *, size_t, int type);
+static inline
+void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
+{
+ m->vaddr = (unsigned long)vaddr;
+ kclist_add(m, addr, sz, KCORE_REMAP);
+}
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}
+
+static inline
+void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
+{
+}
#endif
#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 941dc0a5a877..8f0e68e250a7 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,7 +85,23 @@
* arguments just once each.
*/
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+/**
+ * round_up - round up to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+/**
+ * round_down - round down to next specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
#define round_down(x, y) ((x) & ~__round_mask(x, y))
/**
@@ -110,13 +126,30 @@
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
-/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ *
+ * The `const' here prevents gcc-3.3 from calling __divdi3
+ */
#define roundup(x, y) ( \
{ \
const typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
} \
)
+/**
+ * rounddown - round down to next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
#define rounddown(x, y) ( \
{ \
typeof(x) __x = (x); \
@@ -494,6 +527,7 @@ static inline u32 int_sqrt64(u64 x)
extern void bust_spinlocks(int yes);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
+extern unsigned long panic_print;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
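The new kernel-doc above distinguishes the power-of-two-only round_up()/round_down() from the general roundup()/rounddown(). A worked example, with the macros restated locally so it builds in user space:

/*
 * Worked example for the rounding helpers documented above.
 * round_up()/round_down() require a power-of-two multiple;
 * roundup()/rounddown() accept any multiple.
 */
#include <stdio.h>

#define sk_round_mask(x, y)	((__typeof__(x))((y) - 1))
#define sk_round_up(x, y)	((((x) - 1) | sk_round_mask(x, y)) + 1)
#define sk_round_down(x, y)	((x) & ~sk_round_mask(x, y))
#define sk_roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define sk_rounddown(x, y)	(((x) / (y)) * (y))

int main(void)
{
	printf("round_up(13, 8)   = %d\n", sk_round_up(13, 8));		/* 16 */
	printf("round_down(13, 8) = %d\n", sk_round_down(13, 8));	/* 8  */
	printf("roundup(13, 5)    = %d\n", sk_roundup(13, 5));		/* 15 */
	printf("rounddown(13, 5)  = %d\n", sk_rounddown(13, 5));	/* 10 */
	return 0;
}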
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f7ee52..5b36b1287a5a 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -477,10 +477,11 @@ static inline void kernfs_init(void) { }
* @buf: buffer to copy @kn's name into
* @buflen: size of @buf
*
- * Builds and returns the full path of @kn in @buf of @buflen bytes. The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
+ * If @kn is NULL, the result will be "(null)".
+ *
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
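With the corrected description, kernfs_path() reports the full path length even when the copy was truncated, so callers can detect truncation or size a retry buffer. A hedged usage sketch (kernel context; show_path() and the buffer size are placeholders):

#include <linux/kernfs.h>
#include <linux/printk.h>

/* Hypothetical caller: logs a node's path and flags truncation. */
static void show_path(struct kernfs_node *kn)
{
	char buf[64];
	int len = kernfs_path(kn, buf, sizeof(buf));

	if (len < 0)
		return;					/* -errno */
	if ((size_t)len >= sizeof(buf))
		pr_warn("path truncated, need %d bytes: %s\n", len + 1, buf);
	else
		pr_info("path: %s\n", buf);
}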
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e4e638fb505..b9b1bc5f9669 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -143,6 +143,15 @@ extern const struct kexec_file_ops * const kexec_file_loaders[];
int kexec_image_probe_default(struct kimage *image, void *buf,
unsigned long buf_len);
+int kexec_image_post_load_cleanup_default(struct kimage *image);
+
+/*
+ * If kexec_buf.mem is set to this value, kexec_locate_mem_hole()
+ * will try to allocate free memory. Arch may overwrite it.
+ */
+#ifndef KEXEC_BUF_MEM_UNKNOWN
+#define KEXEC_BUF_MEM_UNKNOWN 0
+#endif
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
@@ -174,6 +183,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
@@ -183,8 +193,6 @@ int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *));
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 05d8fb5a06c4..bc9af551fc83 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -17,6 +17,9 @@
#ifdef CONFIG_KEYS
+struct kernel_pkey_query;
+struct kernel_pkey_params;
+
/*
* key under-construction record
* - passed to the request_key actor if supplied
@@ -155,6 +158,14 @@ struct key_type {
*/
struct key_restriction *(*lookup_restriction)(const char *params);
+ /* Asymmetric key accessor functions. */
+ int (*asym_query)(const struct kernel_pkey_params *params,
+ struct kernel_pkey_query *info);
+ int (*asym_eds_op)(struct kernel_pkey_params *params,
+ const void *in, void *out);
+ int (*asym_verify_signature)(struct kernel_pkey_params *params,
+ const void *in, const void *in2);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
diff --git a/include/linux/key.h b/include/linux/key.h
index e58ee10f6e58..7099985e35a9 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -346,6 +346,9 @@ static inline key_serial_t key_serial(const struct key *key)
extern void key_set_timeout(struct key *, unsigned);
+extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
+ key_perm_t perm);
+
/*
* The permissions required on a key that we're looking up.
*/
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644
index 000000000000..c7c48c79ce0e
--- /dev/null
+++ b/include/linux/keyctl.h
@@ -0,0 +1,46 @@
+/* keyctl kernel bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef __LINUX_KEYCTL_H
+#define __LINUX_KEYCTL_H
+
+#include <uapi/linux/keyctl.h>
+
+struct kernel_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+};
+
+enum kernel_pkey_operation {
+ kernel_pkey_encrypt,
+ kernel_pkey_decrypt,
+ kernel_pkey_sign,
+ kernel_pkey_verify,
+};
+
+struct kernel_pkey_params {
+ struct key *key;
+ const char *encoding; /* Encoding (eg. "oaep" or "raw" for none) */
+ const char *hash_algo; /* Digest algorithm used (eg. "sha1") or NULL if N/A */
+ char *info; /* Modified info string to be released later */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (enc/dec/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ enum kernel_pkey_operation op : 8;
+};
+
+#endif /* __LINUX_KEYCTL_H */
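A caller fills struct kernel_pkey_params and dispatches through the new asym_* hooks on the key type. The sketch below is a hypothetical helper for a signing operation; the "pkcs1"/"sha256" strings and the missing permission and query checks are simplifying assumptions.

#include <linux/errno.h>
#include <linux/key-type.h>
#include <linux/keyctl.h>

/* Hypothetical signing helper; real callers query the key and check
 * permissions first. */
static int sketch_pkey_sign(struct key *key, const void *digest, u32 dlen,
			    void *sig, u32 siglen)
{
	struct kernel_pkey_params params = {
		.key       = key,
		.encoding  = "pkcs1",		/* assumed RSA PKCS#1 padding */
		.hash_algo = "sha256",
		.in_len    = dlen,
		.out_len   = siglen,		/* union member for enc/dec/sign */
		.op        = kernel_pkey_sign,
	};

	if (!key->type->asym_eds_op)
		return -ENOPKG;

	return key->type->asym_eds_op(&params, digest, sig);
}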
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index e465bb15912d..fbf144aaa749 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -177,22 +177,28 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct pt_regs *regs);
/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU. If you don't implement
+ * kgdb_call_nmi_hook() a default will be used.
+ */
+
+extern void kgdb_call_nmi_hook(void *ignored);
+
+/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
*
* On non-SMP systems, this is not called.
*/
-extern void kgdb_roundup_cpus(unsigned long flags);
+extern void kgdb_roundup_cpus(void);
/**
* kgdb_arch_set_pc - Generic call back to the program counter
@@ -281,7 +287,7 @@ struct kgdb_io {
int is_console;
};
-extern struct kgdb_arch arch_kgdb_ops;
+extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e909413e4e38..e07e91daaacc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -242,10 +242,13 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
struct kprobe_insn_cache {
struct mutex mutex;
@@ -379,6 +382,9 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
+void *alloc_insn_page(void);
+void free_insn_page(void *page);
+
#else /* !CONFIG_KPROBES: */
static inline int kprobes_built_in(void)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 29220724bf1c..cb00a0268061 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -53,10 +53,7 @@ static inline void kref_get(struct kref *kref)
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
- * in as this function. If the caller does pass kfree to this
- * function, you will be publicly mocked mercilessly by the kref
- * maintainer, and anyone else who happens to notice it. You have
- * been warned.
+ * in as this function.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7c7362dd2faa..c38cc5eb7e73 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -449,6 +449,7 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
+ bool manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
@@ -694,7 +695,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
@@ -733,8 +735,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
@@ -755,7 +755,9 @@ int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty);
+ struct kvm_dirty_log *log, bool *flush);
+int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log, bool *flush);
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -764,9 +766,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log);
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1289,8 +1295,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end, bool blockable);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
diff --git a/include/linux/lantiq.h b/include/linux/lantiq.h
new file mode 100644
index 000000000000..67921169d84d
--- /dev/null
+++ b/include/linux/lantiq.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_LANTIQ_H
+#define __LINUX_LANTIQ_H
+
+#ifdef CONFIG_LANTIQ
+#include <lantiq_soc.h>
+#else
+
+#ifndef LTQ_EARLY_ASC
+#define LTQ_EARLY_ASC 0
+#endif
+
+#ifndef CPHYSADDR
+#define CPHYSADDR(a) 0
+#endif
+
+static inline struct clk *clk_get_fpi(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_LANTIQ */
+#endif /* __LINUX_LANTIQ_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 834683d603f9..5263f87e1d2c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -22,6 +22,7 @@
#include <linux/workqueue.h>
struct device;
+struct led_pattern;
/*
* LED Core
*/
@@ -50,6 +51,7 @@ struct led_classdev {
#define LED_PANIC_INDICATOR BIT(20)
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
+#define LED_INIT_DEFAULT_TRIGGER BIT(23)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -88,6 +90,10 @@ struct led_classdev {
unsigned long *delay_on,
unsigned long *delay_off);
+ int (*pattern_set)(struct led_classdev *led_cdev,
+ struct led_pattern *pattern, u32 len, int repeat);
+ int (*pattern_clear)(struct led_classdev *led_cdev);
+
struct device *dev;
const struct attribute_group **groups;
@@ -472,4 +478,34 @@ static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
#endif
+/**
+ * struct led_pattern - pattern interval settings
+ * @delta_t: pattern interval delay, in milliseconds
+ * @brightness: pattern interval brightness
+ */
+struct led_pattern {
+ u32 delta_t;
+ int brightness;
+};
+
+enum led_audio {
+ LED_AUDIO_MUTE, /* master mute LED */
+ LED_AUDIO_MICMUTE, /* mic mute LED */
+ NUM_AUDIO_LEDS
+};
+
+#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO)
+enum led_brightness ledtrig_audio_get(enum led_audio type);
+void ledtrig_audio_set(enum led_audio type, enum led_brightness state);
+#else
+static inline enum led_brightness ledtrig_audio_get(enum led_audio type)
+{
+ return LED_OFF;
+}
+static inline void ledtrig_audio_set(enum led_audio type,
+ enum led_brightness state)
+{
+}
+#endif
+
#endif /* __LINUX_LEDS_H_INCLUDED */
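A trigger or driver using the new ->pattern_set() hook hands it an array of struct led_pattern steps. The sketch below builds a hypothetical four-step pattern and applies it; the helper name is invented, and repeat == -1 meaning "loop forever" is an assumption about the caller's convention.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/leds.h>

/* Hypothetical four-step "breathing" pattern, 250ms per step. */
static struct led_pattern sketch_breathe[] = {
	{ .delta_t = 250, .brightness = 0   },
	{ .delta_t = 250, .brightness = 128 },
	{ .delta_t = 250, .brightness = 255 },
	{ .delta_t = 250, .brightness = 128 },
};

static int sketch_apply(struct led_classdev *cdev)
{
	if (!cdev->pattern_set)
		return -EOPNOTSUPP;

	/* assumption: repeat == -1 requests an endless loop */
	return cdev->pattern_set(cdev, sketch_breathe,
				 ARRAY_SIZE(sketch_breathe), -1);
}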
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bc4f87cbe7f4..68133842e6d7 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -135,7 +135,6 @@ enum {
ATA_SHT_EMULATED = 1,
ATA_SHT_THIS_ID = -1,
- ATA_SHT_USE_CLUSTERING = 1,
/* struct ata_taskfile flags */
ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
@@ -523,7 +522,8 @@ enum ata_lpm_policy {
ATA_LPM_MAX_POWER,
ATA_LPM_MED_POWER,
ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */
- ATA_LPM_MIN_POWER,
+ ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */
+ ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */
};
enum ata_lpm_hints {
@@ -1359,7 +1359,6 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
- .use_clustering = ATA_SHT_USE_CLUSTERING, \
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
@@ -1858,8 +1857,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
-extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc,
- unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index c6ac1fe7ec68..edb0f0c30904 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,6 +2,7 @@
#ifndef LIBFDT_ENV_H
#define LIBFDT_ENV_H
+#include <linux/kernel.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 097072c5a852..5440f11b0907 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -38,6 +38,10 @@ enum {
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
NDD_LOCKED = 2,
+ /* memory under security wipes should not be accessed */
+ NDD_SECURITY_OVERWRITE = 3,
+ /* tracking whether or not there is a pending device reference */
+ NDD_WORK_PENDING = 4,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -87,7 +91,7 @@ struct nvdimm_bus_descriptor {
ndctl_fn ndctl;
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
- struct nvdimm *nvdimm, unsigned int cmd);
+ struct nvdimm *nvdimm, unsigned int cmd, void *data);
};
struct nd_cmd_desc {
@@ -155,6 +159,46 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
+enum nvdimm_security_state {
+ NVDIMM_SECURITY_DISABLED,
+ NVDIMM_SECURITY_UNLOCKED,
+ NVDIMM_SECURITY_LOCKED,
+ NVDIMM_SECURITY_FROZEN,
+ NVDIMM_SECURITY_OVERWRITE,
+};
+
+#define NVDIMM_PASSPHRASE_LEN 32
+#define NVDIMM_KEY_DESC_LEN 22
+
+struct nvdimm_key_data {
+ u8 data[NVDIMM_PASSPHRASE_LEN];
+};
+
+enum nvdimm_passphrase_type {
+ NVDIMM_USER,
+ NVDIMM_MASTER,
+};
+
+struct nvdimm_security_ops {
+ enum nvdimm_security_state (*state)(struct nvdimm *nvdimm,
+ enum nvdimm_passphrase_type pass_type);
+ int (*freeze)(struct nvdimm *nvdimm);
+ int (*change_key)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *old_data,
+ const struct nvdimm_key_data *new_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*unlock)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*disable)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*erase)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data,
+ enum nvdimm_passphrase_type pass_type);
+ int (*overwrite)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
+ int (*query_overwrite)(struct nvdimm *nvdimm);
+};
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -165,6 +209,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
+struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
@@ -175,10 +220,21 @@ const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
-struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
- const struct attribute_group **groups, unsigned long flags,
- unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq);
+struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq, const char *dimm_id,
+ const struct nvdimm_security_ops *sec_ops);
+static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq)
+{
+ return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
+ cmd_mask, num_flush, flush_wpq, NULL, NULL);
+}
+
+int nvdimm_security_setup_events(struct nvdimm *nvdimm);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
@@ -204,6 +260,16 @@ u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
+int nvdimm_in_overwrite(struct nvdimm *nvdimm);
+
+static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+
+ return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc);
+}
#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
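
Quick illustration (a sketch, not part of the diff above): the libnvdimm hunks add a per-DIMM security vtable and an nvdimm_ctl() shorthand that routes through the bus ->ndctl() handler. Below is a minimal sketch of how a bus provider might wire a freeze handler into the new sec_ops argument of __nvdimm_create(); the foo_* names and the FOO_CMD_FREEZE_LOCK command number are hypothetical.

#include <linux/libnvdimm.h>

#define FOO_CMD_FREEZE_LOCK	0xf1	/* hypothetical provider-specific command */

/* Freeze the security state of one DIMM by round-tripping through ->ndctl(). */
static int foo_security_freeze(struct nvdimm *nvdimm)
{
	int cmd_rc, rc;

	rc = nvdimm_ctl(nvdimm, FOO_CMD_FREEZE_LOCK, NULL, 0, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static const struct nvdimm_security_ops foo_security_ops = {
	.freeze = foo_security_freeze,
};

/* ...then pass &foo_security_ops as the trailing sec_ops argument of
 * __nvdimm_create(); the nvdimm_create() wrapper keeps passing NULL. */
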
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e9e0d1c7eaf5..5d865a5d5cdc 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -86,11 +86,11 @@ struct nvm_chk_meta;
typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
- sector_t, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
+ struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
@@ -305,6 +305,8 @@ struct nvm_rq {
u64 ppa_status; /* ppa media status */
int error;
+ int is_seq; /* Sequential hint flag. 1.2 only */
+
void *private;
};
@@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
return rqdata + 1;
}
+static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
+{
+ return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+}
+
enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
@@ -350,6 +357,7 @@ struct nvm_geo {
u32 clba; /* sectors per chunk */
u16 csecs; /* sector size */
u16 sos; /* out-of-band area size */
+ bool ext; /* metadata in extended data buffer */
/* device write constraints */
u32 ws_min; /* minimum write size */
@@ -485,6 +493,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
return l;
}
+static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
+ struct ppa_addr p)
+{
+ struct nvm_geo *geo = &dev->geo;
+ u64 caddr;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
+
+ caddr = (u64)p.g.pg << ppaf->pg_offset;
+ caddr |= (u64)p.g.pl << ppaf->pln_offset;
+ caddr |= (u64)p.g.sec << ppaf->sec_offset;
+ } else {
+ caddr = p.m.sec;
+ }
+
+ return caddr;
+}
+
+static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
+ void *addrf, u32 ppa32)
+{
+ struct ppa_addr ppa64;
+
+ ppa64.ppa = 0;
+
+ if (ppa32 == -1) {
+ ppa64.ppa = ADDR_EMPTY;
+ } else if (ppa32 & (1U << 31)) {
+ ppa64.c.line = ppa32 & ((~0U) >> 1);
+ ppa64.c.is_cached = 1;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
+ ppaf->ch_offset;
+ ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
+ ppaf->lun_offset;
+ ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
+ ppaf->blk_offset;
+ ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
+ ppaf->pg_offset;
+ ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
+ ppaf->pln_offset;
+ ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
+ ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
+ lbaf->ch_offset;
+ ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
+ lbaf->lun_offset;
+ ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
+ lbaf->chk_offset;
+ ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
+ lbaf->sec_offset;
+ }
+ }
+
+ return ppa64;
+}
+
+static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
+ void *addrf, struct ppa_addr ppa64)
+{
+ u32 ppa32 = 0;
+
+ if (ppa64.ppa == ADDR_EMPTY) {
+ ppa32 = ~0U;
+ } else if (ppa64.c.is_cached) {
+ ppa32 |= ppa64.c.line;
+ ppa32 |= 1U << 31;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa32 |= ppa64.g.ch << ppaf->ch_offset;
+ ppa32 |= ppa64.g.lun << ppaf->lun_offset;
+ ppa32 |= ppa64.g.blk << ppaf->blk_offset;
+ ppa32 |= ppa64.g.pg << ppaf->pg_offset;
+ ppa32 |= ppa64.g.pl << ppaf->pln_offset;
+ ppa32 |= ppa64.g.sec << ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa32 |= ppa64.m.grp << lbaf->ch_offset;
+ ppa32 |= ppa64.m.pu << lbaf->lun_offset;
+ ppa32 |= ppa64.m.chk << lbaf->chk_offset;
+ ppa32 |= ppa64.m.sec << lbaf->sec_offset;
+ }
+ }
+
+ return ppa32;
+}
+
+static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
+ struct ppa_addr *ppa)
+{
+ struct nvm_geo *geo = &dev->geo;
+ int last = 0;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ int sec = ppa->g.sec;
+
+ sec++;
+ if (sec == geo->ws_min) {
+ int pg = ppa->g.pg;
+
+ sec = 0;
+ pg++;
+ if (pg == geo->num_pg) {
+ int pl = ppa->g.pl;
+
+ pg = 0;
+ pl++;
+ if (pl == geo->num_pln)
+ last = 1;
+
+ ppa->g.pl = pl;
+ }
+ ppa->g.pg = pg;
+ }
+ ppa->g.sec = sec;
+ } else {
+ ppa->m.sec++;
+ if (ppa->m.sec == geo->clba)
+ last = 1;
+ }
+
+ return last;
+}
+
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -493,9 +639,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
+enum {
+ NVM_TGT_F_DEV_L2P = 0,
+ NVM_TGT_F_HOST_L2P = 1 << 0,
+};
+
struct nvm_tgt_type {
const char *name;
unsigned int version[3];
+ int flags;
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
@@ -524,18 +676,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
- struct nvm_chk_meta *meta, struct ppa_addr ppa,
- int nchks);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
+ int, struct nvm_chk_meta *);
+extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
-extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
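
Quick illustration (a sketch, not part of the diff above): nvm_next_ppa_in_chk() advances a PPA sector by sector within one chunk and returns 1 once the chunk is exhausted. A sketch of a chunk walk, assuming a target such as pblk already holds a valid tgt_dev and the chunk's first sector address; foo_walk_chunk() is hypothetical.

#include <linux/lightnvm.h>

/* Visit every sector PPA of one chunk, starting at its first sector. */
static void foo_walk_chunk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr first)
{
	struct ppa_addr ppa = first;
	int last = 0;

	while (!last) {
		/* ... issue I/O against 'ppa' or record it here ... */
		last = nvm_next_ppa_in_chk(tgt_dev, &ppa);
	}
}
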
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index d7618c41f74c..7e020782ade2 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -79,6 +79,12 @@
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
+#ifndef GLOBAL
+#define GLOBAL(name) \
+ .globl name ASM_NL \
+ name:
+#endif
+
#ifndef ENTRY
#define ENTRY(name) \
.globl name ASM_NL \
@@ -90,6 +96,7 @@
#ifndef WEAK
#define WEAK(name) \
.weak name ASM_NL \
+ ALIGN ASM_NL \
name:
#endif
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
new file mode 100644
index 000000000000..a99c58866860
--- /dev/null
+++ b/include/linux/linkmode.h
@@ -0,0 +1,85 @@
+#ifndef __LINKMODE_H
+#define __LINKMODE_H
+
+#include <linux/bitmap.h>
+#include <linux/ethtool.h>
+#include <uapi/linux/ethtool.h>
+
+static inline void linkmode_zero(unsigned long *dst)
+{
+ bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
+{
+ bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline bool linkmode_empty(const unsigned long *src)
+{
+ return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
+{
+ __set_bit(nr, addr);
+}
+
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
+static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __clear_bit(nr, addr);
+}
+
+static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
+ int set)
+{
+ if (set)
+ linkmode_set_bit(nr, addr);
+ else
+ linkmode_clear_bit(nr, addr);
+}
+
+static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
+{
+ __change_bit(nr, addr);
+}
+
+static inline int linkmode_test_bit(int nr, volatile unsigned long *addr)
+{
+ return test_bit(nr, addr);
+}
+
+static inline int linkmode_equal(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+#endif /* __LINKMODE_H */
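
Quick illustration (a sketch, not part of the new file above): the linkmode helpers wrap the bitmap API for ethtool link-mode masks. A sketch of building an advertising mask with them; the ETHTOOL_LINK_MODE_*_BIT names come from uapi/linux/ethtool.h and __ETHTOOL_DECLARE_LINK_MODE_MASK from linux/ethtool.h, while the foo_* names are hypothetical.

#include <linux/linkmode.h>

static __ETHTOOL_DECLARE_LINK_MODE_MASK(foo_advertising);

static void foo_init_advertising(void)
{
	linkmode_zero(foo_advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, foo_advertising);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, foo_advertising);

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, foo_advertising))
		pr_debug("autoneg advertised\n");
}
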
diff --git a/include/linux/list.h b/include/linux/list.h
index de04cc5ed536..edb7628e46ed 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -184,6 +184,29 @@ static inline void list_move_tail(struct list_head *list,
}
/**
+ * list_bulk_move_tail - move a subsection of a list to its tail
+ * @head: the head that will follow our entry
+ * @first: first entry to move
+ * @last: last entry to move, can be the same as first
+ *
+ * Move all entries from @first up to and including @last to just before @head.
+ * All three entries must belong to the same linked list.
+ */
+static inline void list_bulk_move_tail(struct list_head *head,
+ struct list_head *first,
+ struct list_head *last)
+{
+ first->prev->next = last->next;
+ last->next->prev = first->prev;
+
+ head->prev->next = first;
+ first->prev = head->prev;
+
+ last->next = head;
+ head->prev = last;
+}
+
+/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
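
Quick illustration (a sketch, not part of the diff above): list_bulk_move_tail() splices a contiguous run of entries in O(1), avoiding one list_move_tail() per entry. A small LRU-style sketch; struct foo_buf and foo_bump_batch() are hypothetical.

#include <linux/list.h>

struct foo_buf {
	struct list_head lru;
	/* ... */
};

static LIST_HEAD(foo_lru);

/* Move the contiguous batch first..last to the LRU tail in one splice. */
static void foo_bump_batch(struct foo_buf *first, struct foo_buf *last)
{
	list_bulk_move_tail(&foo_lru, &first->lru, &last->lru);
}
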
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 4fd95dbeb52f..b065ef406770 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -299,7 +299,7 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return file_inode(file->f_file);
+ return locks_inode(file->f_file);
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -359,7 +359,7 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
+ return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file)
&& fl1->fl_pid == fl2->fl_pid
&& fl1->fl_owner == fl2->fl_owner
&& fl1->fl_start == fl2->fl_start
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6fc77d4dbdcd..c5335df2372f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -97,15 +97,8 @@ struct lock_class {
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
- unsigned int version;
-
- /*
- * Statistics counter:
- */
- unsigned long ops;
-
- const char *name;
int name_version;
+ const char *name;
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
@@ -266,7 +259,7 @@ struct held_lock {
/*
* Initialization, self-test and debugging-output methods:
*/
-extern void lockdep_info(void);
+extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
@@ -406,7 +399,7 @@ static inline void lockdep_on(void)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_info() do { } while (0)
+# define lockdep_init() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -532,7 +525,7 @@ do { \
#endif /* CONFIG_LOCKDEP */
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 97a020c616ad..9a0bdf91e646 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -672,7 +672,7 @@
* Return 0 if permission is granted.
* @task_kill:
* Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
* SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
* from the kernel and should typically be permitted.
* SIGIO signals are handled separately by the send_sigiotask hook in
@@ -1461,9 +1461,10 @@ union security_list_options {
int (*sb_alloc_security)(struct super_block *sb);
void (*sb_free_security)(struct super_block *sb);
- int (*sb_copy_data)(char *orig, char *copy);
- int (*sb_remount)(struct super_block *sb, void *data);
- int (*sb_kern_mount)(struct super_block *sb, int flags, void *data);
+ void (*sb_free_mnt_opts)(void *mnt_opts);
+ int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts);
+ int (*sb_remount)(struct super_block *sb, void *mnt_opts);
+ int (*sb_kern_mount)(struct super_block *sb);
int (*sb_show_options)(struct seq_file *m, struct super_block *sb);
int (*sb_statfs)(struct dentry *dentry);
int (*sb_mount)(const char *dev_name, const struct path *path,
@@ -1471,14 +1472,15 @@ union security_list_options {
int (*sb_umount)(struct vfsmount *mnt, int flags);
int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path);
int (*sb_set_mnt_opts)(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags);
int (*sb_clone_mnt_opts)(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
- int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
+ int (*sb_add_mnt_opt)(const char *option, const char *val, int len,
+ void **mnt_opts);
int (*dentry_init_security)(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
@@ -1606,7 +1608,7 @@ union security_list_options {
int (*task_setscheduler)(struct task_struct *p);
int (*task_getscheduler)(struct task_struct *p);
int (*task_movememory)(struct task_struct *p);
- int (*task_kill)(struct task_struct *p, struct siginfo *info,
+ int (*task_kill)(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred);
int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -1800,7 +1802,8 @@ struct security_hook_heads {
struct hlist_head bprm_committed_creds;
struct hlist_head sb_alloc_security;
struct hlist_head sb_free_security;
- struct hlist_head sb_copy_data;
+ struct hlist_head sb_free_mnt_opts;
+ struct hlist_head sb_eat_lsm_opts;
struct hlist_head sb_remount;
struct hlist_head sb_kern_mount;
struct hlist_head sb_show_options;
@@ -1810,7 +1813,7 @@ struct security_hook_heads {
struct hlist_head sb_pivotroot;
struct hlist_head sb_set_mnt_opts;
struct hlist_head sb_clone_mnt_opts;
- struct hlist_head sb_parse_opts_str;
+ struct hlist_head sb_add_mnt_opt;
struct hlist_head dentry_init_security;
struct hlist_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
@@ -2039,6 +2042,18 @@ extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
char *lsm);
+struct lsm_info {
+ const char *name; /* Required. */
+ int (*init)(void); /* Required. */
+};
+
+extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
+
+#define DEFINE_LSM(lsm) \
+ static struct lsm_info __lsm_##lsm \
+ __used __section(.lsm_info.init) \
+ __aligned(sizeof(unsigned long))
+
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
/*
* Assuring the safety of deleting a security module is up to
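
Quick illustration (a sketch, not part of the diff above): DEFINE_LSM() emits a struct lsm_info into the new .lsm_info.init section so boot code can walk __start_lsm_info to __end_lsm_info. A sketch of how a minor LSM might use it; the foo names are hypothetical and foo_init() would normally call security_add_hooks().

static int __init foo_init(void)
{
	/* security_add_hooks(foo_hooks, ARRAY_SIZE(foo_hooks), "foo"); */
	return 0;
}

DEFINE_LSM(foo) = {
	.name = "foo",
	.init = foo_init,
};
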
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 44348710953f..faa7da3c9c8b 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -44,6 +44,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
int mbox_send_message(struct mbox_chan *chan, void *mssg);
+int mbox_flush(struct mbox_chan *chan, unsigned long timeout);
void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 74deadb42d76..4994a438444c 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -24,6 +24,9 @@ struct mbox_chan;
* transmission of data is reported by the controller via
* mbox_chan_txdone (if it has some TX ACK irq). It must not
* sleep.
+ * @flush: Called when a client requests transmissions to be blocking but
+ * the context doesn't allow sleeping. Typically the controller
+ * will implement a busy loop waiting for the data to flush out.
* @startup: Called when a client requests the chan. The controller
* could ask clients for additional parameters of communication
* to be provided via client's chan_data. This call may
@@ -46,6 +49,7 @@ struct mbox_chan;
*/
struct mbox_chan_ops {
int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*flush)(struct mbox_chan *chan, unsigned long timeout);
int (*startup)(struct mbox_chan *chan);
void (*shutdown)(struct mbox_chan *chan);
bool (*last_tx_done)(struct mbox_chan *chan);
@@ -131,4 +135,9 @@ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+int devm_mbox_controller_register(struct device *dev,
+ struct mbox_controller *mbox);
+void devm_mbox_controller_unregister(struct device *dev,
+ struct mbox_controller *mbox);
+
#endif /* __MAILBOX_CONTROLLER_H */
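
Quick illustration (a sketch, not part of the diff above): the hunks add an optional ->flush() channel op and devm_mbox_controller_register(). A sketch of a controller driver adopting both; the foo_* callbacks are hypothetical stand-ins for real hardware accessors.

#include <linux/mailbox_controller.h>
#include <linux/platform_device.h>

static int foo_send(struct mbox_chan *chan, void *data)	{ return 0; }
static bool foo_poll_done(struct mbox_chan *chan)	{ return true; }

/* New optional op: spin (no sleeping) until pending data is out or 'timeout',
 * in the units the client passed to mbox_flush(), expires. */
static int foo_flush(struct mbox_chan *chan, unsigned long timeout)
{
	return 0;
}

static const struct mbox_chan_ops foo_chan_ops = {
	.send_data	= foo_send,
	.flush		= foo_flush,
	.last_tx_done	= foo_poll_done,
};

static int foo_probe(struct platform_device *pdev)
{
	struct mbox_controller *mbox;

	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	mbox->dev = &pdev->dev;
	mbox->ops = &foo_chan_ops;
	/* ...fill in chans/num_chans and the txdone method... */

	return devm_mbox_controller_register(&pdev->dev, mbox);
}
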
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2d1d34..bb2c84afb80c 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
+#define DIV64_U64_ROUND_UP(ll, d) \
+ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+
#endif /* _LINUX_MATH64_H */
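
Quick illustration (a sketch, not part of the diff above): DIV64_U64_ROUND_UP() evaluates the divisor once and rounds a 64-bit division up, so DIV64_U64_ROUND_UP(10, 4) yields 3 while div64_u64(10, 4) yields 2. A one-line sketch of a typical user; foo_blocks_needed() is hypothetical.

#include <linux/math64.h>

/* Number of 'blocksize'-byte blocks needed to hold 'bytes' bytes. */
static u64 foo_blocks_needed(u64 bytes, u64 blocksize)
{
	return DIV64_U64_ROUND_UP(bytes, blocksize);
}
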
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 516920549378..64c41cf45590 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,7 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
-#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
@@ -16,6 +15,19 @@
#include <linux/init.h>
#include <linux/mm.h>
+#include <asm/dma.h>
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
@@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
+unsigned long memblock_free_all(void);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
+
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
@@ -138,7 +154,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
/**
@@ -265,21 +280,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid)
-/**
- * for_each_resv_unavail_range - iterate through reserved and unavailable memory
- * @i: u64 used as loop variable
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
- * Available as soon as memblock is initialized.
- * Note: because this memory does not belong to any physical node, flags and
- * nid arguments do not make sense and thus not exported as arguments.
- */
-#define for_each_resv_unavail_range(i, p_start, p_end) \
- for_each_mem_range(i, &memblock.reserved, &memblock.memory, \
- NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
-
static inline void memblock_set_region_flags(struct memblock_region *r,
enum memblock_flags flags)
{
@@ -316,10 +316,117 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+#define MEMBLOCK_ALLOC_KASAN 1
+
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
+
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
+
+static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_raw(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_low(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
+ phys_addr_t align)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
+ phys_addr_t align,
+ phys_addr_t min_addr)
+{
+ return memblock_alloc_try_nid_nopanic(size, align, min_addr,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_alloc_node(phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
+ int nid)
+{
+ return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
+ MEMBLOCK_LOW_LIMIT,
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void __init memblock_free_early(phys_addr_t base,
+ phys_addr_t size)
+{
+ memblock_free(base, size);
+}
+
+static inline void __init memblock_free_early_nid(phys_addr_t base,
+ phys_addr_t size, int nid)
+{
+ memblock_free(base, size);
+}
+
+static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+ __memblock_free_late(base, size);
+}
/*
* Set the allocation direction to bottom-up or top-down.
@@ -339,10 +446,6 @@ static inline bool memblock_bottom_up(void)
return memblock.bottom_up;
}
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
enum memblock_flags flags);
@@ -448,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
i < memblock_type->cnt; \
i++, rgn = &memblock_type->regions[i])
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
+#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
+
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
@@ -455,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
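
Quick illustration (a sketch, not part of the diff above): after this rework the plain memblock_alloc() family returns a mapped virtual address (zeroed, except for the _raw variant), while the physical-address allocators move to the memblock_phys_alloc*() names. A sketch of early-boot usage under those assumptions; foo_early_setup() is hypothetical.

#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init foo_early_setup(void)
{
	void *table;
	phys_addr_t pa;

	/* Virtual allocation: ready to use, zeroed. */
	table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
	if (!table)
		panic("%s: failed to allocate early table\n", __func__);

	/* Physical allocation below the accessible limit. */
	pa = memblock_phys_alloc(SZ_1M, SZ_2M);
	if (!pa)
		pr_warn("foo: no 1 MiB region available\n");
}
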
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0e6c515fb698..83ae11cbd12c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -78,7 +78,7 @@ struct mem_cgroup_reclaim_cookie {
struct mem_cgroup_id {
int id;
- atomic_t ref;
+ refcount_t ref;
};
/*
@@ -225,6 +225,11 @@ struct mem_cgroup {
*/
bool use_hierarchy;
+ /*
+ * Should the OOM killer kill all tasks in this memcg if it kills one of them?
+ */
+ bool oom_group;
+
/* protected by memcg_oom_lock */
bool oom_lock;
int under_oom;
@@ -521,9 +526,11 @@ void mem_cgroup_handle_over_high(void);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
struct task_struct *p);
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
+
static inline void mem_cgroup_enter_user_fault(void)
{
WARN_ON(current->in_user_fault);
@@ -542,6 +549,9 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
}
bool mem_cgroup_oom_synchronize(bool wait);
+struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
+ struct mem_cgroup *oom_domain);
+void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
@@ -962,7 +972,12 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
}
static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
@@ -1001,6 +1016,16 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
+static inline struct mem_cgroup *mem_cgroup_get_oom_group(
+ struct task_struct *victim, struct mem_cgroup *oom_domain)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
+{
+}
+
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
@@ -1250,10 +1275,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
struct mem_cgroup *memcg);
+
+#ifdef CONFIG_MEMCG_KMEM
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);
-#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1289,6 +1315,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id);
#else
+
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+ return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 4e9828cda7a2..07da5c6c5ba0 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -107,8 +107,8 @@ static inline bool movable_node_is_enabled(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int arch_remove_memory(u64 start, u64 size,
- struct vmem_altmap *altmap);
+extern int arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */
@@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
+extern void __remove_memory(int nid, u64 start, u64 size);
#else
static inline bool is_mem_section_removable(unsigned long pfn,
@@ -317,21 +318,22 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
-extern int add_memory_resource(int nid, struct resource *resource, bool online);
+extern int add_memory_resource(int nid, struct resource *resource);
extern int arch_add_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
-extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct pglist_data *pgdat,
- unsigned long start_pfn, struct vmem_altmap *altmap);
+extern int sparse_add_one_section(int nid, unsigned long start_pfn,
+ struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e763557..f0628660d541 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -4,8 +4,6 @@
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
-#include <asm/pgtable.h>
-
struct resource;
struct device;
@@ -53,73 +51,51 @@ struct vmem_altmap {
* wakeup event whenever a page is unpinned and becomes idle. This
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
+ *
+ * MEMORY_DEVICE_PCI_P2PDMA:
+ * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
+ * transactions.
*/
enum memory_type {
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_PCI_P2PDMA,
};
/*
- * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two
- * callbacks:
- * page_fault()
- * page_free()
- *
* Additional notes about MEMORY_DEVICE_PRIVATE may be found in
* include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
* explanation in include/linux/memory_hotplug.h.
*
- * The page_fault() callback must migrate page back, from device memory to
- * system memory, so that the CPU can access it. This might fail for various
- * reasons (device issues, device have been unplugged, ...). When such error
- * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and
- * set the CPU page table entry to "poisoned".
- *
- * Note that because memory cgroup charges are transferred to the device memory,
- * this should never fail due to memory restrictions. However, allocation
- * of a regular system page might still fail because we are out of memory. If
- * that happens, the page_fault() callback must return VM_FAULT_OOM.
- *
- * The page_fault() callback can also try to migrate back multiple pages in one
- * chunk, as an optimization. It must, however, prioritize the faulting address
- * over all the others.
- *
- *
* The page_free() callback is called once the page refcount reaches 1
* (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug.
* This allows the device driver to implement its own memory management.)
- *
- * For MEMORY_DEVICE_PUBLIC only the page_free() callback matter.
*/
-typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
- unsigned long addr,
- const struct page *page,
- unsigned int flags,
- pmd_t *pmdp);
typedef void (*dev_page_free_t)(struct page *page, void *data);
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
- * @page_fault: callback when CPU fault on an unaddressable device page
* @page_free: free page callback when page refcount reaches 1
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
+ * @kill: callback to transition @ref to the dead state
* @dev: host device of the mapping for debug
* @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
*/
struct dev_pagemap {
- dev_page_fault_t page_fault;
dev_page_free_t page_free;
struct vmem_altmap altmap;
bool altmap_valid;
struct resource res;
struct percpu_ref *ref;
+ void (*kill)(struct percpu_ref *ref);
struct device *dev;
void *data;
enum memory_type type;
+ u64 pci_p2pdma_bus_offset;
};
#ifdef CONFIG_ZONE_DEVICE
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 517e60eecbcb..a353cd22b388 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -35,7 +35,7 @@ enum axp20x_variants {
#define AXP152_ALDO_OP_MODE 0x13
#define AXP152_LDO0_CTRL 0x15
#define AXP152_DCDC2_V_OUT 0x23
-#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC2_V_RAMP 0x25
#define AXP152_DCDC1_V_OUT 0x26
#define AXP152_DCDC3_V_OUT 0x27
#define AXP152_ALDO12_V_OUT 0x28
@@ -53,7 +53,7 @@ enum axp20x_variants {
#define AXP20X_USB_OTG_STATUS 0x02
#define AXP20X_PWR_OUT_CTRL 0x12
#define AXP20X_DCDC2_V_OUT 0x23
-#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC2_LDO3_V_RAMP 0x25
#define AXP20X_DCDC3_V_OUT 0x27
#define AXP20X_LDO24_V_OUT 0x28
#define AXP20X_LDO3_V_OUT 0x29
@@ -266,6 +266,7 @@ enum axp20x_variants {
#define AXP288_RT_BATT_V_H 0xa0
#define AXP288_RT_BATT_V_L 0xa1
+#define AXP813_ACIN_PATH_CTRL 0x3a
#define AXP813_ADC_RATE 0x85
/* Fuel Gauge */
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 20949dde35cd..e44e3ec8a9c7 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -36,7 +36,7 @@
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
* SPI requires up to 32 additional bytes for responses.
- * */
+ */
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
#define EC_MAX_RESPONSE_OVERHEAD 32
@@ -58,13 +58,14 @@ enum {
EC_MAX_MSG_BYTES = 64 * 1024,
};
-/*
- * @version: Command version number (often 0)
- * @command: Command to send (EC_CMD_...)
- * @outsize: Outgoing length in bytes
- * @insize: Max number of bytes to accept from EC
- * @result: EC's response to the command (separate from communication failure)
- * @data: Where to put the incoming data from EC and outgoing data to EC
+/**
+ * struct cros_ec_command - Information about a ChromeOS EC command.
+ * @version: Command version number (often 0).
+ * @command: Command to send (EC_CMD_...).
+ * @outsize: Outgoing length in bytes.
+ * @insize: Max number of bytes to accept from the EC.
+ * @result: EC's response to the command (separate from communication failure).
+ * @data: Where to put the incoming data from EC and outgoing data to EC.
*/
struct cros_ec_command {
uint32_t version;
@@ -76,48 +77,55 @@ struct cros_ec_command {
};
/**
- * struct cros_ec_device - Information about a ChromeOS EC device
- *
- * @phys_name: name of physical comms layer (e.g. 'i2c-4')
+ * struct cros_ec_device - Information about a ChromeOS EC device.
+ * @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
- * @was_wake_device: true if this device was set to wake the system from
- * sleep at the last suspend
- * @cmd_readmem: direct read of the EC memory-mapped region, if supported
- * @offset is within EC_LPC_ADDR_MEMMAP region.
- * @bytes: number of bytes to read. zero means "read a string" (including
- * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
- * Caller must ensure that the buffer is large enough for the result when
- * reading a string.
- *
- * @priv: Private data
- * @irq: Interrupt to use
- * @id: Device id
- * @din: input buffer (for data from EC)
- * @dout: output buffer (for data to EC)
- * \note
- * These two buffers will always be dword-aligned and include enough
- * space for up to 7 word-alignment bytes also, so we can ensure that
- * the body of the message is always dword-aligned (64-bit).
- * We use this alignment to keep ARM and x86 happy. Probably word
- * alignment would be OK, there might be a small performance advantage
- * to using dword.
- * @din_size: size of din buffer to allocate (zero to use static din)
- * @dout_size: size of dout buffer to allocate (zero to use static dout)
- * @wake_enabled: true if this device can wake the system from sleep
- * @suspended: true if this device had been suspended
- * @cmd_xfer: send command to EC and get response
- * Returns the number of bytes received if the communication succeeded, but
- * that doesn't mean the EC was happy with the command. The caller
- * should check msg.result for the EC's result code.
- * @pkt_xfer: send packet to EC and get response
- * @lock: one transaction at a time
- * @mkbp_event_supported: true if this EC supports the MKBP event protocol.
- * @event_notifier: interrupt event notifier for transport devices.
- * @event_data: raw payload transferred with the MKBP event.
- * @event_size: size in bytes of the event data.
+ * @was_wake_device: True if this device was set to wake the system from
+ * sleep at the last suspend.
+ * @cros_class: The class structure for this device.
+ * @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
+ * @offset: Offset within the EC_LPC_ADDR_MEMMAP region.
+ * @bytes: Number of bytes to read. Zero means "read a string" (including
+ * the trailing '\0'). At most EC_MEMMAP_SIZE bytes can be
+ * read. Caller must ensure that the buffer is large enough for the
+ * result when reading a string.
+ * @max_request: Max size of message requested.
+ * @max_response: Max size of message response.
+ * @max_passthru: Max size of passthru message.
+ * @proto_version: The protocol version used for this device.
+ * @priv: Private data.
+ * @irq: Interrupt to use.
+ * @id: Device id.
+ * @din: Input buffer (for data from EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Probably word alignment would be OK; there might be a small
+ * performance advantage to using dword.
+ * @dout: Output buffer (for data to EC). This buffer will always be
+ * dword-aligned and include enough space for up to 7 word-alignment
+ * bytes also, so we can ensure that the body of the message is always
+ * dword-aligned (64-bit). We use this alignment to keep ARM and x86
+ * happy. Probably word alignment would be OK; there might be a small
+ * performance advantage to using dword.
+ * @din_size: Size of din buffer to allocate (zero to use static din).
+ * @dout_size: Size of dout buffer to allocate (zero to use static dout).
+ * @wake_enabled: True if this device can wake the system from sleep.
+ * @suspended: True if this device had been suspended.
+ * @cmd_xfer: Send command to EC and get response.
+ * Returns the number of bytes received if the communication
+ * succeeded, but that doesn't mean the EC was happy with the
+ * command. The caller should check msg.result for the EC's result
+ * code.
+ * @pkt_xfer: Send packet to EC and get response.
+ * @lock: One transaction at a time.
+ * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @event_notifier: Interrupt event notifier for transport devices.
+ * @event_data: Raw payload transferred with the MKBP event.
+ * @event_size: Size in bytes of the event data.
+ * @host_event_wake_mask: Mask of host events that cause wake from suspend.
*/
struct cros_ec_device {
-
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
@@ -153,20 +161,19 @@ struct cros_ec_device {
};
/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
- *
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
* @sensor_num: Id of the sensor, as reported by the EC.
*/
struct cros_ec_sensor_platform {
u8 sensor_num;
};
-/* struct cros_ec_platform - ChromeOS EC platform information
- *
- * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
- * used in /dev/ and sysfs.
- * @cmd_offset: offset to apply for each command. Set when
- * registering a devicde behind another one.
+/**
+ * struct cros_ec_platform - ChromeOS EC platform information.
+ * @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
+ * used in /dev/ and sysfs.
+ * @cmd_offset: Offset to apply for each command. Set when
+ * registering a device behind another one.
*/
struct cros_ec_platform {
const char *ec_name;
@@ -175,16 +182,16 @@ struct cros_ec_platform {
struct cros_ec_debugfs;
-/*
- * struct cros_ec_dev - ChromeOS EC device entry point
- *
- * @class_dev: Device structure used in sysfs
- * @cdev: Character device structure in /dev
- * @ec_dev: cros_ec_device structure to talk to the physical device
- * @dev: pointer to the platform device
- * @debug_info: cros_ec_debugfs structure for debugging information
- * @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC.
- * @cmd_offset: offset to apply for each command.
+/**
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @cdev: Character device structure in /dev.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
*/
struct cros_ec_dev {
struct device class_dev;
@@ -200,124 +207,129 @@ struct cros_ec_dev {
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
/**
- * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
*
* This can be called by drivers to handle a suspend event.
*
- * ec_dev: Device to suspend
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_suspend(struct cros_ec_device *ec_dev);
/**
- * cros_ec_resume - Handle a resume operation for the ChromeOS EC device
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
*
* This can be called by drivers to handle a resume event.
*
- * @ec_dev: Device to resume
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_resume(struct cros_ec_device *ec_dev);
/**
- * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to prepare the outgoing message for.
+ * @msg: Message to write.
*
* This is intended to be used by all ChromeOS EC drivers, but at present
* only SPI uses it. Once LPC uses the same protocol it can start using it.
* I2C could use it now, with a refactor of the existing code.
*
- * @ec_dev: Device to register
- * @msg: Message to write
+ * Return: 0 on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_check_result - Check ec_msg->result
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
* errors and to warn about them.
*
- * @ec_dev: EC device
- * @msg: Message to check
+ * Return: 0 on success or negative error code.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
*
* Call this to send a command to the ChromeOS EC. This should be used
* instead of calling the EC's cmd_xfer() callback directly.
*
- * @ec_dev: EC device
- * @msg: Message to write
+ * Return: 0 on success or negative error code.
*/
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
*
* This function is identical to cros_ec_cmd_xfer, except it returns success
* status only if both the command was transmitted successfully and the EC
* replied with success status. It's not necessary to check msg->result when
* using this function.
*
- * @ec_dev: EC device
- * @msg: Message to write
- * @return: Num. of bytes transferred on success, <0 on failure
+ * Return: The number of bytes transferred on success or negative error code.
*/
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
- * cros_ec_remove - Remove a ChromeOS EC
+ * cros_ec_remove() - Remove a ChromeOS EC.
+ * @ec_dev: Device to deregister.
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_remove(struct cros_ec_device *ec_dev);
/**
- * cros_ec_register - Register a new ChromeOS EC, using the provided info
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
*
* Before calling this, allocate a pointer to a new device and then fill
* in all the fields up to the --private-- marker.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
/**
- * cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to query.
*
- * @ec_dev: Device to register
- * @return 0 if ok, -ve on error
+ * Return: 0 on success or negative error code.
*/
int cros_ec_query_all(struct cros_ec_device *ec_dev);
/**
- * cros_ec_get_next_event - Fetch next event from the ChromeOS EC
- *
- * @ec_dev: Device to fetch event from
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
* @wake_event: Pointer to a bool set to true upon return if the event might be
* treated as a wake event. Ignored if null.
*
- * Returns: 0 on success, Linux error number on failure
+ * Return: 0 on success or negative error code.
*/
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
/**
- * cros_ec_get_host_event - Return a mask of event set by the EC.
+ * cros_ec_get_host_event() - Return a mask of events set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
*
- * When MKBP is supported, when the EC raises an interrupt,
- * We collect the events raised and call the functions in the ec notifier.
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * events raised and call the functions in the EC notifier. This function
+ * is a helper to know which events were raised.
*
- * This function is a helper to know which events are raised.
+ * Return: 0 on success or negative error code.
*/
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
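
Quick illustration (a sketch, not part of the diff above): the kerneldoc describes the command helpers. Below is a sketch of sending EC_CMD_HELLO through cros_ec_cmd_xfer_status(), following the struct cros_ec_command layout documented above (version/command/outsize/insize/result/data); the foo_* wrapper is hypothetical.

#include <linux/slab.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>

static int foo_ec_hello(struct cros_ec_device *ec_dev)
{
	struct cros_ec_command *msg;
	struct ec_params_hello *params;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(struct ec_response_hello), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_HELLO;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(struct ec_response_hello);
	params = (struct ec_params_hello *)msg->data;
	params->in_data = 0xa0b0c0d0;

	/* Negative on transport error or when the EC reports failure. */
	ret = cros_ec_cmd_xfer_status(ec_dev, msg);

	kfree(msg);
	return ret < 0 ? ret : 0;
}
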
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 6e1ab9bead28..9a9631f0559e 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -306,15 +306,18 @@ enum host_event_code {
/* Host event mask */
#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
-/* Arguments at EC_LPC_ADDR_HOST_ARGS */
+/**
+ * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS
+ * @flags: The host argument flags.
+ * @command_version: Command version.
+ * @data_size: The length of data.
+ * @checksum: Checksum; sum of command + flags + command_version + data_size +
+ * all params/response data bytes.
+ */
struct ec_lpc_host_args {
uint8_t flags;
uint8_t command_version;
uint8_t data_size;
- /*
- * Checksum; sum of command + flags + command_version + data_size +
- * all params/response data bytes.
- */
uint8_t checksum;
} __packed;
@@ -468,54 +471,43 @@ struct ec_lpc_host_args {
#define EC_HOST_REQUEST_VERSION 3
-/* Version 3 request from host */
+/**
+ * struct ec_host_request - Version 3 request from host.
+ * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it
+ * receives a header with a version it doesn't know how to
+ * parse.
+ * @checksum: Checksum of request and data; sum of all bytes including checksum
+ * should total to 0.
+ * @command: Command to send (EC_CMD_...)
+ * @command_version: Command version.
+ * @reserved: Unused byte in current protocol version; set to 0.
+ * @data_len: Length of data which follows this header.
+ */
struct ec_host_request {
- /* Struct version (=3)
- *
- * EC will return EC_RES_INVALID_HEADER if it receives a header with a
- * version it doesn't know how to parse.
- */
uint8_t struct_version;
-
- /*
- * Checksum of request and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Command code */
uint16_t command;
-
- /* Command version */
uint8_t command_version;
-
- /* Unused byte in current protocol version; set to 0 */
uint8_t reserved;
-
- /* Length of data which follows this header */
uint16_t data_len;
} __packed;
#define EC_HOST_RESPONSE_VERSION 3
-/* Version 3 response from EC */
+/**
+ * struct ec_host_response - Version 3 response from EC.
+ * @struct_version: Struct version (=3).
+ * @checksum: Checksum of response and data; sum of all bytes including
+ * checksum should total to 0.
+ * @result: EC's response to the command (separate from communication failure)
+ * @data_len: Length of data which follows this header.
+ * @reserved: Unused bytes in current protocol version; set to 0.
+ */
struct ec_host_response {
- /* Struct version (=3) */
uint8_t struct_version;
-
- /*
- * Checksum of response and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Result code (EC_RES_*) */
uint16_t result;
-
- /* Length of data which follows this header */
uint16_t data_len;
-
- /* Unused bytes in current protocol version; set to 0 */
uint16_t reserved;
} __packed;
@@ -540,6 +532,10 @@ struct ec_host_response {
*/
#define EC_CMD_PROTO_VERSION 0x00
+/**
+ * struct ec_response_proto_version - Response to the proto version command.
+ * @version: The protocol version.
+ */
struct ec_response_proto_version {
uint32_t version;
} __packed;
@@ -550,12 +546,20 @@ struct ec_response_proto_version {
*/
#define EC_CMD_HELLO 0x01
+/**
+ * struct ec_params_hello - Parameters to the hello command.
+ * @in_data: Pass anything here.
+ */
struct ec_params_hello {
- uint32_t in_data; /* Pass anything here */
+ uint32_t in_data;
} __packed;
+/**
+ * struct ec_response_hello - Response to the hello command.
+ * @out_data: Output will be in_data + 0x01020304.
+ */
struct ec_response_hello {
- uint32_t out_data; /* Output will be in_data + 0x01020304 */
+ uint32_t out_data;
} __packed;
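/*
 * Sketch of the hello handshake these two structs describe; ec_xfer() is a
 * hypothetical transport helper standing in for whatever bus the host uses
 * (LPC, I2C, SPI).
 */
static bool ec_hello_ok(void)
{
	struct ec_params_hello req = { .in_data = 0xa0b0c0d0 };
	struct ec_response_hello resp;

	if (ec_xfer(EC_CMD_HELLO, &req, sizeof(req), &resp, sizeof(resp)) < 0)
		return false;

	return resp.out_data == req.in_data + 0x01020304;
}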
/* Get version number */
@@ -567,22 +571,37 @@ enum ec_current_image {
EC_IMAGE_RW
};
+/**
+ * struct ec_response_get_version - Response to the get version command.
+ * @version_string_ro: Null-terminated RO firmware version string.
+ * @version_string_rw: Null-terminated RW firmware version string.
+ * @reserved: Unused bytes; was previously RW-B firmware version string.
+ * @current_image: One of ec_current_image.
+ */
struct ec_response_get_version {
- /* Null-terminated version strings for RO, RW */
char version_string_ro[32];
char version_string_rw[32];
- char reserved[32]; /* Was previously RW-B string */
- uint32_t current_image; /* One of ec_current_image */
+ char reserved[32];
+ uint32_t current_image;
} __packed;
/* Read test */
#define EC_CMD_READ_TEST 0x03
+/**
+ * struct ec_params_read_test - Parameters for the read test command.
+ * @offset: Starting value for read buffer.
+ * @size: Size to read in bytes.
+ */
struct ec_params_read_test {
- uint32_t offset; /* Starting value for read buffer */
- uint32_t size; /* Size to read in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
+/**
+ * struct ec_response_read_test - Response to the read test command.
+ * @data: Data returned by the read test command.
+ */
struct ec_response_read_test {
uint32_t data[32];
} __packed;
@@ -597,18 +616,27 @@ struct ec_response_read_test {
/* Get chip info */
#define EC_CMD_GET_CHIP_INFO 0x05
+/**
+ * struct ec_response_get_chip_info - Response to the get chip info command.
+ * @vendor: Null-terminated string for chip vendor.
+ * @name: Null-terminated string for chip name.
+ * @revision: Null-terminated string for chip mask version.
+ */
struct ec_response_get_chip_info {
- /* Null-terminated strings */
char vendor[32];
char name[32];
- char revision[32]; /* Mask version */
+ char revision[32];
} __packed;
/* Get board HW version */
#define EC_CMD_GET_BOARD_VERSION 0x06
+/**
+ * struct ec_response_board_version - Response to the board version command.
+ * @board_version: A monotonically increasing number.
+ */
struct ec_response_board_version {
- uint16_t board_version; /* A monotonously incrementing number. */
+ uint16_t board_version;
} __packed;
/*
@@ -621,27 +649,42 @@ struct ec_response_board_version {
*/
#define EC_CMD_READ_MEMMAP 0x07
+/**
+ * struct ec_params_read_memmap - Parameters for the read memory map command.
+ * @offset: Offset in memmap (EC_MEMMAP_*).
+ * @size: Size to read in bytes.
+ */
struct ec_params_read_memmap {
- uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
- uint8_t size; /* Size to read in bytes */
+ uint8_t offset;
+ uint8_t size;
} __packed;
/* Read versions supported for a command */
#define EC_CMD_GET_CMD_VERSIONS 0x08
+/**
+ * struct ec_params_get_cmd_versions - Parameters for the get command versions.
+ * @cmd: Command to check.
+ */
struct ec_params_get_cmd_versions {
- uint8_t cmd; /* Command to check */
+ uint8_t cmd;
} __packed;
+/**
+ * struct ec_params_get_cmd_versions_v1 - Parameters for the get command
+ * versions (v1).
+ * @cmd: Command to check.
+ */
struct ec_params_get_cmd_versions_v1 {
- uint16_t cmd; /* Command to check */
+ uint16_t cmd;
} __packed;
+/**
+ * struct ec_response_get_cmd_version - Response to the get command versions.
+ * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
+ * a desired version.
+ */
struct ec_response_get_cmd_versions {
- /*
- * Mask of supported versions; use EC_VER_MASK() to compare with a
- * desired version.
- */
uint32_t version_mask;
} __packed;
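/*
 * Sketch: comparing @version_mask against a desired version with the
 * EC_VER_MASK() helper defined earlier in this header. ec_get_cmd_versions()
 * is a hypothetical wrapper around EC_CMD_GET_CMD_VERSIONS.
 */
static bool ec_cmd_version_supported(uint16_t cmd, int ver)
{
	uint32_t mask = ec_get_cmd_versions(cmd);

	return mask & EC_VER_MASK(ver);
}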
@@ -659,6 +702,11 @@ enum ec_comms_status {
EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
};
+/**
+ * struct ec_response_get_comms_status - Response to the get comms status
+ * command.
+ * @flags: Mask of enum ec_comms_status.
+ */
struct ec_response_get_comms_status {
uint32_t flags; /* Mask of enum ec_comms_status */
} __packed;
@@ -685,19 +733,19 @@ struct ec_response_test_protocol {
/* EC_RES_IN_PROGRESS may be returned if a command is slow */
#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
+/**
+ * struct ec_response_get_protocol_info - Response to the get protocol info.
+ * @protocol_versions: Bitmask of protocol versions supported (1 << n means
+ * version n).
+ * @max_request_packet_size: Maximum request packet size in bytes.
+ * @max_response_packet_size: Maximum response packet size in bytes.
+ * @flags: see EC_PROTOCOL_INFO_*
+ */
struct ec_response_get_protocol_info {
/* Fields which exist if at least protocol version 3 supported */
-
- /* Bitmask of protocol versions supported (1 << n means version n)*/
uint32_t protocol_versions;
-
- /* Maximum request packet size, in bytes */
uint16_t max_request_packet_size;
-
- /* Maximum response packet size, in bytes */
uint16_t max_response_packet_size;
-
- /* Flags; see EC_PROTOCOL_INFO_* */
uint32_t flags;
} __packed;
@@ -708,8 +756,10 @@ struct ec_response_get_protocol_info {
/* The upper byte of .flags tells what to do (nothing means "get") */
#define EC_GSV_SET 0x80000000
-/* The lower three bytes of .flags identifies the parameter, if that has
- meaning for an individual command. */
+/*
+ * The lower three bytes of .flags identify the parameter, if that has
+ * meaning for an individual command.
+ */
#define EC_GSV_PARAM_MASK 0x00ffffff
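/*
 * Sketch: composing the .flags word described above - EC_GSV_SET in the top
 * byte selects "set", the low three bytes carry the parameter (when the
 * command uses one). The helper name is illustrative.
 */
static uint32_t ec_gsv_set_flags(uint32_t param)
{
	return EC_GSV_SET | (param & EC_GSV_PARAM_MASK);
}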
struct ec_params_get_set_value {
@@ -810,6 +860,7 @@ enum ec_feature_code {
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+
struct ec_response_get_features {
uint32_t flags[2];
} __packed;
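/*
 * Sketch: testing a single feature bit with the EC_FEATURE_MASK_0/1 macros
 * above; codes below 32 live in flags[0], the rest in flags[1]. The helper
 * name is illustrative.
 */
static bool ec_has_feature(const struct ec_response_get_features *r,
			   unsigned int code)
{
	if (code < 32)
		return r->flags[0] & EC_FEATURE_MASK_0(code);

	return r->flags[1] & EC_FEATURE_MASK_1(code);
}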
@@ -820,24 +871,22 @@ struct ec_response_get_features {
/* Get flash info */
#define EC_CMD_FLASH_INFO 0x10
-/* Version 0 returns these fields */
+/**
+ * struct ec_response_flash_info - Response to the flash info command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ *
+ * Version 0 returns these fields.
+ */
struct ec_response_flash_info {
- /* Usable flash size, in bytes */
uint32_t flash_size;
- /*
- * Write block size. Write offset and size must be a multiple
- * of this.
- */
uint32_t write_block_size;
- /*
- * Erase block size. Erase offset and size must be a multiple
- * of this.
- */
uint32_t erase_block_size;
- /*
- * Protection block size. Protection offset and size must be a
- * multiple of this.
- */
uint32_t protect_block_size;
} __packed;
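/*
 * Sketch of the alignment rule documented above: write offset and size must
 * be multiples of write_block_size (erase parameters likewise of
 * erase_block_size). The helper name is illustrative.
 */
static bool ec_flash_write_aligned(const struct ec_response_flash_info *info,
				   uint32_t offset, uint32_t size)
{
	return !(offset % info->write_block_size) &&
	       !(size % info->write_block_size);
}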
@@ -845,7 +894,22 @@ struct ec_response_flash_info {
/* EC flash erases bits to 0 instead of 1 */
#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
-/*
+/**
+ * struct ec_response_flash_info_1 - Response to the flash info v1 command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if
+ * size is exactly this and offset is a multiple of this.
+ * For example, an EC may have a write buffer which can do
+ * half-page operations if data is aligned, and a slower
+ * word-at-a-time write mode.
+ * @flags: Flags; see EC_FLASH_INFO_*
+ *
* Version 1 returns the same initial fields as version 0, with additional
* fields following.
*
@@ -860,15 +924,7 @@ struct ec_response_flash_info_1 {
uint32_t protect_block_size;
/* Version 1 adds these fields: */
- /*
- * Ideal write size in bytes. Writes will be fastest if size is
- * exactly this and offset is a multiple of this. For example, an EC
- * may have a write buffer which can do half-page operations if data is
- * aligned, and a slower word-at-a-time write mode.
- */
uint32_t write_ideal_size;
-
- /* Flags; see EC_FLASH_INFO_* */
uint32_t flags;
} __packed;
@@ -879,9 +935,14 @@ struct ec_response_flash_info_1 {
*/
#define EC_CMD_FLASH_READ 0x11
+/**
+ * struct ec_params_flash_read - Parameters for the flash read command.
+ * @offset: Byte offset to read.
+ * @size: Size to read in bytes.
+ */
struct ec_params_flash_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
/* Write flash */
@@ -891,18 +952,28 @@ struct ec_params_flash_read {
/* Version 0 of the flash command supported only 64 bytes of data */
#define EC_FLASH_WRITE_VER0_SIZE 64
+/**
+ * struct ec_params_flash_write - Parameters for the flash write command.
+ * @offset: Byte offset to write.
+ * @size: Size to write in bytes.
+ */
struct ec_params_flash_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
+ uint32_t offset;
+ uint32_t size;
/* Followed by data to write */
} __packed;
/* Erase flash */
#define EC_CMD_FLASH_ERASE 0x13
+/**
+ * struct ec_params_flash_erase - Parameters for the flash erase command.
+ * @offset: Byte offset to erase.
+ * @size: Size to erase in bytes.
+ */
struct ec_params_flash_erase {
- uint32_t offset; /* Byte offset to erase */
- uint32_t size; /* Size to erase in bytes */
+ uint32_t offset;
+ uint32_t size;
} __packed;
/*
@@ -941,21 +1012,28 @@ struct ec_params_flash_erase {
/* Entire flash code protected when the EC boots */
#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
+/**
+ * struct ec_params_flash_protect - Parameters for the flash protect command.
+ * @mask: Bits in flags to apply.
+ * @flags: New flags to apply.
+ */
struct ec_params_flash_protect {
- uint32_t mask; /* Bits in flags to apply */
- uint32_t flags; /* New flags to apply */
+ uint32_t mask;
+ uint32_t flags;
} __packed;
+/**
+ * struct ec_response_flash_protect - Response to the flash protect command.
+ * @flags: Current value of flash protect flags.
+ * @valid_flags: Flags which are valid on this platform. This allows the
+ * caller to distinguish between flags which aren't set vs. flags
+ * which can't be set on this platform.
+ * @writable_flags: Flags which can be changed given the current protection
+ * state.
+ */
struct ec_response_flash_protect {
- /* Current value of flash protect flags */
uint32_t flags;
- /*
- * Flags which are valid on this platform. This allows the caller
- * to distinguish between flags which aren't set vs. flags which can't
- * be set on this platform.
- */
uint32_t valid_flags;
- /* Flags which can be changed given the current protection state */
uint32_t writable_flags;
} __packed;
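/*
 * Sketch: the three flag words let a caller tell "not set" apart from
 * "cannot be set". EC_FLASH_PROTECT_ALL_AT_BOOT is defined earlier in this
 * header; the helper name is illustrative.
 */
static bool flash_protect_all_at_boot_settable(const struct ec_response_flash_protect *r)
{
	return (r->valid_flags & EC_FLASH_PROTECT_ALL_AT_BOOT) &&
	       (r->writable_flags & EC_FLASH_PROTECT_ALL_AT_BOOT);
}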
@@ -982,8 +1060,13 @@ enum ec_flash_region {
EC_FLASH_REGION_COUNT,
};
+/**
+ * struct ec_params_flash_region_info - Parameters for the flash region info
+ * command.
+ * @region: Flash region; see EC_FLASH_REGION_*
+ */
struct ec_params_flash_region_info {
- uint32_t region; /* enum ec_flash_region */
+ uint32_t region;
} __packed;
struct ec_response_flash_region_info {
@@ -1094,7 +1177,9 @@ struct rgb_s {
};
#define LB_BATTERY_LEVELS 4
-/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
+
+/*
+ * List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
struct lightbar_params_v0 {
@@ -2132,6 +2217,7 @@ struct ec_response_get_next_event_v1 {
/* Switches */
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
+#define EC_MKBP_BASE_ATTACHED 2
/*****************************************************************************/
/* Temperature sensor commands */
@@ -3102,6 +3188,16 @@ struct ec_params_usb_pd_info_request {
uint8_t port;
} __packed;
+/*
+ * This command returns the number of USB PD charge ports plus the number
+ * of dedicated ports present.
+ * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports.
+ */
+#define EC_CMD_CHARGE_PORT_COUNT 0x0105
+struct ec_response_charge_port_count {
+ uint8_t port_count;
+} __packed;
+
/* Read USB-PD Device discovery info */
#define EC_CMD_USB_PD_DISCOVERY 0x0113
struct ec_params_usb_pd_discovery_entry {
diff --git a/include/linux/mfd/cros_ec_lpc_mec.h b/include/linux/mfd/cros_ec_lpc_mec.h
deleted file mode 100644
index 176496ddc66c..000000000000
--- a/include/linux/mfd/cros_ec_lpc_mec.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * cros_ec_lpc_mec - LPC variant I/O for Microchip EC
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_MEC_H
-#define __LINUX_MFD_CROS_EC_MEC_H
-
-#include <linux/mfd/cros_ec_commands.h>
-
-enum cros_ec_lpc_mec_emi_access_mode {
- /* 8-bit access */
- ACCESS_TYPE_BYTE = 0x0,
- /* 16-bit access */
- ACCESS_TYPE_WORD = 0x1,
- /* 32-bit access */
- ACCESS_TYPE_LONG = 0x2,
- /*
- * 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the
- * EC data register to be incremented.
- */
- ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3,
-};
-
-enum cros_ec_lpc_mec_io_type {
- MEC_IO_READ,
- MEC_IO_WRITE,
-};
-
-/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */
-#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0
-#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE)
-
-/* EMI registers are relative to base */
-#define MEC_EMI_BASE 0x800
-#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0)
-#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1)
-#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2)
-#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3)
-#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4)
-#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5)
-#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6)
-#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7)
-
-/*
- * cros_ec_lpc_mec_init
- *
- * Initialize MEC I/O.
- */
-void cros_ec_lpc_mec_init(void);
-
-/*
- * cros_ec_lpc_mec_destroy
- *
- * Cleanup MEC I/O.
- */
-void cros_ec_lpc_mec_destroy(void);
-
-/**
- * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port
- *
- * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
- * @offset: Base read / write address
- * @length: Number of bytes to read / write
- * @buf: Destination / source buffer
- *
- * @return 8-bit checksum of all bytes read / written
- */
-u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
- unsigned int offset, unsigned int length, u8 *buf);
-
-#endif /* __LINUX_MFD_CROS_EC_MEC_H */
diff --git a/include/linux/mfd/cros_ec_lpc_reg.h b/include/linux/mfd/cros_ec_lpc_reg.h
deleted file mode 100644
index 5560bef63c2b..000000000000
--- a/include/linux/mfd/cros_ec_lpc_reg.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
- *
- * Copyright (C) 2016 Google, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver uses the Chrome OS EC byte-level message-based protocol for
- * communicating the keyboard state (which keys are pressed) from a keyboard EC
- * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
- * but everything else (including deghosting) is done here. The main
- * motivation for this is to keep the EC firmware as simple as possible, since
- * it cannot be easily upgraded and EC flash/IRAM space is relatively
- * expensive.
- */
-
-#ifndef __LINUX_MFD_CROS_EC_REG_H
-#define __LINUX_MFD_CROS_EC_REG_H
-
-/**
- * cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes read.
- *
- * @offset: Base read address
- * @length: Number of bytes to read
- * @dest: Destination buffer
- */
-u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest);
-
-/**
- * cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address.
- * Returns 8-bit checksum of all bytes written.
- *
- * @offset: Base write address
- * @length: Number of bytes to write
- * @msg: Write data buffer
- */
-u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg);
-
-/**
- * cros_ec_lpc_reg_init
- *
- * Initialize register I/O.
- */
-void cros_ec_lpc_reg_init(void);
-
-/**
- * cros_ec_lpc_reg_destroy
- *
- * Cleanup reg I/O.
- */
-void cros_ec_lpc_reg_destroy(void);
-
-#endif /* __LINUX_MFD_CROS_EC_REG_H */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
/*
* Regulator configuration
*/
-/* DA9063 regulator IDs */
+/* DA9063 and DA9063L regulator IDs */
enum {
/* BUCKs */
DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
DA9063_ID_BMEM_BIO_MERGED,
/* When two BUCKs are merged, they cannot be reused separately */
- /* LDOs */
+ /* LDOs on both DA9063 and DA9063L */
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO11,
+
+ /* DA9063-only LDOs */
DA9063_ID_LDO1,
DA9063_ID_LDO2,
- DA9063_ID_LDO3,
DA9063_ID_LDO4,
DA9063_ID_LDO5,
DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
DA9063_ID_LDO10,
- DA9063_ID_LDO11,
};
/* Regulators platform data */
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..ab16ad283def
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for the Ingenic JZ47xx TCU driver
+ */
+#ifndef __LINUX_MFD_INGENIC_TCU_H_
+#define __LINUX_MFD_INGENIC_TCU_H_
+
+#include <linux/bitops.h>
+
+#define TCU_REG_WDT_TDR 0x00
+#define TCU_REG_WDT_TCER 0x04
+#define TCU_REG_WDT_TCNT 0x08
+#define TCU_REG_WDT_TCSR 0x0c
+#define TCU_REG_TER 0x10
+#define TCU_REG_TESR 0x14
+#define TCU_REG_TECR 0x18
+#define TCU_REG_TSR 0x1c
+#define TCU_REG_TFR 0x20
+#define TCU_REG_TFSR 0x24
+#define TCU_REG_TFCR 0x28
+#define TCU_REG_TSSR 0x2c
+#define TCU_REG_TMR 0x30
+#define TCU_REG_TMSR 0x34
+#define TCU_REG_TMCR 0x38
+#define TCU_REG_TSCR 0x3c
+#define TCU_REG_TDFR0 0x40
+#define TCU_REG_TDHR0 0x44
+#define TCU_REG_TCNT0 0x48
+#define TCU_REG_TCSR0 0x4c
+#define TCU_REG_OST_DR 0xe0
+#define TCU_REG_OST_CNTL 0xe4
+#define TCU_REG_OST_CNTH 0xe8
+#define TCU_REG_OST_TCSR 0xec
+#define TCU_REG_TSTR 0xf0
+#define TCU_REG_TSTSR 0xf4
+#define TCU_REG_TSTCR 0xf8
+#define TCU_REG_OST_CNTHBUF 0xfc
+
+#define TCU_TCSR_RESERVED_BITS 0x3f
+#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
+#define TCU_TCSR_PRESCALE_LSB 3
+#define TCU_TCSR_PRESCALE_MASK 0x38
+
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: shut down abruptly, 1: gracefully */
+#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
+#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
+
+#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
+
+#define TCU_CHANNEL_STRIDE 0x10
+#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
+
+#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
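/*
 * Worked example for the per-channel helpers above: each channel adds
 * TCU_CHANNEL_STRIDE (0x10), so TCU_REG_TCSRc(2) == 0x4c + 2 * 0x10 == 0x6c.
 * The regmap handle below is hypothetical, and a real driver would normally
 * use regmap_update_bits() to preserve the other TCSR fields.
 */
static int tcu_enable_pwm2(struct regmap *map)
{
	return regmap_write(map, TCU_REG_TCSRc(2), TCU_TCSR_PWM_EN);
}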
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a617bc9..317e8608cf41 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
+ * Core interface for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_INTEL_MSIC_H__
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..ed1dfba5e5f9 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic.h - Intel SoC PMIC Driver
+ * Intel SoC PMIC Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
@@ -25,6 +17,7 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index c332681848ef..fe69c0f4398f 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -148,6 +148,7 @@ struct snd_soc_dapm_context;
* @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
* @pdata: our pdata
* @irq_dev: the irqchip child driver device
+ * @irq_data: pointer to irqchip data for the child irqchip driver
* @irq: host irq number from SPI or I2C configuration
* @out_clamp: indicates output clamp state for each analogue output
* @out_shorted: indicates short circuit state for each analogue output
@@ -175,6 +176,7 @@ struct madera {
struct madera_pdata pdata;
struct device *irq_dev;
+ struct regmap_irq_chip_data *irq_data;
int irq;
unsigned int num_micbias;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 0b311f39c8f4..8dc852402dbb 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -24,7 +24,6 @@
struct gpio_desc;
struct pinctrl_map;
-struct madera_irqchip_pdata;
struct madera_codec_pdata;
/**
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..a21374f8ad26 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
* Copyright (C) 2014 Samsung Electrnoics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..8b3ef891ba42 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577.h - Driver for the Maxim 14577/77836
*
@@ -5,16 +6,6 @@
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver is based on max8997.h
*
* MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..833e578e051e 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electrnoics
* Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77686_PRIV_H
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..d0fb510875e6 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686.h - Driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electrnoics
* Chiwoong Byun <woong.byun@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..a5bce099f1ed 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common data shared between Maxim 77693 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_MAX77693_COMMON_H
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..e798c81aec31 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
@@ -5,20 +6,6 @@
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77693_PRIV_H
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..c67c16ba8649 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693.h - Driver for the Maxim 77693
*
@@ -6,20 +7,6 @@
*
* This program is not provided / owned by Maxim Integrated Products.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index b8908bf8d315..0bc7454c4dbe 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common variables for the Maxim MAX77843 driver
*
* Copyright (C) 2015 Samsung Electronics
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MAX77843_PRIVATE_H_
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..a10cd6945232 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
* Copyright (C) 2010 Samsung Electrnoics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8997_PRIV_H
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..e955e2f0a2cc 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997.h - Driver for the Maxim 8997/8966
*
* Copyright (C) 2009-2010 Samsung Electrnoics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8998.h
*
* MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -178,7 +165,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..6deb5f577602 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
* Copyright (C) 2009-2010 Samsung Electrnoics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..061af220dcd3 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
* Copyright (C) 2009-2010 Samsung Electrnoics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 54a3cd808f9e..2ad9bdc0a5ec 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -249,6 +249,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index a528747f8aed..fd194bfc836f 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -1,112 +1,127 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2018 ROHM Semiconductors */
-#ifndef __LINUX_MFD_BD71837_H__
-#define __LINUX_MFD_BD71837_H__
+#ifndef __LINUX_MFD_BD718XX_H__
+#define __LINUX_MFD_BD718XX_H__
#include <linux/regmap.h>
enum {
- BD71837_BUCK1 = 0,
- BD71837_BUCK2,
- BD71837_BUCK3,
- BD71837_BUCK4,
- BD71837_BUCK5,
- BD71837_BUCK6,
- BD71837_BUCK7,
- BD71837_BUCK8,
- BD71837_LDO1,
- BD71837_LDO2,
- BD71837_LDO3,
- BD71837_LDO4,
- BD71837_LDO5,
- BD71837_LDO6,
- BD71837_LDO7,
- BD71837_REGULATOR_CNT,
+ BD718XX_TYPE_BD71837 = 0,
+ BD718XX_TYPE_BD71847,
+ BD718XX_TYPE_AMOUNT
};
-#define BD71837_BUCK1_VOLTAGE_NUM 0x40
-#define BD71837_BUCK2_VOLTAGE_NUM 0x40
-#define BD71837_BUCK3_VOLTAGE_NUM 0x40
-#define BD71837_BUCK4_VOLTAGE_NUM 0x40
+enum {
+ BD718XX_BUCK1 = 0,
+ BD718XX_BUCK2,
+ BD718XX_BUCK3,
+ BD718XX_BUCK4,
+ BD718XX_BUCK5,
+ BD718XX_BUCK6,
+ BD718XX_BUCK7,
+ BD718XX_BUCK8,
+ BD718XX_LDO1,
+ BD718XX_LDO2,
+ BD718XX_LDO3,
+ BD718XX_LDO4,
+ BD718XX_LDO5,
+ BD718XX_LDO6,
+ BD718XX_LDO7,
+ BD718XX_REGULATOR_AMOUNT,
+};
+
+/* Common voltage configurations */
+#define BD718XX_DVS_BUCK_VOLTAGE_NUM 0x3D
+#define BD718XX_4TH_NODVS_BUCK_VOLTAGE_NUM 0x3D
+
+#define BD718XX_LDO1_VOLTAGE_NUM 0x08
+#define BD718XX_LDO2_VOLTAGE_NUM 0x02
+#define BD718XX_LDO3_VOLTAGE_NUM 0x10
+#define BD718XX_LDO4_VOLTAGE_NUM 0x0A
+#define BD718XX_LDO6_VOLTAGE_NUM 0x0A
-#define BD71837_BUCK5_VOLTAGE_NUM 0x08
+/* BD71837 specific voltage configurations */
+#define BD71837_BUCK5_VOLTAGE_NUM 0x10
#define BD71837_BUCK6_VOLTAGE_NUM 0x04
#define BD71837_BUCK7_VOLTAGE_NUM 0x08
-#define BD71837_BUCK8_VOLTAGE_NUM 0x40
-
-#define BD71837_LDO1_VOLTAGE_NUM 0x04
-#define BD71837_LDO2_VOLTAGE_NUM 0x02
-#define BD71837_LDO3_VOLTAGE_NUM 0x10
-#define BD71837_LDO4_VOLTAGE_NUM 0x10
#define BD71837_LDO5_VOLTAGE_NUM 0x10
-#define BD71837_LDO6_VOLTAGE_NUM 0x10
#define BD71837_LDO7_VOLTAGE_NUM 0x10
+/* BD71847 specific voltage configurations */
+#define BD71847_BUCK3_VOLTAGE_NUM 0x18
+#define BD71847_BUCK4_VOLTAGE_NUM 0x08
+#define BD71847_LDO5_VOLTAGE_NUM 0x20
+
+/* Registers specific to BD71837 */
+enum {
+ BD71837_REG_BUCK3_CTRL = 0x07,
+ BD71837_REG_BUCK4_CTRL = 0x08,
+ BD71837_REG_BUCK3_VOLT_RUN = 0x12,
+ BD71837_REG_BUCK4_VOLT_RUN = 0x13,
+ BD71837_REG_LDO7_VOLT = 0x1E,
+};
+
+/* Registers common for BD71837 and BD71847 */
enum {
- BD71837_REG_REV = 0x00,
- BD71837_REG_SWRESET = 0x01,
- BD71837_REG_I2C_DEV = 0x02,
- BD71837_REG_PWRCTRL0 = 0x03,
- BD71837_REG_PWRCTRL1 = 0x04,
- BD71837_REG_BUCK1_CTRL = 0x05,
- BD71837_REG_BUCK2_CTRL = 0x06,
- BD71837_REG_BUCK3_CTRL = 0x07,
- BD71837_REG_BUCK4_CTRL = 0x08,
- BD71837_REG_BUCK5_CTRL = 0x09,
- BD71837_REG_BUCK6_CTRL = 0x0A,
- BD71837_REG_BUCK7_CTRL = 0x0B,
- BD71837_REG_BUCK8_CTRL = 0x0C,
- BD71837_REG_BUCK1_VOLT_RUN = 0x0D,
- BD71837_REG_BUCK1_VOLT_IDLE = 0x0E,
- BD71837_REG_BUCK1_VOLT_SUSP = 0x0F,
- BD71837_REG_BUCK2_VOLT_RUN = 0x10,
- BD71837_REG_BUCK2_VOLT_IDLE = 0x11,
- BD71837_REG_BUCK3_VOLT_RUN = 0x12,
- BD71837_REG_BUCK4_VOLT_RUN = 0x13,
- BD71837_REG_BUCK5_VOLT = 0x14,
- BD71837_REG_BUCK6_VOLT = 0x15,
- BD71837_REG_BUCK7_VOLT = 0x16,
- BD71837_REG_BUCK8_VOLT = 0x17,
- BD71837_REG_LDO1_VOLT = 0x18,
- BD71837_REG_LDO2_VOLT = 0x19,
- BD71837_REG_LDO3_VOLT = 0x1A,
- BD71837_REG_LDO4_VOLT = 0x1B,
- BD71837_REG_LDO5_VOLT = 0x1C,
- BD71837_REG_LDO6_VOLT = 0x1D,
- BD71837_REG_LDO7_VOLT = 0x1E,
- BD71837_REG_TRANS_COND0 = 0x1F,
- BD71837_REG_TRANS_COND1 = 0x20,
- BD71837_REG_VRFAULTEN = 0x21,
- BD71837_REG_MVRFLTMASK0 = 0x22,
- BD71837_REG_MVRFLTMASK1 = 0x23,
- BD71837_REG_MVRFLTMASK2 = 0x24,
- BD71837_REG_RCVCFG = 0x25,
- BD71837_REG_RCVNUM = 0x26,
- BD71837_REG_PWRONCONFIG0 = 0x27,
- BD71837_REG_PWRONCONFIG1 = 0x28,
- BD71837_REG_RESETSRC = 0x29,
- BD71837_REG_MIRQ = 0x2A,
- BD71837_REG_IRQ = 0x2B,
- BD71837_REG_IN_MON = 0x2C,
- BD71837_REG_POW_STATE = 0x2D,
- BD71837_REG_OUT32K = 0x2E,
- BD71837_REG_REGLOCK = 0x2F,
- BD71837_REG_OTPVER = 0xFF,
- BD71837_MAX_REGISTER = 0x100,
+ BD718XX_REG_REV = 0x00,
+ BD718XX_REG_SWRESET = 0x01,
+ BD718XX_REG_I2C_DEV = 0x02,
+ BD718XX_REG_PWRCTRL0 = 0x03,
+ BD718XX_REG_PWRCTRL1 = 0x04,
+ BD718XX_REG_BUCK1_CTRL = 0x05,
+ BD718XX_REG_BUCK2_CTRL = 0x06,
+ BD718XX_REG_1ST_NODVS_BUCK_CTRL = 0x09,
+ BD718XX_REG_2ND_NODVS_BUCK_CTRL = 0x0A,
+ BD718XX_REG_3RD_NODVS_BUCK_CTRL = 0x0B,
+ BD718XX_REG_4TH_NODVS_BUCK_CTRL = 0x0C,
+ BD718XX_REG_BUCK1_VOLT_RUN = 0x0D,
+ BD718XX_REG_BUCK1_VOLT_IDLE = 0x0E,
+ BD718XX_REG_BUCK1_VOLT_SUSP = 0x0F,
+ BD718XX_REG_BUCK2_VOLT_RUN = 0x10,
+ BD718XX_REG_BUCK2_VOLT_IDLE = 0x11,
+ BD718XX_REG_1ST_NODVS_BUCK_VOLT = 0x14,
+ BD718XX_REG_2ND_NODVS_BUCK_VOLT = 0x15,
+ BD718XX_REG_3RD_NODVS_BUCK_VOLT = 0x16,
+ BD718XX_REG_4TH_NODVS_BUCK_VOLT = 0x17,
+ BD718XX_REG_LDO1_VOLT = 0x18,
+ BD718XX_REG_LDO2_VOLT = 0x19,
+ BD718XX_REG_LDO3_VOLT = 0x1A,
+ BD718XX_REG_LDO4_VOLT = 0x1B,
+ BD718XX_REG_LDO5_VOLT = 0x1C,
+ BD718XX_REG_LDO6_VOLT = 0x1D,
+ BD718XX_REG_TRANS_COND0 = 0x1F,
+ BD718XX_REG_TRANS_COND1 = 0x20,
+ BD718XX_REG_VRFAULTEN = 0x21,
+ BD718XX_REG_MVRFLTMASK0 = 0x22,
+ BD718XX_REG_MVRFLTMASK1 = 0x23,
+ BD718XX_REG_MVRFLTMASK2 = 0x24,
+ BD718XX_REG_RCVCFG = 0x25,
+ BD718XX_REG_RCVNUM = 0x26,
+ BD718XX_REG_PWRONCONFIG0 = 0x27,
+ BD718XX_REG_PWRONCONFIG1 = 0x28,
+ BD718XX_REG_RESETSRC = 0x29,
+ BD718XX_REG_MIRQ = 0x2A,
+ BD718XX_REG_IRQ = 0x2B,
+ BD718XX_REG_IN_MON = 0x2C,
+ BD718XX_REG_POW_STATE = 0x2D,
+ BD718XX_REG_OUT32K = 0x2E,
+ BD718XX_REG_REGLOCK = 0x2F,
+ BD718XX_REG_OTPVER = 0xFF,
+ BD718XX_MAX_REGISTER = 0x100,
};
#define REGLOCK_PWRSEQ 0x1
#define REGLOCK_VREG 0x10
/* Generic BUCK control masks */
-#define BD71837_BUCK_SEL 0x02
-#define BD71837_BUCK_EN 0x01
-#define BD71837_BUCK_RUN_ON 0x04
+#define BD718XX_BUCK_SEL 0x02
+#define BD718XX_BUCK_EN 0x01
+#define BD718XX_BUCK_RUN_ON 0x04
/* Generic LDO masks */
-#define BD71837_LDO_SEL 0x80
-#define BD71837_LDO_EN 0x40
+#define BD718XX_LDO_SEL 0x80
+#define BD718XX_LDO_EN 0x40
/* BD71837 BUCK ramp rate CTRL reg bits */
#define BUCK_RAMPRATE_MASK 0xC0
@@ -115,51 +130,64 @@ enum {
#define BUCK_RAMPRATE_2P50MV 0x2
#define BUCK_RAMPRATE_1P25MV 0x3
-/* BD71837_REG_BUCK1_VOLT_RUN bits */
-#define BUCK1_RUN_MASK 0x3F
-#define BUCK1_RUN_DEFAULT 0x14
-
-/* BD71837_REG_BUCK1_VOLT_SUSP bits */
-#define BUCK1_SUSP_MASK 0x3F
-#define BUCK1_SUSP_DEFAULT 0x14
-
-/* BD71837_REG_BUCK1_VOLT_IDLE bits */
-#define BUCK1_IDLE_MASK 0x3F
-#define BUCK1_IDLE_DEFAULT 0x14
-
-/* BD71837_REG_BUCK2_VOLT_RUN bits */
-#define BUCK2_RUN_MASK 0x3F
-#define BUCK2_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK2_VOLT_IDLE bits */
-#define BUCK2_IDLE_MASK 0x3F
-#define BUCK2_IDLE_DEFAULT 0x14
-
-/* BD71837_REG_BUCK3_VOLT_RUN bits */
-#define BUCK3_RUN_MASK 0x3F
-#define BUCK3_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK4_VOLT_RUN bits */
-#define BUCK4_RUN_MASK 0x3F
-#define BUCK4_RUN_DEFAULT 0x1E
-
-/* BD71837_REG_BUCK5_VOLT bits */
-#define BUCK5_MASK 0x07
-#define BUCK5_DEFAULT 0x02
-
-/* BD71837_REG_BUCK6_VOLT bits */
-#define BUCK6_MASK 0x03
-#define BUCK6_DEFAULT 0x03
-
-/* BD71837_REG_BUCK7_VOLT bits */
-#define BUCK7_MASK 0x07
-#define BUCK7_DEFAULT 0x03
-
-/* BD71837_REG_BUCK8_VOLT bits */
-#define BUCK8_MASK 0x3F
-#define BUCK8_DEFAULT 0x1E
-
-/* BD71837_REG_IRQ bits */
+#define DVS_BUCK_RUN_MASK 0x3F
+#define DVS_BUCK_SUSP_MASK 0x3F
+#define DVS_BUCK_IDLE_MASK 0x3F
+
+#define BD718XX_1ST_NODVS_BUCK_MASK 0x07
+#define BD718XX_3RD_NODVS_BUCK_MASK 0x07
+#define BD718XX_4TH_NODVS_BUCK_MASK 0x3F
+
+#define BD71847_BUCK3_MASK 0x07
+#define BD71847_BUCK3_RANGE_MASK 0xC0
+#define BD71847_BUCK4_MASK 0x03
+#define BD71847_BUCK4_RANGE_MASK 0x40
+
+#define BD71837_BUCK5_MASK 0x07
+#define BD71837_BUCK5_RANGE_MASK 0x80
+#define BD71837_BUCK6_MASK 0x03
+
+#define BD718XX_LDO1_MASK 0x03
+#define BD718XX_LDO1_RANGE_MASK 0x20
+#define BD718XX_LDO2_MASK 0x20
+#define BD718XX_LDO3_MASK 0x0F
+#define BD718XX_LDO4_MASK 0x0F
+#define BD718XX_LDO6_MASK 0x0F
+
+#define BD71837_LDO5_MASK 0x0F
+#define BD71847_LDO5_MASK 0x0F
+#define BD71847_LDO5_RANGE_MASK 0x20
+
+#define BD71837_LDO7_MASK 0x0F
+
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80 0x1
+#define BD718XX_BUCK1_VRMON130 0x2
+#define BD718XX_BUCK2_VRMON80 0x4
+#define BD718XX_BUCK2_VRMON130 0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80 0x1
+#define BD718XX_LDO2_VRMON80 0x2
+#define BD718XX_LDO3_VRMON80 0x4
+#define BD718XX_LDO4_VRMON80 0x8
+#define BD718XX_LDO5_VRMON80 0x10
+#define BD718XX_LDO6_VRMON80 0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80 0x10
+#define BD71837_BUCK3_VRMON130 0x20
+#define BD71837_BUCK4_VRMON80 0x40
+#define BD71837_BUCK4_VRMON130 0x80
+#define BD71837_LDO7_VRMON80 0x40
+
+/* BD718XX_REG_IRQ bits */
#define IRQ_SWRST 0x40
#define IRQ_PWRON_S 0x20
#define IRQ_PWRON_L 0x10
@@ -168,52 +196,31 @@ enum {
#define IRQ_ON_REQ 0x02
#define IRQ_STBY_REQ 0x01
-/* BD71837_REG_OUT32K bits */
-#define BD71837_OUT32K_EN 0x01
+/* BD718XX_REG_OUT32K bits */
+#define BD718XX_OUT32K_EN 0x01
-/* BD71837 gated clock rate */
-#define BD71837_CLK_RATE 32768
+/* BD7183XX gated clock rate */
+#define BD718XX_CLK_RATE 32768
-/* ROHM BD71837 irqs */
+/* ROHM BD718XX irqs */
enum {
- BD71837_INT_STBY_REQ,
- BD71837_INT_ON_REQ,
- BD71837_INT_WDOG,
- BD71837_INT_PWRBTN,
- BD71837_INT_PWRBTN_L,
- BD71837_INT_PWRBTN_S,
- BD71837_INT_SWRST
+ BD718XX_INT_STBY_REQ,
+ BD718XX_INT_ON_REQ,
+ BD718XX_INT_WDOG,
+ BD718XX_INT_PWRBTN,
+ BD718XX_INT_PWRBTN_L,
+ BD718XX_INT_PWRBTN_S,
+ BD718XX_INT_SWRST
};
-/* ROHM BD71837 interrupt masks */
-#define BD71837_INT_SWRST_MASK 0x40
-#define BD71837_INT_PWRBTN_S_MASK 0x20
-#define BD71837_INT_PWRBTN_L_MASK 0x10
-#define BD71837_INT_PWRBTN_MASK 0x8
-#define BD71837_INT_WDOG_MASK 0x4
-#define BD71837_INT_ON_REQ_MASK 0x2
-#define BD71837_INT_STBY_REQ_MASK 0x1
-
-/* BD71837_REG_LDO1_VOLT bits */
-#define LDO1_MASK 0x03
-
-/* BD71837_REG_LDO1_VOLT bits */
-#define LDO2_MASK 0x20
-
-/* BD71837_REG_LDO3_VOLT bits */
-#define LDO3_MASK 0x0F
-
-/* BD71837_REG_LDO4_VOLT bits */
-#define LDO4_MASK 0x0F
-
-/* BD71837_REG_LDO5_VOLT bits */
-#define LDO5_MASK 0x0F
-
-/* BD71837_REG_LDO6_VOLT bits */
-#define LDO6_MASK 0x0F
-
-/* BD71837_REG_LDO7_VOLT bits */
-#define LDO7_MASK 0x0F
+/* ROHM BD718XX interrupt masks */
+#define BD718XX_INT_SWRST_MASK 0x40
+#define BD718XX_INT_PWRBTN_S_MASK 0x20
+#define BD718XX_INT_PWRBTN_L_MASK 0x10
+#define BD718XX_INT_PWRBTN_MASK 0x8
+#define BD718XX_INT_WDOG_MASK 0x4
+#define BD718XX_INT_ON_REQ_MASK 0x2
+#define BD718XX_INT_STBY_REQ_MASK 0x1
/* Register write induced reset settings */
@@ -223,13 +230,13 @@ enum {
* write 1 to it we will trigger the action. So always write 0 to it when
* changning SWRESET action - no matter what we read from it.
*/
-#define BD71837_SWRESET_TYPE_MASK 7
-#define BD71837_SWRESET_TYPE_DISABLED 0
-#define BD71837_SWRESET_TYPE_COLD 4
-#define BD71837_SWRESET_TYPE_WARM 6
+#define BD718XX_SWRESET_TYPE_MASK 7
+#define BD718XX_SWRESET_TYPE_DISABLED 0
+#define BD718XX_SWRESET_TYPE_COLD 4
+#define BD718XX_SWRESET_TYPE_WARM 6
-#define BD71837_SWRESET_RESET_MASK 1
-#define BD71837_SWRESET_RESET 1
+#define BD718XX_SWRESET_RESET_MASK 1
+#define BD718XX_SWRESET_RESET 1
/* Poweroff state transition conditions */
@@ -314,10 +321,10 @@ enum {
BD718XX_PWRBTN_LONG_PRESS_15S
};
-struct bd71837_pmic;
-struct bd71837_clk;
+struct bd718xx_clk;
-struct bd71837 {
+struct bd718xx {
+ unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
unsigned long int id;
@@ -325,8 +332,7 @@ struct bd71837 {
int chip_irq;
struct regmap_irq_chip_data *irq_data;
- struct bd71837_pmic *pmic;
- struct bd71837_clk *clk;
+ struct bd718xx_clk *clk;
};
-#endif /* __LINUX_MFD_BD71837_H__ */
+#endif /* __LINUX_MFD_BD718XX_H__ */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 28f4ae76271d..3ca17eb89aa2 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * core.h
- *
- * copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_CORE_H
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..6cfe4201a106 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
-/* irq.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_IRQ_H
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 9ed2871ea335..0204decfc9aa 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
-/* rtc.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_SEC_RTC_H
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..6e7668a389a1 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps11.h
- *
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPS11_H
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps13.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps14.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mpu02.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
index e025418e5589..c534f086ca16 100644
--- a/include/linux/mfd/samsung/s5m8763.h
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -1,13 +1,7 @@
-/* s5m8763.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8763_H
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
-/* s5m8767.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index e06f5f79eaef..6c1ad160ed87 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -457,4 +457,7 @@
#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
+/* imx6sll iomux gpr register field definitions */
+#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..1ef51ed36be5 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/* Notifier event */
#define LMU_EVENT_MONITOR_DONE 0x01
@@ -81,7 +82,7 @@ enum lm363x_regulator_id {
struct ti_lmu {
struct device *dev;
struct regmap *regmap;
- int en_gpio;
+ struct gpio_desc *en_gpio;
struct blocking_notifier_head notifier;
};
#endif
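With en_gpio now a GPIO descriptor instead of a legacy GPIO number, callers of this struct are expected to go through the gpiod consumer API. A minimal sketch of acquiring and driving the line, assuming a hypothetical "enable" con_id and a devm-managed probe path (neither detail is taken from the driver itself):

#include <linux/gpio/consumer.h>
#include <linux/mfd/ti-lmu.h>

/* Illustrative only: request the enable line as a descriptor and assert it. */
static int ti_lmu_enable_sketch(struct device *dev, struct ti_lmu *lmu)
{
	lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lmu->en_gpio))
		return PTR_ERR(lmu->en_gpio);

	/* De-assert later (e.g. on remove) through the same descriptor. */
	gpiod_set_value_cansleep(lmu->en_gpio, 0);
	return 0;
}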
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 77866214ab51..e2687a30e5a1 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -54,19 +54,8 @@
* idle before writing to some registers.
*/
#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-/*
- * A GPIO is used for card hotplug detection. We need an extra flag for this,
- * because 0 is a valid GPIO number too, and requiring users to specify
- * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
- */
-#define TMIO_MMC_USE_GPIO_CD BIT(5)
-/*
- * Some controllers doesn't have over 0x100 register.
- * it is used to checking accessibility of
- * CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL
- */
-#define TMIO_MMC_HAVE_HIGH_REG BIT(6)
+/* BIT(5) is unused */
/*
* Some controllers have CMD12 automatically
@@ -111,7 +100,6 @@ struct tmio_mmc_data {
unsigned long capabilities2;
unsigned long flags;
u32 ocr_mask; /* available voltages */
- unsigned int cd_gpio;
int alignment_shift;
dma_addr_t dma_rx_offset;
unsigned int max_blk_count;
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index b19c370fe81a..f346167c0e00 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -20,9 +20,6 @@
#define WM8994_NUM_AIF 3
struct wm8994_ldo_pdata {
- /** GPIOs to enable regulator, 0 or less if not available */
- int enable;
-
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 472fa4d4ea62..7361cd3fddc1 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -31,6 +31,7 @@
#define PHY_ID_KSZ8081 0x00221560
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
+#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f2b4abbca55e..e13d9bf2f9a5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -29,7 +29,7 @@ enum migrate_reason {
};
/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
-extern char *migrate_reason_names[MR_TYPES];
+extern const char *migrate_reason_names[MR_TYPES];
static inline struct page *new_page_nodemask(struct page *page,
int preferred_nid, nodemask_t *nodemask)
@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode,
+ struct page *newpage, struct page *page, enum migrate_mode mode,
int extra_count);
#else
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 55000ee5c6ad..6fee8b1a4400 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -10,6 +10,7 @@
#include <linux/if.h>
+#include <linux/linkmode.h>
#include <uapi/linux/mii.h>
struct ethtool_cmd;
@@ -132,6 +133,34 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_adv_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, advertising))
+ result |= ADVERTISE_10HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, advertising))
+ result |= ADVERTISE_10FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, advertising))
+ result |= ADVERTISE_100HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, advertising))
+ result |= ADVERTISE_100FULL;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
* mii_adv_to_ethtool_adv_t
* @adv: value of the MII_ADVERTISE register
*
@@ -179,6 +208,28 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
}
/**
+ * linkmode_adv_to_mii_ctrl1000_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising))
+ result |= ADVERTISE_1000HALF;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising))
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
* mii_ctrl1000_to_ethtool_adv_t
* @adv: value of the MII_CTRL1000 register
*
@@ -237,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
}
/**
+ * mii_stat1000_mod_linkmode_lpa_t
+ * @advertising: target the linkmode advertisement settings
+ * @adv: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings. Other bits in
+ * advertising are not changed.
+ */
+static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_1000FULL);
+}
+
+/**
* ethtool_adv_to_mii_adv_x
* @ethadv: the ethtool advertisement settings
*
@@ -303,6 +373,110 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_adv_mod_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits to
+ * linkmode advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising, adv & ADVERTISE_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising, adv & ADVERTISE_10FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising, adv & ADVERTISE_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising, adv & ADVERTISE_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ adv & ADVERTISE_PAUSE_CAP);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising, adv & ADVERTISE_PAUSE_ASYM);
+}
+
+/**
+ * mii_adv_to_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to linkmode advertisement settings. Clears the old value
+ * of advertising.
+ */
+static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_zero(advertising);
+
+ mii_adv_mod_linkmode_adv_t(advertising, adv);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode
+ * LP advertisement settings. Clears the old value of lp_advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
+
+ if (lpa & LPA_LPACK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising);
+
+}
+
+/**
+ * mii_lpa_mod_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode
+ * LP advertisement settings. Leaves
+ * other bits unchanged.
+ */
+static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising, lpa & LPA_LPACK);
+}
+
+/**
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising: pointer to linkmode advertising
+ *
+ * A small helper function that translates linkmode advertising to
+ * local pause capabilities.
+ */
+static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
+{
+ u32 lcl_adv = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ return lcl_adv;
+}
+
+/**
* mii_advertise_flowctrl - get flow control advertisement flags
* @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
*/
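The new linkmode_* and *_mod_linkmode_* helpers above take a PHY driver from its linkmode bitmaps to raw MII register bits and back without open-coded bit tests. A rough usage sketch, with register I/O left to the caller; the bitmap parameters stand in for phydev->advertising / phydev->lp_advertising:

#include <linux/mii.h>
#include <linux/linkmode.h>

/* Illustrative: build the MII_ADVERTISE value from a linkmode bitmap and
 * fold the partner's MII_STAT1000 bits back into another bitmap.
 */
static u32 mii_linkmode_sketch(unsigned long *advertising,
			       unsigned long *lp_advertising, u16 stat1000)
{
	/* MII_STAT1000 -> link-partner linkmode; other bits stay untouched */
	mii_stat1000_mod_linkmode_lpa_t(lp_advertising, stat1000);

	/* linkmode -> MII_ADVERTISE register value (caller writes it to the PHY) */
	return linkmode_adv_to_mii_adv_t(advertising);
}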
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index dca6ab4eaa99..36e412c3d657 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -226,6 +226,7 @@ enum {
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+ MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};
enum {
@@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed, int timestamp_en);
+ unsigned int vector, int collapsed, int timestamp_en,
+ void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags, u8 usage);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 0ef6138eca49..612c8c2f2466 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,7 +60,8 @@ struct mlx5_core_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
- struct mlx5_eq *eq;
+ struct mlx5_eq_comp *eq;
+ u16 uid;
};
@@ -124,9 +125,9 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
- CQE_SIZE_128_PAD = 2,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -134,8 +135,8 @@ enum {
static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return padding_128_en ? CQE_SIZE_128_PAD :
- size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+ size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 11fa4e66afc5..8c4a820bd4c1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -212,6 +212,13 @@ enum {
MLX5_PFAULT_SUBTYPE_RDMA = 1,
};
+enum wqe_page_fault_type {
+ MLX5_WQE_PF_TYPE_RMP = 0,
+ MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
+ MLX5_WQE_PF_TYPE_RESP = 2,
+ MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
+};
+
enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
@@ -294,9 +301,15 @@ enum {
MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
+/* mlx5 components can subscribe to any one of these events via
+ * the mlx5_eq_notifier_register API.
+ */
enum mlx5_event {
+ /* Special value to subscribe to any event */
+ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
+ /* HW events enum start: comp events are not subscribable */
MLX5_EVENT_TYPE_COMP = 0x0,
-
+ /* HW Async events enum start: subscribable events */
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@@ -317,6 +330,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
+ MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -334,6 +348,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+
+ MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
};
enum {
@@ -405,6 +421,7 @@ enum {
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
+ MLX5_OPCODE_ENHANCED_MPSW = 0x29,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
@@ -504,6 +521,10 @@ struct health_buffer {
__be16 ext_synd;
};
+enum mlx5_cmd_addr_l_sz_offset {
+ MLX5_NIC_IFC_OFFSET = 8,
+};
+
struct mlx5_init_seg {
__be32 fw_rev;
__be32 cmdif_rev_fw_sub;
@@ -762,6 +783,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
+{
+ return cqe->op_own >> 4;
+}
+
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
@@ -1120,6 +1146,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
+
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7a452716de4b..54299251d40d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -46,10 +46,11 @@
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
+#include <linux/notifier.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
@@ -85,26 +86,15 @@ enum {
};
enum {
- MLX5_EQ_VEC_PAGES = 0,
- MLX5_EQ_VEC_CMD = 1,
- MLX5_EQ_VEC_ASYNC = 2,
- MLX5_EQ_VEC_PFAULT = 3,
- MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
- MLX5_MAX_IRQ_NAME = 32
-};
-
-enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFFSET = 16,
+ MLX5_ATOMIC_MODE_IB_COMP = 1,
+ MLX5_ATOMIC_MODE_CX = 2,
+ MLX5_ATOMIC_MODE_8B = 3,
+ MLX5_ATOMIC_MODE_16B = 4,
+ MLX5_ATOMIC_MODE_32B = 5,
+ MLX5_ATOMIC_MODE_64B = 6,
+ MLX5_ATOMIC_MODE_128B = 7,
+ MLX5_ATOMIC_MODE_256B = 8,
};
enum {
@@ -133,6 +123,7 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -162,16 +153,11 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
-enum mlx5_dct_atomic_mode {
- MLX5_ATOMIC_MODE_DCT_OFF = 20,
- MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
- MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
-};
-
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};
enum mlx5_page_fault_resume_flags {
@@ -208,16 +194,7 @@ struct mlx5_rsc_debug {
};
enum mlx5_dev_event {
- MLX5_DEV_EVENT_SYS_ERROR,
- MLX5_DEV_EVENT_PORT_UP,
- MLX5_DEV_EVENT_PORT_DOWN,
- MLX5_DEV_EVENT_PORT_INITIALIZED,
- MLX5_DEV_EVENT_LID_CHANGE,
- MLX5_DEV_EVENT_PKEY_CHANGE,
- MLX5_DEV_EVENT_GUID_CHANGE,
- MLX5_DEV_EVENT_CLIENT_REREG,
- MLX5_DEV_EVENT_PPS,
- MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+ MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
};
enum mlx5_port_status {
@@ -225,14 +202,6 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-enum mlx5_eq_type {
- MLX5_EQ_TYPE_COMP,
- MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- MLX5_EQ_TYPE_PF,
-#endif
-};
-
struct mlx5_bfreg_info {
u32 *sys_pages;
int num_low_latency_bfregs;
@@ -300,6 +269,8 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ struct mlx5_nb nb;
+
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
@@ -360,60 +331,15 @@ struct mlx5_frag_buf {
};
struct mlx5_frag_buf_ctrl {
- struct mlx5_frag_buf frag_buf;
+ struct mlx5_buf_list *frags;
u32 sz_m1;
- u32 frag_sz_m1;
- u32 strides_offset;
+ u16 frag_sz_m1;
+ u16 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
};
-struct mlx5_eq_tasklet {
- struct list_head list;
- struct list_head process_list;
- struct tasklet_struct task;
- /* lock on completion tasklet list */
- spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
- struct work_struct work;
- /* Pagefaults lock */
- spinlock_t lock;
- struct workqueue_struct *wq;
- mempool_t *pool;
-};
-
-struct mlx5_cq_table {
- /* protect radix tree */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
-struct mlx5_eq {
- struct mlx5_core_dev *dev;
- struct mlx5_cq_table cq_table;
- __be32 __iomem *doorbell;
- u32 cons_index;
- struct mlx5_frag_buf buf;
- int size;
- unsigned int irqn;
- u8 eqn;
- int nent;
- u64 mask;
- struct list_head list;
- int index;
- struct mlx5_rsc_debug *dbg;
- enum mlx5_eq_type type;
- union {
- struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq_pagefault pf_ctx;
-#endif
- };
-};
-
struct mlx5_core_psv {
u32 psv_idx;
struct psv_layout {
@@ -466,35 +392,6 @@ struct mlx5_core_rsc_common {
struct completion free;
};
-struct mlx5_core_srq {
- struct mlx5_core_rsc_common common; /* must be first */
- u32 srqn;
- int max;
- size_t max_gs;
- size_t max_avail_gather;
- int wqe_shift;
- void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
- atomic_t refcount;
- struct completion free;
-};
-
-struct mlx5_eq_table {
- void __iomem *update_ci;
- void __iomem *update_arm_ci;
- struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq async_eq;
- struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq pfault_eq;
-#endif
- int num_comp_vectors;
- /* protect EQs list
- */
- spinlock_t lock;
-};
-
struct mlx5_uars_page {
void __iomem *map;
bool wc;
@@ -544,13 +441,8 @@ struct mlx5_core_health {
};
struct mlx5_qp_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
+ struct notifier_block nb;
-struct mlx5_srq_table {
/* protect radix tree
*/
spinlock_t lock;
@@ -577,16 +469,12 @@ struct mlx5_core_sriov {
int enabled_vfs;
};
-struct mlx5_irq_info {
- cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
-};
-
struct mlx5_fc_stats {
- struct rb_root counters;
- struct list_head addlist;
- /* protect addlist add/splice operations */
- spinlock_t addlist_lock;
+ spinlock_t counters_idr_lock; /* protects counters_idr */
+ struct idr counters_idr;
+ struct list_head counters;
+ struct llist_head addlist;
+ struct llist_head dellist;
struct workqueue_struct *wq;
struct delayed_work work;
@@ -594,10 +482,12 @@ struct mlx5_fc_stats {
unsigned long sampling_interval; /* jiffies */
};
+struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_eq_table;
struct mlx5_rate_limit {
u32 rate;
@@ -620,37 +510,12 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_NUM = 0x3,
-};
-
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN,
- MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
-};
-
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
- struct mlx5_eq_table eq_table;
- struct mlx5_irq_info *irq_info;
+ struct mlx5_eq_table *eq_table;
/* pages stuff */
+ struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
@@ -660,8 +525,6 @@ struct mlx5_priv {
struct mlx5_core_health health;
- struct mlx5_srq_table srq_table;
-
/* start: qp staff */
struct mlx5_qp_table qp_table;
struct dentry *qp_debugfs;
@@ -691,28 +554,18 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
-
- struct list_head waiting_events_list;
- bool is_accum_events;
+ struct mlx5_events *events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
+ struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
- struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void *pfault_ctx;
- struct srcu_struct pfault_srcu;
-#endif
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
};
@@ -737,44 +590,6 @@ enum mlx5_pagefault_type_flags {
MLX5_PFAULT_RDMA = 1 << 2,
};
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u32 token;
- u8 event_subtype;
- u8 type;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * Number of resource holding WQE, depends on type.
- */
- u32 wq_num;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-
- struct mlx5_eq *eq;
- struct work_struct work;
-};
-
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
@@ -804,14 +619,15 @@ struct mlx5_pps {
};
struct mlx5_clock {
- rwlock_t lock;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
struct cyclecounter cycles;
struct timecounter tc;
struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -837,15 +653,13 @@ struct mlx5_core_dev {
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
} caps;
+ u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
unsigned long intf_state;
- void (*event) (struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param);
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
@@ -859,9 +673,6 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
@@ -940,8 +751,8 @@ struct mlx5_hca_vport_context {
u64 node_guid;
u32 cap_mask1;
u32 cap_mask1_perm;
- u32 cap_mask2;
- u32 cap_mask2_perm;
+ u16 cap_mask2;
+ u16 cap_mask2_perm;
u16 lid;
u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
u8 lmc;
@@ -994,10 +805,12 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
- u32 strides_offset,
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
+ u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
+ fbc->frags = frags;
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006,18 +819,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
fbc->strides_offset = strides_offset;
}
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
struct mlx5_frag_buf_ctrl *fbc)
{
- mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
-}
-
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
- void *cqc)
-{
- mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
- MLX5_GET(cqc, cqc, log_cq_size),
- fbc);
+ mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +834,15 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
ix += fbc->strides_offset;
frag = ix >> fbc->log_frag_strides;
- return fbc->frag_buf.frags[frag].buf +
- ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+ return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -1052,7 +865,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
@@ -1068,13 +881,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@@ -1093,9 +899,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
@@ -1106,9 +912,6 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1153,6 +956,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1200,23 +1006,21 @@ struct mlx5_interface {
void (*remove)(struct mlx5_core_dev *dev, void *context);
int (*attach)(struct mlx5_core_dev *dev, void *context);
void (*detach)(struct mlx5_core_dev *dev, void *context);
- void (*event)(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param);
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void * (*get_dev)(void *context);
int protocol;
struct list_head list;
};
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
@@ -1226,21 +1030,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-#else
+#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+ struct ib_device *device,
+ struct rdma_netdev_alloc_params *params);
struct mlx5_profile {
u64 mask;
@@ -1310,10 +1108,4 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
-{
- return dev->priv.irq_info[vector].mask;
-}
-
#endif /* MLX5_DRIVER_H */
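The mlx5_notifier_register()/mlx5_notifier_unregister() pair replaces the removed dev->event and per-interface event callbacks: a consumer hangs a notifier_block, usually wrapped in the mlx5_nb added by the new eq.h below, on the device's event chain. A hedged sketch of that pattern; the handler body, the embedding structure, and the PORT_CHANGE subscription are invented here for illustration:

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>

/* Illustrative consumer of the mlx5 device event notifier chain. */
struct my_listener {
	struct mlx5_nb nb;	/* wrapper defined in include/linux/mlx5/eq.h below */
};

static int my_port_event(struct notifier_block *nb, unsigned long type, void *data)
{
	struct my_listener *l = mlx5_nb_cof(nb, struct my_listener, nb);

	/* 'data' points at the raw EQE for the subscribed event type. */
	return l ? NOTIFY_OK : NOTIFY_DONE;
}

static void my_listener_start(struct mlx5_core_dev *dev, struct my_listener *l)
{
	MLX5_NB_INIT(&l->nb, my_port_event, PORT_CHANGE);
	mlx5_notifier_register(dev, &l->nb.nb);
}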
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
new file mode 100644
index 000000000000..00045cc4ea11
--- /dev/null
+++ b/include/linux/mlx5/eq.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef MLX5_CORE_EQ_H
+#define MLX5_CORE_EQ_H
+
+enum {
+ MLX5_EQ_PAGEREQ_IDX = 0,
+ MLX5_EQ_CMD_IDX = 1,
+ MLX5_EQ_ASYNC_IDX = 2,
+ /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
+ MLX5_EQ_PFAULT_IDX = 3,
+ MLX5_EQ_MAX_ASYNC_EQS,
+ /* completion eqs vector indices start here */
+ MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
+};
+
+#define MLX5_NUM_CMD_EQE (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
+struct mlx5_eq;
+struct mlx5_core_dev;
+
+struct mlx5_eq_param {
+ u8 index;
+ int nent;
+ u64 mask;
+ void *context;
+ irq_handler_t handler;
+};
+
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq_param *param);
+int
+mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
+
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc must be called for every EQE handled in the EQ IRQ handler.
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+ if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+ mlx5_eq_update_ci(eq, cc, 0);
+ cc = 0;
+ }
+ return cc;
+}
+
+struct mlx5_nb {
+ struct notifier_block nb;
+ u8 event_type;
+};
+
+#define mlx5_nb_cof(ptr, type, member) \
+ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
+
+#define MLX5_NB_INIT(name, handler, event) do { \
+ (name)->nb.notifier_call = handler; \
+ (name)->event_type = MLX5_EVENT_TYPE_##event; \
+} while (0)
+
+#endif /* MLX5_CORE_EQ_H */
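The comment above mlx5_eq_update_cc spells out the contract for generic EQ users: consume EQEs, bump the credit counter for each one, and let the helper fold credits into the consumer index before the spare entries run out. A minimal sketch of an IRQ handler built on this API, assuming the struct mlx5_eq pointer is what arrives as the handler context (that wiring is an assumption, not taken from the core code):

#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>

/* Illustrative IRQ handler for an EQ created via mlx5_eq_create_generic(). */
static irqreturn_t my_eq_irq_handler(int irq, void *ctx)
{
	struct mlx5_eq *eq = ctx;	/* assumed to be mlx5_eq_param.context */
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		/* ... handle eqe->type here ... */
		++cc;
		/* keep the HCA from seeing an overflow on long bursts */
		cc = mlx5_eq_update_cc(eq, cc);
	}
	/* publish the final consumer index and re-arm the EQ */
	mlx5_eq_update_ci(eq, cc, true);

	return IRQ_HANDLED;
}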
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 804516e4f483..9df51da04621 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -45,7 +45,8 @@ enum {
};
enum {
- MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
};
#define LEFTOVERS_RULE_NUM 2
@@ -85,22 +86,30 @@ struct mlx5_flow_spec {
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- struct mlx5_fc *counter;
+ u32 counter_id;
struct {
u16 num;
u16 vhca_id;
- bool vhca_id_valid;
+ u32 reformat_id;
+ u8 flags;
} vport;
};
};
struct mlx5_flow_namespace *
+mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
+struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
@@ -155,20 +164,28 @@ struct mlx5_fs_vlan {
#define MLX5_FS_VLAN_DEPTH 2
+enum {
+ FLOW_ACT_HAS_TAG = BIT(0),
+ FLOW_ACT_NO_APPEND = BIT(1),
+};
+
struct mlx5_flow_act {
u32 action;
- bool has_flow_tag;
u32 flow_tag;
- u32 encap_id;
+ u32 reformat_id;
u32 modify_id;
uintptr_t esp_id;
+ u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
- struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
- MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+ struct mlx5_flow_act name = { .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG, \
+ .reformat_id = 0, \
+ .modify_id = 0, \
+ .flags = 0, }
/* Single destination per rule.
* Group ID is implied by the match criteria.
@@ -185,15 +202,30 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *new_dest,
struct mlx5_flow_destination *old_dest);
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
+u32 mlx5_fc_id(struct mlx5_fc *counter);
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ u8 namespace, u8 num_actions,
+ void *modify_actions, u32 *modify_header_id);
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ u32 modify_header_id);
+
+int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ int reformat_type,
+ size_t size,
+ void *reformat_data,
+ enum mlx5_flow_namespace_type namespace,
+ u32 *packet_reformat_id);
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ u32 packet_reformat_id);
+
#endif
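The old encap allocation API is generalized into packet reformat contexts: a caller allocates a context for a given reformat type and namespace, stores the returned id in mlx5_flow_act.reformat_id alongside the PACKET_REFORMAT action bit, and frees it when the rule goes away. A rough sketch under those assumptions; the VXLAN header buffer is a placeholder, and the reformat type and action constants come from the mlx5_ifc.h hunk below:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

/* Illustrative: allocate a reformat context and hook it into a flow action. */
static int reformat_sketch(struct mlx5_core_dev *dev, void *hdr, size_t hdr_sz,
			   struct mlx5_flow_act *flow_act)
{
	u32 reformat_id;
	int err;

	err = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
					 hdr_sz, hdr, MLX5_FLOW_NAMESPACE_FDB,
					 &reformat_id);
	if (err)
		return err;

	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	flow_act->reformat_id = reformat_id;
	/* ... add the rule; call mlx5_packet_reformat_dealloc() on teardown */
	return 0;
}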
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f043d65b9bac..35fe5217b244 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -76,13 +76,7 @@ enum {
};
enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4),
- MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5),
-};
-
-enum {
- MLX5_OBJ_TYPE_UCTX = 0x0004,
- MLX5_OBJ_TYPE_UMEM = 0x0005,
+ MLX5_SHARED_RESOURCE_UID = 0xffff,
};
enum {
@@ -144,6 +138,9 @@ enum {
MLX5_CMD_OP_DESTROY_XRQ = 0x718,
MLX5_CMD_OP_QUERY_XRQ = 0x719,
MLX5_CMD_OP_ARM_XRQ = 0x71a,
+ MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
+ MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
+ MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
@@ -161,6 +158,8 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
@@ -243,8 +242,9 @@ enum {
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
- MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
- MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
+ MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
@@ -257,9 +257,19 @@ enum {
MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
+ MLX5_CMD_OP_CREATE_UCTX = 0xa04,
+ MLX5_CMD_OP_DESTROY_UCTX = 0xa06,
+ MLX5_CMD_OP_CREATE_UMEM = 0xa08,
+ MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_MAX
};
+/* Valid range for general commands that don't work over an object */
+enum {
+ MLX5_CMD_OP_GENERAL_START = 0xb00,
+ MLX5_CMD_OP_GENERAL_END = 0xd00,
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -336,7 +346,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 encap[0x1];
+ u8 reformat[0x1];
u8 decap[0x1];
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
@@ -344,8 +354,12 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_c[0x1];
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
- u8 reserved_at_f[0x11];
-
+ u8 reformat_and_vlan_action[0x1];
+ u8 reserved_at_10[0x2];
+ u8 reformat_l3_tunnel_to_l2[0x1];
+ u8 reformat_l2_to_l3_tunnel[0x1];
+ u8 reformat_and_modify_action[0x1];
+ u8 reserved_at_15[0xb];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -417,6 +431,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_0[0x8];
u8 source_sqn[0x18];
@@ -438,8 +462,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
@@ -554,7 +577,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1fd];
+ u8 reserved_at_3[0x1d];
+ u8 encap_general_header[0x1];
+ u8 reserved_at_21[0xa];
+ u8 log_max_packet_reformat_context[0x5];
+ u8 reserved_at_30[0x6];
+ u8 max_encap_header_size[0xa];
+ u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
@@ -572,9 +601,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x1a];
+ u8 multi_fdb_encap[0x1];
+ u8 reserved_at_1b[0x1];
u8 fdb_multi_path_to_table[0x1];
- u8 reserved_at_1d[0x1e3];
+ u8 reserved_at_1d[0x3];
+
+ u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -585,21 +618,29 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_800[0x7800];
};
+enum {
+ MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+ MLX5_COUNTER_FLOW_ESWITCH = 0x1,
+};
+
struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x18];
+ u8 reserved_at_5[0x17];
+ u8 counter_eswitch_affinity[0x1];
u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
- u8 reserved_at_22[0x9];
- u8 log_max_encap_headers[0x5];
+ u8 reserved_at_22[0x1];
+ u8 log_max_fdb_encap_uplink[0x5];
+ u8 reserved_at_21[0x3];
+ u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
@@ -817,7 +858,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
struct mlx5_ifc_calc_op calc2;
struct mlx5_ifc_calc_op calc3;
- u8 reserved_at_e0[0x720];
+ u8 reserved_at_c0[0x720];
};
enum {
@@ -871,6 +912,10 @@ enum {
MLX5_CAP_UMR_FENCE_NONE = 0x2,
};
+enum {
+ MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
@@ -896,7 +941,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
- u8 reserved_at_f9[0x3];
+ u8 reserved_at_f9[0x2];
+ u8 fast_teardown[0x1];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -995,7 +1041,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 umr_modify_atomic_disabled[0x1];
u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
- u8 reserved_at_20c[0x3];
+ u8 dc_req_scat_data_cqe[0x1];
+ u8 reserved_at_20d[0x2];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
@@ -1029,7 +1076,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vector_calc[0x1];
u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
+ u8 qp_packet_based[0x1];
+ u8 reserved_at_233[0x3];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -1139,7 +1187,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_440[0x20];
- u8 reserved_at_460[0x10];
+ u8 reserved_at_460[0x3];
+ u8 log_max_uctx[0x5];
+ u8 reserved_at_468[0x3];
+ u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
u8 reserved_at_480[0x3];
@@ -1179,7 +1230,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_vhca_ports[0x8];
u8 reserved_at_618[0x6];
u8 sw_owner_id[0x1];
- u8 reserved_at_61f[0x1e1];
+ u8 reserved_at_61f[0x1];
+
+ u8 max_num_of_monitor_counters[0x10];
+ u8 num_ppcnt_monitor_counters[0x10];
+
+ u8 reserved_at_640[0x10];
+ u8 num_q_monitor_counters[0x10];
+
+ u8 reserved_at_660[0x40];
+
+ u8 uctx_cap[0x20];
+
+ u8 reserved_at_6c0[0x140];
};
enum mlx5_flow_destination_type {
@@ -1195,8 +1258,10 @@ enum mlx5_flow_destination_type {
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
+
u8 destination_eswitch_owner_vhca_id_valid[0x1];
- u8 reserved_at_21[0xf];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -1206,6 +1271,14 @@ struct mlx5_ifc_flow_counter_list_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@ -1280,7 +1353,9 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_at_120[0x3];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
u8 log_hairpin_num_packets[0x5];
u8 reserved_at_128[0x3];
u8 log_hairpin_data_sz[0x5];
@@ -2233,7 +2308,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x3];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2354,7 +2430,10 @@ struct mlx5_ifc_qpc_bits {
u8 dc_access_key[0x40];
- u8 reserved_at_680[0xc0];
+ u8 reserved_at_680[0x3];
+ u8 dbr_umem_valid[0x1];
+
+ u8 reserved_at_684[0xbc];
};
struct mlx5_ifc_roce_addr_layout_bits {
@@ -2394,7 +2473,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
- MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
@@ -2421,13 +2500,14 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_at_80[0x8];
+ u8 extended_destination[0x1];
+ u8 reserved_at_80[0x7];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 modify_header_id[0x20];
@@ -2461,7 +2541,8 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_at_46[0x2];
+ u8 reserved_at_46[0x1];
+ u8 dbr_umem_valid[0x1];
u8 cqn[0x18];
u8 reserved_at_60[0x20];
@@ -2549,8 +2630,8 @@ enum {
};
enum {
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
};
struct mlx5_ifc_tirc_bits {
@@ -3118,7 +3199,9 @@ enum {
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
- u8 reserved_at_4[0x4];
+ u8 reserved_at_4[0x2];
+ u8 dbr_umem_valid[0x1];
+ u8 reserved_at_7[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3352,12 +3435,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
u8 reserved_at_40[0x3f];
- u8 force_state[0x1];
+ u8 state[0x1];
};
enum {
MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
};
struct mlx5_ifc_teardown_hca_in_bits {
@@ -3384,7 +3468,7 @@ struct mlx5_ifc_sqerr2rts_qp_out_bits {
struct mlx5_ifc_sqerr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3414,7 +3498,7 @@ struct mlx5_ifc_sqd2rts_qp_out_bits {
struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3619,7 +3703,7 @@ struct mlx5_ifc_rts2rts_qp_out_bits {
struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3649,7 +3733,7 @@ struct mlx5_ifc_rtr2rts_qp_out_bits {
struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3679,7 +3763,7 @@ struct mlx5_ifc_rst2init_qp_out_bits {
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -3773,6 +3857,83 @@ enum {
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+ MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+ MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+ MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+ MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
+};
+
+enum {
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+ u8 reserved_at_0[0x4];
+ u8 type[0x4];
+ u8 reserved_at_8[0x8];
+ u8 counter[0x10];
+
+ u8 counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+ MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4638,7 +4799,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
- MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -4802,19 +4963,19 @@ struct mlx5_ifc_query_eq_in_bits {
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_encap_header_in_bits {
+struct mlx5_ifc_packet_reformat_context_in_bits {
u8 reserved_at_0[0x5];
- u8 header_type[0x3];
+ u8 reformat_type[0x3];
u8 reserved_at_8[0xe];
- u8 encap_header_size[0xa];
+ u8 reformat_data_size[0xa];
u8 reserved_at_20[0x10];
- u8 encap_header[2][0x8];
+ u8 reformat_data[2][0x8];
- u8 more_encap_header[0][0x8];
+ u8 more_reformat_data[0][0x8];
};
-struct mlx5_ifc_query_encap_header_out_bits {
+struct mlx5_ifc_query_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4822,33 +4983,41 @@ struct mlx5_ifc_query_encap_header_out_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header[0];
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0];
};
-struct mlx5_ifc_query_encap_header_in_bits {
+struct mlx5_ifc_query_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0xa0];
};
-struct mlx5_ifc_alloc_encap_header_out_bits {
+struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_at_60[0x20];
};
-struct mlx5_ifc_alloc_encap_header_in_bits {
+enum {
+ MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
+ MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
+ MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4857,10 +5026,10 @@ struct mlx5_ifc_alloc_encap_header_in_bits {
u8 reserved_at_40[0xa0];
- struct mlx5_ifc_encap_header_in_bits encap_header;
+ struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context;
};
-struct mlx5_ifc_dealloc_encap_header_out_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4869,14 +5038,14 @@ struct mlx5_ifc_dealloc_encap_header_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_dealloc_encap_header_in_bits {
+struct mlx5_ifc_dealloc_packet_reformat_context_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_20[0x10];
u8 op_mod[0x10];
- u8 encap_id[0x20];
+ u8 packet_reformat_id[0x20];
u8 reserved_60[0x20];
};
@@ -5174,7 +5343,7 @@ struct mlx5_ifc_qp_2rst_out_bits {
struct mlx5_ifc_qp_2rst_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5196,7 +5365,7 @@ struct mlx5_ifc_qp_2err_out_bits {
struct mlx5_ifc_qp_2err_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5296,7 +5465,7 @@ struct mlx5_ifc_modify_tis_bitmask_bits {
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5335,7 +5504,7 @@ struct mlx5_ifc_modify_tir_out_bits {
struct mlx5_ifc_modify_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5363,7 +5532,7 @@ struct mlx5_ifc_modify_sq_out_bits {
struct mlx5_ifc_modify_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5436,7 +5605,7 @@ struct mlx5_ifc_rqt_bitmask_bits {
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5470,7 +5639,7 @@ enum {
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5506,7 +5675,7 @@ struct mlx5_ifc_rmp_bitmask_bits {
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5536,7 +5705,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x12];
u8 affiliation[0x1];
- u8 reserved_at_e[0x1];
+ u8 reserved_at_13[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5611,7 +5780,7 @@ enum {
struct mlx5_ifc_modify_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5623,7 +5792,10 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x40];
+
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2c1[0x5bf];
u8 pas[0][0x40];
};
@@ -5771,7 +5943,7 @@ struct mlx5_ifc_init2rtr_qp_out_bits {
struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5801,7 +5973,7 @@ struct mlx5_ifc_init2init_qp_out_bits {
struct mlx5_ifc_init2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5900,7 +6072,7 @@ struct mlx5_ifc_drain_dct_out_bits {
struct mlx5_ifc_drain_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5944,7 +6116,7 @@ struct mlx5_ifc_detach_from_mcg_out_bits {
struct mlx5_ifc_detach_from_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5968,7 +6140,7 @@ struct mlx5_ifc_destroy_xrq_out_bits {
struct mlx5_ifc_destroy_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -5990,7 +6162,7 @@ struct mlx5_ifc_destroy_xrc_srq_out_bits {
struct mlx5_ifc_destroy_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6012,7 +6184,7 @@ struct mlx5_ifc_destroy_tis_out_bits {
struct mlx5_ifc_destroy_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6034,7 +6206,7 @@ struct mlx5_ifc_destroy_tir_out_bits {
struct mlx5_ifc_destroy_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6056,7 +6228,7 @@ struct mlx5_ifc_destroy_srq_out_bits {
struct mlx5_ifc_destroy_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6078,7 +6250,7 @@ struct mlx5_ifc_destroy_sq_out_bits {
struct mlx5_ifc_destroy_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6124,7 +6296,7 @@ struct mlx5_ifc_destroy_rqt_out_bits {
struct mlx5_ifc_destroy_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6146,7 +6318,7 @@ struct mlx5_ifc_destroy_rq_out_bits {
struct mlx5_ifc_destroy_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6190,7 +6362,7 @@ struct mlx5_ifc_destroy_rmp_out_bits {
struct mlx5_ifc_destroy_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6212,7 +6384,7 @@ struct mlx5_ifc_destroy_qp_out_bits {
struct mlx5_ifc_destroy_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6364,7 +6536,7 @@ struct mlx5_ifc_destroy_dct_out_bits {
struct mlx5_ifc_destroy_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6386,7 +6558,7 @@ struct mlx5_ifc_destroy_cq_out_bits {
struct mlx5_ifc_destroy_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6489,7 +6661,7 @@ struct mlx5_ifc_dealloc_xrcd_out_bits {
struct mlx5_ifc_dealloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6533,7 +6705,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits {
struct mlx5_ifc_dealloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6577,7 +6749,7 @@ struct mlx5_ifc_dealloc_pd_out_bits {
struct mlx5_ifc_dealloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6623,7 +6795,7 @@ struct mlx5_ifc_create_xrq_out_bits {
struct mlx5_ifc_create_xrq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6647,7 +6819,7 @@ struct mlx5_ifc_create_xrc_srq_out_bits {
struct mlx5_ifc_create_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6656,7 +6828,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
+
+ u8 xrc_srq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
@@ -6675,7 +6852,7 @@ struct mlx5_ifc_create_tis_out_bits {
struct mlx5_ifc_create_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6699,7 +6876,7 @@ struct mlx5_ifc_create_tir_out_bits {
struct mlx5_ifc_create_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6723,7 +6900,7 @@ struct mlx5_ifc_create_srq_out_bits {
struct mlx5_ifc_create_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6751,7 +6928,7 @@ struct mlx5_ifc_create_sq_out_bits {
struct mlx5_ifc_create_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6805,7 +6982,7 @@ struct mlx5_ifc_create_rqt_out_bits {
struct mlx5_ifc_create_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6829,7 +7006,7 @@ struct mlx5_ifc_create_rq_out_bits {
struct mlx5_ifc_create_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6853,7 +7030,7 @@ struct mlx5_ifc_create_rmp_out_bits {
struct mlx5_ifc_create_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6877,7 +7054,7 @@ struct mlx5_ifc_create_qp_out_bits {
struct mlx5_ifc_create_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -6890,7 +7067,10 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x80];
+ u8 reserved_at_800[0x60];
+
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_861[0x1f];
u8 pas[0][0x40];
};
@@ -6952,7 +7132,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 reserved_at_40[0x20];
u8 pg_access[0x1];
- u8 reserved_at_61[0x1f];
+ u8 mkey_umem_valid[0x1];
+ u8 reserved_at_62[0x1e];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -6978,7 +7159,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
};
struct mlx5_ifc_flow_table_context_bits {
- u8 encap_en[0x1];
+ u8 reformat_en[0x1];
u8 decap_en[0x1];
u8 reserved_at_2[0x2];
u8 table_miss_action[0x4];
@@ -7120,7 +7301,7 @@ struct mlx5_ifc_create_dct_out_bits {
struct mlx5_ifc_create_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7146,7 +7327,7 @@ struct mlx5_ifc_create_cq_out_bits {
struct mlx5_ifc_create_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7155,7 +7336,10 @@ struct mlx5_ifc_create_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
+
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x59f];
u8 pas[0][0x40];
};
@@ -7203,7 +7387,7 @@ struct mlx5_ifc_attach_to_mcg_out_bits {
struct mlx5_ifc_attach_to_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7254,7 +7438,7 @@ enum {
struct mlx5_ifc_arm_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7282,7 +7466,7 @@ enum {
struct mlx5_ifc_arm_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7330,7 +7514,7 @@ struct mlx5_ifc_alloc_xrcd_out_bits {
struct mlx5_ifc_alloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7374,7 +7558,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits {
struct mlx5_ifc_alloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7396,7 +7580,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits {
struct mlx5_ifc_alloc_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7418,7 +7602,7 @@ struct mlx5_ifc_alloc_pd_out_bits {
struct mlx5_ifc_alloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7786,20 +7970,34 @@ struct mlx5_ifc_pplr_reg_bits {
struct mlx5_ifc_pplm_reg_bits {
u8 reserved_at_0[0x8];
- u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0x20];
- u8 port_profile_mode[0x8];
- u8 static_port_profile[0x8];
- u8 active_port_profile[0x8];
- u8 reserved_at_58[0x8];
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_at_58[0x8];
- u8 retransmission_active[0x8];
- u8 fec_mode_active[0x18];
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
- u8 reserved_at_80[0x20];
+ u8 rs_fec_correction_bypass_cap[0x4];
+ u8 reserved_at_84[0x8];
+ u8 fec_override_cap_56g[0x4];
+ u8 fec_override_cap_100g[0x4];
+ u8 fec_override_cap_50g[0x4];
+ u8 fec_override_cap_25g[0x4];
+ u8 fec_override_cap_10g_40g[0x4];
+
+ u8 rs_fec_correction_bypass_admin[0x4];
+ u8 reserved_at_a4[0x8];
+ u8 fec_override_admin_56g[0x4];
+ u8 fec_override_admin_100g[0x4];
+ u8 fec_override_admin_50g[0x4];
+ u8 fec_override_admin_25g[0x4];
+ u8 fec_override_admin_10g_40g[0x4];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8084,7 +8282,8 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
- u8 reserved_at_77[0x4];
+ u8 reserved_at_77[0x3];
+ u8 per_lane_error_counters[0x1];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
@@ -8095,12 +8294,17 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_32[0x20];
+
+ u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 pplm[0x1];
+ u8 port_access_reg_cap_mask_34_to_32[0x3];
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
u8 pptb[0x1];
- u8 port_access_reg_cap_mask_10_to_0[0xb];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
};
struct mlx5_ifc_pcam_reg_bits {
@@ -8964,7 +9168,7 @@ struct mlx5_ifc_dcbx_param_bits {
u8 dcbx_cee_cap[0x1];
u8 dcbx_ieee_cap[0x1];
u8 dcbx_standby_cap[0x1];
- u8 reserved_at_0[0x5];
+ u8 reserved_at_3[0x5];
u8 port_number[0x8];
u8 reserved_at_10[0xa];
u8 max_application_table_size[6];
@@ -9197,9 +9401,9 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
};
struct mlx5_ifc_umem_bits {
- u8 modify_field_select[0x40];
+ u8 reserved_at_0[0x80];
- u8 reserved_at_40[0x5b];
+ u8 reserved_at_80[0x1b];
u8 log_page_size[0x5];
u8 page_offset[0x20];
@@ -9210,19 +9414,46 @@ struct mlx5_ifc_umem_bits {
};
struct mlx5_ifc_uctx_bits {
- u8 modify_field_select[0x40];
+ u8 cap[0x20];
- u8 reserved_at_40[0x1c0];
+ u8 reserved_at_20[0x160];
};
struct mlx5_ifc_create_umem_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_umem_bits umem;
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_umem_bits umem;
};
struct mlx5_ifc_create_uctx_in_bits {
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
- struct mlx5_ifc_uctx_bits uctx;
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_uctx_bits uctx;
+};
+
+struct mlx5_ifc_destroy_uctx_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_mtrc_string_db_param_bits {
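
[Editor's note] The mlx5_ifc.h hunks above rename the encap-header command layouts to packet-reformat-context ones, add the MLX5_REFORMAT_TYPE_* enum, and expose a uid field in many command inputs. The following is a minimal, hypothetical sketch (not the in-tree driver code) of how a caller might build the alloc command with the renamed fields; the opcode constant name and the unaligned buffer sizing are assumptions.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mm.h>

static int example_alloc_reformat(struct mlx5_core_dev *dev,
				  const void *data, size_t size, u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	int inlen = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size;
	void *ctx, *reformat;
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* Opcode name assumed from the same rename series. */
	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);

	ctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
			   packet_reformat_context);
	MLX5_SET(packet_reformat_context_in, ctx, reformat_type,
		 MLX5_REFORMAT_TYPE_L2_TO_VXLAN);
	MLX5_SET(packet_reformat_context_in, ctx, reformat_data_size, size);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in, ctx, reformat_data);
	memcpy(reformat, data, size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_packet_reformat_context_out, out,
			       packet_reformat_id);
	kvfree(in);
	return err;
}
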
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 34aed6032f86..bf4bc01ffb0c 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -107,9 +107,6 @@ enum mlx5e_connector_type {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
-
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 4778d41085d4..b26ea9077384 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -471,6 +471,7 @@ struct mlx5_core_qp {
int qpn;
struct mlx5_rsc_debug *dbg;
int pid;
+ u16 uid;
};
struct mlx5_core_dct {
@@ -595,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size);
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
static inline const char *mlx5_qp_type_str(int type)
{
switch (type) {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 24ff23e27c8a..000000000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
- MLX5_SRQ_FLAG_ERR = (1 << 0),
- MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
- MLX5_SRQ_FLAG_RNDV = (1 << 2),
-};
-
-struct mlx5_srq_attr {
- u32 type;
- u32 flags;
- u32 log_size;
- u32 wqe_shift;
- u32 log_page_size;
- u32 wqe_cnt;
- u32 srqn;
- u32 xrcd;
- u32 page_offset;
- u32 cqn;
- u32 pd;
- u32 lwm;
- u32 user_index;
- u64 db_record;
- __be64 *pas;
- u32 tm_log_list_size;
- u32 tm_next_tag;
- u32 tm_hw_phase_cnt;
- u32 tm_sw_phase_cnt;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..a261d5528ff7 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
int inlen);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
@@ -90,6 +79,8 @@ struct mlx5_hairpin {
u32 *rqn;
u32 *sqn;
+
+ bool peer_gone;
};
struct mlx5_hairpin *
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 7e7c6dfcfb09..9c694808c212 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
struct mlx5_core_dev *port_mdev);
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
+
+u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
#endif /* __MLX5_VPORT_H__ */
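
[Editor's note] vport.h now exports mlx5_query_nic_system_image_guid(). A hedged illustration of the kind of check it enables next to the affiliation helpers visible in this hunk; the function name and comparison are purely illustrative.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Hypothetical check before affiliating two ports of the same HCA. */
static bool example_same_hca(struct mlx5_core_dev *master,
			     struct mlx5_core_dev *port)
{
	return mlx5_query_nic_system_image_guid(master) ==
	       mlx5_query_nic_system_image_guid(port);
}
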
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a3cae495f9ce..80bb6408fe73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -48,7 +48,32 @@ static inline void set_max_mapnr(unsigned long limit)
static inline void set_max_mapnr(unsigned long limit) { }
#endif
-extern unsigned long totalram_pages;
+extern atomic_long_t _totalram_pages;
+static inline unsigned long totalram_pages(void)
+{
+ return (unsigned long)atomic_long_read(&_totalram_pages);
+}
+
+static inline void totalram_pages_inc(void)
+{
+ atomic_long_inc(&_totalram_pages);
+}
+
+static inline void totalram_pages_dec(void)
+{
+ atomic_long_dec(&_totalram_pages);
+}
+
+static inline void totalram_pages_add(long count)
+{
+ atomic_long_add(count, &_totalram_pages);
+}
+
+static inline void totalram_pages_set(long val)
+{
+ atomic_long_set(&_totalram_pages, val);
+}
+
extern void * high_memory;
extern int page_cluster;
@@ -146,6 +171,8 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
+#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
@@ -456,6 +483,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
static const struct vm_operations_struct dummy_vm_ops = {};
+ memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
@@ -727,10 +755,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page);
-int finish_fault(struct vm_fault *vmf);
-int finish_mkwrite_fault(struct vm_fault *vmf);
+vm_fault_t finish_fault(struct vm_fault *vmf);
+vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -803,6 +831,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@@ -813,6 +842,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -835,6 +865,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
@@ -847,6 +878,8 @@ static inline bool is_zone_device_page(const struct page *page)
{
return page_zonenum(page) == ZONE_DEVICE;
}
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
@@ -889,6 +922,19 @@ static inline bool is_device_public_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}
+#ifdef CONFIG_PCI_P2PDMA
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+#else /* CONFIG_PCI_P2PDMA */
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline void dev_pagemap_get_ops(void)
{
@@ -912,6 +958,11 @@ static inline bool is_device_public_page(const struct page *page)
{
return false;
}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline void get_page(struct page *page)
@@ -959,15 +1010,6 @@ static inline int page_zone_id(struct page *page)
return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
-static inline int zone_to_nid(struct zone *zone)
-{
-#ifdef CONFIG_NUMA
- return zone->node;
-#else
- return 0;
-#endif
-}
-
#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
@@ -1089,6 +1131,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
}
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_KASAN_SW_TAGS
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+ page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+ page_kasan_tag_set(page, 0xff);
+}
+#else
+static inline u8 page_kasan_tag(const struct page *page)
+{
+ return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+#endif
+
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
@@ -1385,6 +1453,8 @@ struct mm_walk {
void *private;
};
+struct mmu_notifier_range;
+
int walk_page_range(unsigned long addr, unsigned long end,
struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
@@ -1393,8 +1463,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
- unsigned long *start, unsigned long *end,
- pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+ struct mmu_notifier_range *range,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1411,8 +1481,8 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
-extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags);
+extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
@@ -1421,7 +1491,7 @@ void unmap_mapping_pages(struct address_space *mapping,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
-static inline int handle_mm_fault(struct vm_area_struct *vma,
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
/* should never happen if there's no MMU */
@@ -1732,11 +1802,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1756,11 +1830,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
@@ -1797,8 +1875,8 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
+int __pte_alloc_kernel(pmd_t *pmd);
/*
* The following ifdef needed to get the 4level-fixup.h header to work.
@@ -1880,13 +1958,6 @@ static inline bool ptlock_init(struct page *page)
return true;
}
-/* Reset page->mapping so free_pages_check won't complain. */
-static inline void pte_lock_deinit(struct page *page)
-{
- page->mapping = NULL;
- ptlock_free(page);
-}
-
#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
@@ -1897,7 +1968,7 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
-static inline void pte_lock_deinit(struct page *page) {}
+static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
static inline void pgtable_init(void)
@@ -1917,7 +1988,7 @@ static inline bool pgtable_page_ctor(struct page *page)
static inline void pgtable_page_dtor(struct page *page)
{
- pte_lock_deinit(page);
+ ptlock_free(page);
__ClearPageTable(page);
dec_zone_page_state(page, NR_PAGETABLE);
}
@@ -1936,18 +2007,17 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
-#define pte_alloc(mm, pmd, address) \
- (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
+#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
#define pte_alloc_map(mm, pmd, address) \
- (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
+ (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
- (pte_alloc(mm, pmd, address) ? \
+ (pte_alloc(mm, pmd) ? \
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
#if USE_SPLIT_PMD_PTLOCKS
@@ -2023,7 +2093,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, unsigned long * zones_size,
+extern void __init free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);
@@ -2034,7 +2104,7 @@ extern void free_initmem(void);
* Return pages freed into the buddy system.
*/
extern unsigned long free_reserved_area(void *start, void *end,
- int poison, char *s);
+ int poison, const char *s);
#ifdef CONFIG_HIGHMEM
/*
@@ -2151,7 +2221,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
@@ -2182,6 +2252,7 @@ extern void zone_pcp_reset(struct zone *zone);
/* page_alloc.c */
extern int min_free_kbytes;
+extern int watermark_boost_factor;
extern int watermark_scale_factor;
/* nommu.c */
@@ -2294,6 +2365,8 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
struct list_head *uf);
+extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
+ struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
@@ -2463,6 +2536,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+static inline bool range_in_vma(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
@@ -2486,11 +2565,11 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
-int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
-int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
@@ -2509,32 +2588,6 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn)
-{
- int err = vm_insert_mixed(vma, addr, pfn);
-
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- if (err < 0 && err != -EBUSY)
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-
-static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- int err = vm_insert_pfn(vma, addr, pfn);
-
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- if (err < 0 && err != -EBUSY)
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
@@ -2542,16 +2595,8 @@ static inline vm_fault_t vmf_error(int err)
return VM_FAULT_SIGBUS;
}
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int foll_flags,
- unsigned int *page_mask);
-
-static inline struct page *follow_page(struct vm_area_struct *vma,
- unsigned long address, unsigned int foll_flags)
-{
- unsigned int unused_page_mask;
- return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
-}
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned int foll_flags);
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
@@ -2571,7 +2616,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
-static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
return -ENOMEM;
@@ -2739,6 +2784,7 @@ enum mf_action_page_type {
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
MF_MSG_BUDDY_2ND,
+ MF_MSG_DAX,
MF_MSG_UNKNOWN,
};
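
[Editor's note] The mm.h hunks above hide totalram_pages behind an atomic_long_t and accessor helpers (and convert several fault paths to vm_fault_t). A minimal sketch of a caller using the new accessors; the surrounding driver context is invented.

#include <linux/mm.h>

/* Hypothetical caller: sizing a cache from total RAM with the new accessors. */
static unsigned long example_cache_pages(void)
{
	/* totalram_pages is no longer a plain variable; read it atomically. */
	unsigned long total = totalram_pages();

	return total / 100;	/* e.g. budget 1% of RAM */
}

static void example_donate_pages(long nr)
{
	/* Balloon/arch-style code adjusts the counter via the helpers. */
	totalram_pages_add(nr);
}
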
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 10191c28fc04..04ec454d44ce 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -124,7 +124,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
}
return lru;
}
-
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cd2bc939efd0..2c471a2c43fa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -206,6 +206,11 @@ struct page {
#endif
} _struct_page_alignment;
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
+
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
@@ -341,7 +346,7 @@ struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
struct vmacache {
- u32 seqnum;
+ u64 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index beed7121c781..4d35ff36ceff 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -147,6 +147,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Prepare switch to DDR during the HS400 init sequence */
+ int (*hs400_prepare_ddr)(struct mmc_host *host);
+
/* Prepare for switching from HS400 to HS200 */
void (*hs400_downgrade)(struct mmc_host *host);
@@ -331,7 +334,7 @@ struct mmc_host {
#define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
MMC_CAP_UHS_DDR50)
-/* (1 << 21) is free for reuse */
+#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -569,6 +572,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
return host->can_retune == 1;
}
+static inline bool mmc_doing_retune(struct mmc_host *host)
+{
+ return host->doing_retune == 1;
+}
+
static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
{
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
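
[Editor's note] The host.h hunk adds an ->hs400_prepare_ddr() callback, the MMC_CAP_SYNC_RUNTIME_PM capability and a mmc_doing_retune() helper. A hedged sketch of how a host driver might wire up the new callback; the driver names and the partial ops initializer are illustrative only.

#include <linux/mmc/host.h>

/* Hypothetical host hook invoked before the DDR step of HS400 init. */
static int example_hs400_prepare_ddr(struct mmc_host *host)
{
	/* e.g. reprogram clocking/delay lines; return 0 on success */
	return 0;
}

static const struct mmc_host_ops example_mmc_ops = {
	/* ...request/set_ios and the other mandatory ops elided... */
	.hs400_prepare_ddr = example_hs400_prepare_ddr,
};
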
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 4224902a8e22..4332199c71c2 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -42,6 +42,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
+#define SDIO_DEVICE_ID_CYPRESS_43012 43012
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 06607c59c4d0..feebd7aa6f5c 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -17,12 +17,7 @@
struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
-int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
-
int mmc_gpio_get_cd(struct mmc_host *host);
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
- unsigned int debounce);
-
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce, bool *gpio_invert);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 392e6af82701..4050ec1c3b45 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H
-#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
@@ -11,9 +10,6 @@
struct mmu_notifier;
struct mmu_notifier_ops;
-/* mmu_notifier_ops flags */
-#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
-
#ifdef CONFIG_MMU_NOTIFIER
/*
@@ -29,16 +25,14 @@ struct mmu_notifier_mm {
spinlock_t lock;
};
-struct mmu_notifier_ops {
- /*
- * Flags to specify behavior of callbacks for this MMU notifier.
- * Used to determine which context an operation may be called.
- *
- * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
- * block
- */
- int flags;
+struct mmu_notifier_range {
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ bool blockable;
+};
+struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
* being destroyed by exit_mmap, always before all pages are
@@ -151,16 +145,17 @@ struct mmu_notifier_ops {
* address space but may still be referenced by sptes until
* the last refcount is dropped.
*
- * If both of these callbacks cannot block, and invalidate_range
- * cannot block, mmu_notifier_ops.flags should have
- * MMU_INVALIDATE_DOES_NOT_BLOCK set.
+ * If blockable argument is set to false then the callback cannot
+ * sleep and has to return with -EAGAIN. 0 should be returned
+ * otherwise. Please note that if invalidate_range_start approves
+ * a non-blocking behavior then the same applies to
+ * invalidate_range_end.
+ *
*/
- void (*invalidate_range_start)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ int (*invalidate_range_start)(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range);
void (*invalidate_range_end)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ const struct mmu_notifier_range *range);
/*
* invalidate_range() is either called between
@@ -179,10 +174,6 @@ struct mmu_notifier_ops {
* Note that this function might be called with just a sub-range
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
- *
- * If this callback cannot block, and invalidate_range_{start,end}
- * cannot block, mmu_notifier_ops.flags should have
- * MMU_INVALIDATE_DOES_NOT_BLOCK set.
*/
void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
unsigned long start, unsigned long end);
@@ -229,14 +220,11 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end);
-extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end,
+extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
-extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -277,25 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_start(mm, start, end);
+ if (mm_has_notifiers(range->mm)) {
+ range->blockable = true;
+ __mmu_notifier_invalidate_range_start(range);
+ }
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end, false);
+ if (mm_has_notifiers(range->mm)) {
+ range->blockable = false;
+ return __mmu_notifier_invalidate_range_start(range);
+ }
+ return 0;
}
-static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end, true);
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, false);
+}
+
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
+{
+ if (mm_has_notifiers(range->mm))
+ __mmu_notifier_invalidate_range_end(range, true);
}
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -316,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
__mmu_notifier_mm_destroy(mm);
}
+
+static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ range->mm = mm;
+ range->start = start;
+ range->end = end;
+}
+
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
@@ -425,10 +436,26 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
void (*func)(struct rcu_head *rcu));
-extern void mmu_notifier_synchronize(void);
#else /* CONFIG_MMU_NOTIFIER */
+struct mmu_notifier_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
+ unsigned long start,
+ unsigned long end)
+{
+ range->start = start;
+ range->end = end;
+}
+
+#define mmu_notifier_range_init(range, mm, start, end) \
+ _mmu_notifier_range_init(range, start, end)
+
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return 0;
@@ -456,29 +483,30 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}
-static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline int
+mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
+ return 0;
}
-static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline
+void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void
+mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}
-static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
- return false;
}
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
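
[Editor's note] The mmu_notifier.h changes replace the (mm, start, end) argument triplets with a struct mmu_notifier_range that callers fill via mmu_notifier_range_init(); the blockable flag is what invalidate_range_start implementations must honour (returning -EAGAIN when they cannot sleep). A minimal sketch of a caller converted to the range-based convention; the actual unmap work is elided.

#include <linux/mmu_notifier.h>

/* Hypothetical invalidation of [start, end) in the new range-based style. */
static void example_invalidate(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... clear the page table entries covering [start, end) ... */

	mmu_notifier_invalidate_range_end(&range);
}
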
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 32699b2dc52a..cc4a507d7ca4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -65,7 +65,7 @@ enum migratetype {
};
/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
-extern char * const migratetype_names[MIGRATE_TYPES];
+extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
@@ -161,8 +161,10 @@ enum node_stat_item {
NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
+ WORKINGSET_NODES,
WORKINGSET_REFAULT,
WORKINGSET_ACTIVATE,
+ WORKINGSET_RESTORE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -180,7 +182,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
- NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
+ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_VM_NODE_STAT_ITEMS
};
@@ -267,9 +269,10 @@ enum zone_watermarks {
NR_WMARK
};
-#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
+#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
+#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
+#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
struct per_cpu_pages {
int count; /* number of pages in the list */
@@ -312,7 +315,7 @@ enum zone_type {
* Architecture Limit
* ---------------------------
* parisc, ia64, sparc <4G
- * s390 <2G
+ * s390, powerpc <2G
* arm Various
* alpha Unlimited or 0-16MB.
*
@@ -360,7 +363,8 @@ struct zone {
/* Read-mostly fields */
/* zone watermarks, access with *_wmark_pages(zone) macros */
- unsigned long watermark[NR_WMARK];
+ unsigned long _watermark[NR_WMARK];
+ unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
@@ -426,14 +430,8 @@ struct zone {
* Write access to present_pages at runtime should be protected by
* mem_hotplug_begin/end(). Any reader who can't tolerant drift of
* present_pages should get_online_mems() to get a stable value.
- *
- * Read access to managed_pages should be safe because it's unsigned
- * long. Write access to zone->managed_pages and totalram_pages are
- * protected by managed_page_count_lock at runtime. Idealy only
- * adjust_managed_page_count() should be used instead of directly
- * touching zone->managed_pages and totalram_pages.
*/
- unsigned long managed_pages;
+ atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
@@ -522,6 +520,11 @@ enum pgdat_flags {
PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
};
+static inline unsigned long zone_managed_pages(struct zone *zone)
+{
+ return (unsigned long)atomic_long_read(&zone->managed_pages);
+}
+
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -631,14 +634,10 @@ typedef struct pglist_data {
struct page_ext *node_page_ext;
#endif
#endif
-#ifndef CONFIG_NO_BOOTMEM
- struct bootmem_data *bdata;
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
- * Must be held any time you expect node_start_pfn, node_present_pages
- * or node_spanned_pages stay constant. Holding this will also
- * guarantee that any pfn_valid() stays that way.
+ * Must be held any time you expect node_start_pfn,
+ * node_present_pages, node_spanned_pages or nr_zones to stay constant.
*
* pgdat_resize_lock() and pgdat_resize_unlock() are provided to
* manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -668,16 +667,6 @@ typedef struct pglist_data {
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
-#ifdef CONFIG_NUMA_BALANCING
- /* Lock serializing the migrate rate limiting window */
- spinlock_t numabalancing_migrate_lock;
-
- /* Rate limiting time interval */
- unsigned long numabalancing_migrate_next_window;
-
- /* Number of pages migrated during the rate limiting time interval */
- unsigned long numabalancing_migrate_nr_pages;
-#endif
/*
* This is a per-node reserve of pages that are not available
* to userspace allocations.
@@ -702,8 +691,6 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
- /* Number of non-deferred pages */
- unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -755,25 +742,6 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
-static inline int zone_id(const struct zone *zone)
-{
- struct pglist_data *pgdat = zone->zone_pgdat;
-
- return zone - pgdat->node_zones;
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return zone_id(zone) == ZONE_DEVICE;
-}
-#else
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return false;
-}
-#endif
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
@@ -813,6 +781,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
+#if defined(CONFIG_SPARSEMEM)
+void memblocks_present(void);
+#else
+static inline void memblocks_present(void) {}
+#endif
+
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -824,6 +798,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -832,7 +818,7 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
static inline bool managed_zone(struct zone *zone)
{
- return zone->managed_pages;
+ return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
@@ -841,6 +827,25 @@ static inline bool populated_zone(struct zone *zone)
return zone->present_pages;
}
+#ifdef CONFIG_NUMA
+static inline int zone_to_nid(struct zone *zone)
+{
+ return zone->node;
+}
+
+static inline void zone_set_nid(struct zone *zone, int nid)
+{
+ zone->node = nid;
+}
+#else
+static inline int zone_to_nid(struct zone *zone)
+{
+ return 0;
+}
+
+static inline void zone_set_nid(struct zone *zone, int nid) {}
+#endif
+
extern int movable_zone;
#ifdef CONFIG_HIGHMEM
@@ -865,7 +870,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
- * is_highmem - helper function to quickly check if a struct zone is a
+ * is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable
@@ -883,6 +888,8 @@ static inline int is_highmem(struct zone *zone)
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
@@ -956,12 +963,7 @@ static inline int zonelist_zone_idx(struct zoneref *zoneref)
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
-#ifdef CONFIG_NUMA
- /* zone_to_nid not available in this context */
- return zoneref->zone->node;
-#else
- return 0;
-#endif /* CONFIG_NUMA */
+ return zone_to_nid(zoneref->zone);
}
struct zoneref *__next_zones_zonelist(struct zoneref *z,
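
[Editor's note] mmzone.h wraps zone->managed_pages in an atomic_long_t behind zone_managed_pages(), renames the watermark array to _watermark plus a watermark_boost term, and adds zone_to_nid()/zone_set_nid(). A hedged sketch of a reader using the accessors rather than the raw fields; the headroom check itself is invented for illustration.

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Hypothetical check: is a zone above its (possibly boosted) high watermark? */
static bool example_zone_has_headroom(struct zone *zone)
{
	unsigned long managed = zone_managed_pages(zone);
	/* high_wmark_pages() now folds in zone->watermark_boost. */
	unsigned long high = high_wmark_pages(zone);

	return managed && zone_page_state(zone, NR_FREE_PAGES) > high;
}
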
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1298a7daa57d..f9bd2f34b99f 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -448,6 +448,23 @@ struct pci_epf_device_id {
kernel_ulong_t driver_data;
};
+/* i3c */
+
+#define I3C_MATCH_DCR 0x1
+#define I3C_MATCH_MANUF 0x2
+#define I3C_MATCH_PART 0x4
+#define I3C_MATCH_EXTRA_INFO 0x8
+
+struct i3c_device_id {
+ __u8 match_flags;
+ __u8 dcr;
+ __u16 manuf_id;
+ __u16 part_id;
+ __u16 extra_info;
+
+ const void *data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
@@ -565,7 +582,7 @@ struct platform_device_id {
/**
* struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
* @phy_id: The result of
- * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
* for this PHY type
* @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
* is used to terminate an array of struct mdio_device_id.
@@ -754,6 +771,7 @@ struct tb_service_id {
* struct typec_device_id - USB Type-C alternate mode identifiers
* @svid: Standard or Vendor ID
* @mode: Mode index
+ * @driver_data: Driver specific data
*/
struct typec_device_id {
__u16 svid;
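
[Editor's note] mod_devicetable.h gains struct i3c_device_id together with the I3C_MATCH_* flags. A hedged sketch of a driver match table built directly from those fields; the vendor and part numbers are placeholders.

#include <linux/mod_devicetable.h>

/* Hypothetical I3C driver match table. */
static const struct i3c_device_id example_i3c_ids[] = {
	{
		.match_flags = I3C_MATCH_MANUF | I3C_MATCH_PART,
		.manuf_id = 0x123,
		.part_id = 0x4567,
	},
	{ /* sentinel */ },
};
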
diff --git a/include/linux/module.h b/include/linux/module.h
index f807f15bebbe..d5453eb5a68b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/rbtree_latch.h>
#include <linux/error-injection.h>
+#include <linux/tracepoint-defs.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -123,7 +124,6 @@ extern void cleanup_module(void);
#define late_initcall_sync(fn) module_init(fn)
#define console_initcall(fn) module_init(fn)
-#define security_initcall(fn) module_init(fn)
/* Each module must use one module_init(). */
#define module_init(initfn) \
@@ -430,7 +430,11 @@ struct module {
#ifdef CONFIG_TRACEPOINTS
unsigned int num_tracepoints;
- struct tracepoint * const *tracepoints_ptrs;
+ tracepoint_ptr_t *tracepoints_ptrs;
+#endif
+#ifdef CONFIG_BPF_EVENTS
+ unsigned int num_bpf_raw_events;
+ struct bpf_raw_event_map *bpf_raw_events;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
@@ -486,6 +490,13 @@ struct module {
#define MODULE_ARCH_INIT {}
#endif
+#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ return sym->st_value;
+}
+#endif
+
extern struct mutex module_mutex;
/* FIXME: It'd be nice to isolate modules during init, too, so they
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 45b1f56c6c2f..037eed52164b 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,7 +81,7 @@ extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
-extern int __mnt_is_readonly(struct vfsmount *mnt);
+extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
struct path;
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 6675b9f81979..34de06b426ef 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -7,6 +7,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
/**
* struct vif_device - interface representor for multicast routing
@@ -283,6 +284,12 @@ void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm);
+int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
+ u32 portid, u32 seq, struct mr_mfc *c,
+ int cmd, int flags),
+ spinlock_t *lock, struct fib_dump_filter *filter);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
@@ -290,7 +297,7 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
- spinlock_t *lock);
+ spinlock_t *lock, struct fib_dump_filter *filter);
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
@@ -340,7 +347,7 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
- spinlock_t *lock)
+ spinlock_t *lock, struct fib_dump_filter *filter)
{
return -EINVAL;
}
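A hedged sketch of a dump handler adapted to the extra fib_dump_filter argument; only the mr_rtm_dumproute() prototype comes from this patch, while the my_* iterator, fill callback and lock are hypothetical:

	static int my_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
	{
		struct fib_dump_filter filter = {};	/* empty filter: dump everything */

		/* a real handler may parse cb->nlh into 'filter' before dumping */
		return mr_rtm_dumproute(skb, cb, my_mr_table_iter, my_fill_mroute,
					&my_mrt_lock, &filter);
	}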
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d8062dfc..784fb52b9900 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -76,7 +76,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
- struct cpumask *affinity;
+ struct irq_affinity_desc *affinity;
union {
/* PCI MSI/X specific data */
@@ -116,6 +116,8 @@ struct msi_desc {
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
@@ -136,7 +138,7 @@ static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
#endif /* CONFIG_PCI_MSI */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
@@ -317,11 +319,18 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+
+#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
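For orientation only, the new for_each_msi_entry_safe() helper allows freeing descriptors while walking a device's MSI list; a teardown loop (assuming 'dev' is a struct device with populated descriptors) could read:

	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}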
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..1d3ade69d39a 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
-#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -44,9 +43,9 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct workqueue_struct *wq;
- struct work_struct work;
struct request_queue *rq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
fmode_t file_mode;
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 9b57a9b1b081..cbf77168658c 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -377,6 +377,7 @@ struct cfi_fixup {
#define CFI_MFR_SHARP 0x00B0
#define CFI_MFR_SST 0x00BF
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON 0x002C /* Micron */
#define CFI_MFR_TOSHIBA 0x0098
#define CFI_MFR_WINBOND 0x00DA
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 000000000000..0b6b59f7cfbd
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all JEDEC related definitions
+ */
+
+#ifndef __LINUX_MTD_JEDEC_H
+#define __LINUX_MTD_JEDEC_H
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_adl;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+#endif /* __LINUX_MTD_JEDEC_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd0be91bdefa..677768b21a1d 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/nvmem-provider.h>
#include <mtd/mtd-abi.h>
@@ -207,6 +208,7 @@ struct mtd_debug_info {
struct mtd_info {
u_char type;
uint32_t flags;
+ uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -341,6 +343,7 @@ struct mtd_info {
struct device dev;
int usecount;
struct mtd_debug_info dbg;
+ struct nvmem_device *nvmem;
};
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
@@ -386,7 +389,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return dev_of_node(&mtd->dev);
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
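Worked example for the nanddev_neraseblocks() fix above: for a hypothetical device with 2 targets, 2 LUNs per target and 1024 eraseblocks per LUN, the corrected helper returns 2 * 2 * 1024 = 4096 eraseblocks; the old formula ignored the target count and wrongly multiplied in pages_per_eraseblock instead.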
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index 98f20ef05d60..b8106651f807 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -12,6 +12,7 @@
#define __MTD_NAND_BCH_H__
struct mtd_info;
+struct nand_chip;
struct nand_bch_control;
#if defined(CONFIG_MTD_NAND_ECC_BCH)
@@ -21,14 +22,14 @@ static inline int mtd_nand_has_bch(void) { return 1; }
/*
* Calculate BCH ecc code
*/
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
u_char *ecc_code);
/*
* Detect and correct bit errors
*/
-int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
+int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
/*
* Initialize BCH encoder/decoder
*/
@@ -43,14 +44,14 @@ void nand_bch_free(struct nand_bch_control *nbc);
static inline int mtd_nand_has_bch(void) { return 0; }
static inline int
-nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
u_char *ecc_code)
{
return -1;
}
static inline int
-nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
return -ENOTSUPP;
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 8a2decf7462c..0b3bb156c344 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -13,28 +13,30 @@
#ifndef __MTD_NAND_ECC_H__
#define __MTD_NAND_ECC_H__
-struct mtd_info;
+struct nand_chip;
/*
* Calculate 3 byte ECC code for eccsize byte block
*/
void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
- u_char *ecc_code);
+ u_char *ecc_code, bool sm_order);
/*
* Calculate 3 byte ECC code for 256/512 byte block
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code);
/*
* Detect and correct a 1 bit error for eccsize byte block
*/
int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
- unsigned int eccsize);
+ unsigned int eccsize, bool sm_order);
/*
* Detect and correct a 1 bit error for 256/512 byte block
*/
-int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
+ u_char *calc_ecc);
#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 000000000000..339ac798568e
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all ONFI related definitions
+ */
+
+#ifndef __LINUX_MTD_ONFI_H
+#define __LINUX_MTD_ONFI_H
+
+#include <linux/types.h>
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_TIMING_MODE_0 (1 << 0)
+#define ONFI_TIMING_MODE_1 (1 << 1)
+#define ONFI_TIMING_MODE_2 (1 << 2)
+#define ONFI_TIMING_MODE_3 (1 << 3)
+#define ONFI_TIMING_MODE_4 (1 << 4)
+#define ONFI_TIMING_MODE_5 (1 << 5)
+#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 async_timing_mode;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ __le16 src_sync_timing_mode;
+ u8 src_ssync_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_adl;
+ u8 reserved4[8];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ u16 async_timing_mode;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/platnand.h b/include/linux/mtd/platnand.h
new file mode 100644
index 000000000000..bc11eb6b593b
--- /dev/null
+++ b/include/linux/mtd/platnand.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Contains all platform NAND related definitions.
+ */
+
+#ifndef __LINUX_MTD_PLATNAND_H
+#define __LINUX_MTD_PLATNAND_H
+
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips: max. number of chips to scan for
+ * @chip_offset: chip number offset
+ * @nr_partitions: number of partitions pointed to by partitions (or zero)
+ * @partitions: mtd partition list
+ * @chip_delay: R/B delay value in us
+ * @options: Option flags, e.g. 16bit buswidth
+ * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @part_probe_types: NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+ int nr_chips;
+ int chip_offset;
+ int nr_partitions;
+ struct mtd_partition *partitions;
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
+};
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
+ * @dev_ready: platform specific function to read ready/busy pin
+ * @select_chip: platform specific chip select function
+ * @cmd_ctrl: platform specific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
+ * @priv: private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
+ int (*dev_ready)(struct nand_chip *chip);
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*write_buf)(struct nand_chip *chip, const uint8_t *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, uint8_t *buf, int len);
+ void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip: chip level chip structure
+ * @ctrl: controller level device structure
+ */
+struct platform_nand_data {
+ struct platform_nand_chip chip;
+ struct platform_nand_ctrl ctrl;
+};
+
+#endif /* __LINUX_MTD_PLATNAND_H */
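A board-file sketch of how the relocated platform NAND structures might be filled in (the board_nand_* callbacks are hypothetical; note they now take a struct nand_chip * as per the prototypes above):

	static struct platform_nand_data board_nand_data = {
		.chip = {
			.nr_chips	= 1,
			.chip_delay	= 20,			/* tR delay in us */
			.options	= NAND_BUSWIDTH_16,	/* assumes a 16-bit bus */
		},
		.ctrl = {
			.cmd_ctrl	= board_nand_cmd_ctrl,
			.dev_ready	= board_nand_dev_ready,
		},
	};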
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index efb2345359bb..33e240acdc6d 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,22 +21,12 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/mtd/jedec.h>
+#include <linux/mtd/onfi.h>
#include <linux/of.h>
#include <linux/types.h>
-struct nand_flash_dev;
-
-/* Scan and identify a NAND device */
-int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *ids);
-
-static inline int nand_scan(struct mtd_info *mtd, int max_chips)
-{
- return nand_scan_with_ids(mtd, max_chips, NULL);
-}
-
-/* Internal helper for board drivers which need to override command function */
-void nand_wait_ready(struct mtd_info *mtd);
+struct nand_chip;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -131,9 +121,11 @@ enum nand_ecc_algo {
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/* Bit mask for flags passed to do_nand_read_ecc */
-#define NAND_GET_DEVICE 0x80
-
+/*
+ * When using software implementation of Hamming, we can specify which byte
+ * ordering should be used.
+ */
+#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
/*
* Option constants for bizarre disfunctionality and real
@@ -175,9 +167,7 @@ enum nand_ecc_algo {
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
/* Macros to identify the above */
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
@@ -198,10 +188,10 @@ enum nand_ecc_algo {
#define NAND_USE_BOUNCE_BUFFER 0x00100000
/*
- * In case your controller is implementing ->cmd_ctrl() and is relying on the
- * default ->cmdfunc() implementation, you may want to let the core handle the
- * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
- * requested.
+ * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
+ * on the default ->cmdfunc() implementation, you may want to let the core
+ * handle the tCCS delay which is required when a column change (RNDIN or
+ * RNDOUT) is requested.
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
@@ -213,259 +203,18 @@ enum nand_ecc_algo {
*/
#define NAND_IS_BOOT_MEDIUM 0x00400000
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS 0x00800000
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
-/* Keep gcc happy */
-struct nand_chip;
-
-/* ONFI version bits */
-#define ONFI_VERSION_1_0 BIT(1)
-#define ONFI_VERSION_2_0 BIT(2)
-#define ONFI_VERSION_2_1 BIT(3)
-#define ONFI_VERSION_2_2 BIT(4)
-#define ONFI_VERSION_2_3 BIT(5)
-#define ONFI_VERSION_3_0 BIT(6)
-#define ONFI_VERSION_3_1 BIT(7)
-#define ONFI_VERSION_3_2 BIT(8)
-#define ONFI_VERSION_4_0 BIT(9)
-
-/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
-
-/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-
-/* ONFI feature number/address */
-#define ONFI_FEATURE_NUMBER 256
-#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
-
-/* Vendor-specific feature address (Micron) */
-#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
-#define ONFI_FEATURE_ON_DIE_ECC 0x90
-#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
-
-/* ONFI subfeature parameters length */
-#define ONFI_SUBFEATURE_PARAM_LEN 4
-
-/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
-
-struct nand_onfi_params {
- /* rev info and features block */
- /* 'O' 'N' 'F' 'I' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- __le16 opt_cmd;
- u8 reserved0[2];
- __le16 ext_param_page_length; /* since ONFI 2.1 */
- u8 num_of_param_pages; /* since ONFI 2.1 */
- u8 reserved1[17];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id;
- __le16 date_code;
- u8 reserved2[13];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- __le32 data_bytes_per_ppage;
- __le16 spare_bytes_per_ppage;
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- u8 programs_per_page;
- u8 ppage_attr;
- u8 ecc_bits;
- u8 interleaved_bits;
- u8 interleaved_ops;
- u8 reserved3[13];
-
- /* electrical parameter block */
- u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
- __le16 program_cache_timing_mode;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
- __le16 clk_pin_capacitance_typ;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- u8 input_pin_capacitance_max;
- u8 driver_strength_support;
- __le16 t_int_r;
- __le16 t_adl;
- u8 reserved4[8];
-
- /* vendor */
- __le16 vendor_revision;
- u8 vendor[88];
-
- __le16 crc;
-} __packed;
-
-#define ONFI_CRC_BASE 0x4F4E
-
-/* Extended ECC information Block Definition (since ONFI 2.1) */
-struct onfi_ext_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
-#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
-#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
-struct onfi_ext_section {
- u8 type;
- u8 length;
-} __packed;
-
-#define ONFI_EXT_SECTION_MAX 8
-
-/* Extended Parameter Page Definition (since ONFI 2.1) */
-struct onfi_ext_param_page {
- __le16 crc;
- u8 sig[4]; /* 'E' 'P' 'P' 'S' */
- u8 reserved0[10];
- struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
-
- /*
- * The actual size of the Extended Parameter Page is in
- * @ext_param_page_length of nand_onfi_params{}.
- * The following are the variable length sections.
- * So we do not add any fields below. Please see the ONFI spec.
- */
-} __packed;
-
-struct jedec_ecc_info {
- u8 ecc_bits;
- u8 codeword_size;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 reserved[2];
-} __packed;
-
-/* JEDEC features */
-#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
-
-struct nand_jedec_params {
- /* rev info and features block */
- /* 'J' 'E' 'S' 'D' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- u8 opt_cmd[3];
- __le16 sec_cmd;
- u8 num_of_param_pages;
- u8 reserved0[18];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id[6];
- u8 reserved1[10];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- u8 reserved2[6];
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- u8 programs_per_page;
- u8 multi_plane_addr;
- u8 multi_plane_op_attr;
- u8 reserved3[38];
-
- /* electrical parameter block */
- __le16 async_sdr_speed_grade;
- __le16 toggle_ddr_speed_grade;
- __le16 sync_ddr_speed_grade;
- u8 async_sdr_features;
- u8 toggle_ddr_features;
- u8 sync_ddr_features;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_r_multi_plane;
- __le16 t_ccs;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- __le16 clk_pin_capacitance_typ;
- u8 driver_strength_support;
- __le16 t_adl;
- u8 reserved4[36];
-
- /* ECC and endurance block */
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- struct jedec_ecc_info ecc_info[4];
- u8 reserved5[29];
-
- /* reserved */
- u8 reserved6[148];
-
- /* vendor */
- __le16 vendor_rev_num;
- u8 reserved7[88];
-
- /* CRC for Parameter Page */
- __le16 crc;
-} __packed;
-
-/**
- * struct onfi_params - ONFI specific parameters that will be reused
- * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
- * @tPROG: Page program time
- * @tBERS: Block erase time
- * @tR: Page read time
- * @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
- * @vendor_revision: Vendor specific revision number
- * @vendor: Vendor specific data
- */
-struct onfi_params {
- int version;
- u16 tPROG;
- u16 tBERS;
- u16 tR;
- u16 tCCS;
- u16 async_timing_mode;
- u16 vendor_revision;
- u8 vendor[88];
-};
-
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -499,49 +248,6 @@ struct nand_id {
};
/**
- * struct nand_controller_ops - Controller operations
- *
- * @attach_chip: this method is called after the NAND detection phase after
- * flash ID and MTD fields such as erase size, page size and OOB
- * size have been set up. ECC requirements are available if
- * provided by the NAND chip or device tree. Typically used to
- * choose the appropriate ECC configuration and allocate
- * associated resources.
- * This hook is optional.
- * @detach_chip: free all resources allocated/claimed in
- * nand_controller_ops->attach_chip().
- * This hook is optional.
- */
-struct nand_controller_ops {
- int (*attach_chip)(struct nand_chip *chip);
- void (*detach_chip)(struct nand_chip *chip);
-};
-
-/**
- * struct nand_controller - Structure used to describe a NAND controller
- *
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- * @ops: NAND controller operations.
- */
-struct nand_controller {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
- const struct nand_controller_ops *ops;
-};
-
-static inline void nand_controller_init(struct nand_controller *nfc)
-{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
-}
-
-/**
* struct nand_ecc_step_info - ECC step information of ECC engine
* @stepsize: data bytes per ECC step
* @strengths: array of supported strengths
@@ -646,31 +352,28 @@ struct nand_ecc_ctrl {
void *priv;
u8 *calc_buf;
u8 *code_buf;
- void (*hwctl)(struct mtd_info *mtd, int mode);
- int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
- uint8_t *ecc_code);
- int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
- uint8_t *calc_ecc);
- int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf, int page);
- int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required, int page);
- int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
- int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
- int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
- int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+ void (*hwctl)(struct nand_chip *chip, int mode);
+ int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*read_page)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required, int page);
+ int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*write_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob)(struct nand_chip *chip, int page);
+ int (*write_oob)(struct nand_chip *chip, int page);
};
/**
@@ -800,24 +503,6 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
}
/**
- * struct nand_manufacturer_ops - NAND Manufacturer operations
- * @detect: detect the NAND memory organization and capabilities
- * @init: initialize all vendor specific fields (like the ->read_retry()
- * implementation) if any.
- * @cleanup: the ->init() function may have allocated resources, ->cleanup()
- * is here to let vendor specific code release those resources.
- * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
- * page. This is called after the checksum is verified.
- */
-struct nand_manufacturer_ops {
- void (*detect)(struct nand_chip *chip);
- int (*init)(struct nand_chip *chip);
- void (*cleanup)(struct nand_chip *chip);
- void (*fixup_onfi_param_page)(struct nand_chip *chip,
- struct nand_onfi_params *p);
-};
-
-/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -1154,18 +839,21 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
-#define NAND_OPERATION(_instrs) \
+#define NAND_OPERATION(_cs, _instrs) \
{ \
+ .cs = _cs, \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
@@ -1173,46 +861,128 @@ struct nand_operation {
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_data_interface: setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in
+ * progress used instead of the per chip wait queue
+ * when a hw controller is available.
+ * @ops: NAND controller operations.
+ */
+struct nand_controller {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ nfc->active = NULL;
+ spin_lock_init(&nfc->lock);
+ init_waitqueue_head(&nfc->wq);
+}
+
+/**
+ * struct nand_legacy - NAND chip legacy fields/hooks
+ * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
+ * @read_byte: read one byte from the chip
+ * @write_byte: write a single byte to the chip on the low 8 I/O lines
+ * @write_buf: write data from the buffer to the chip
+ * @read_buf: read data from the chip into the buffer
+ * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
+ * to write command and address
+ * @cmdfunc: hardware specific function for writing commands to the chip.
+ * @dev_ready: hardware specific function for accessing device ready/busy line.
+ * If set to NULL no access to ready/busy is available and the
+ * ready/busy information is read from the chip status register.
+ * @waitfunc: hardware specific function for wait on ready.
+ * @block_bad: check if a block is bad, using OOB markers
+ * @block_markbad: mark a block bad
+ * @erase: erase function
+ * @set_features: set the NAND chip features
+ * @get_features: get the NAND chip features
+ * @chip_delay: chip dependent delay for transferring data from array to read
+ * regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
+ *
+ * If you look at this structure you're already wrong. These fields/hooks are
+ * all deprecated.
+ */
+struct nand_legacy {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ u8 (*read_byte)(struct nand_chip *chip);
+ void (*write_byte)(struct nand_chip *chip, u8 byte);
+ void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
+ int page_addr);
+ int (*dev_ready)(struct nand_chip *chip);
+ int (*waitfunc)(struct nand_chip *chip);
+ int (*block_bad)(struct nand_chip *chip, loff_t ofs);
+ int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
+ int (*erase)(struct nand_chip *chip, int page);
+ int (*set_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int (*get_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int chip_delay;
+ struct nand_controller dummy_controller;
+};
/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
- * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
- * flash device
- * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
- * flash device.
- * @read_byte: [REPLACEABLE] read one byte from the chip
- * @read_word: [REPLACEABLE] read one word from the chip
- * @write_byte: [REPLACEABLE] write a single byte to the chip on the
- * low 8 I/O lines
- * @write_buf: [REPLACEABLE] write data from the buffer to the chip
- * @read_buf: [REPLACEABLE] read data from the chip into the buffer
- * @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
- * @block_markbad: [REPLACEABLE] mark a block bad
- * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
- * device ready/busy line. If set to NULL no access to
- * ready/busy is available and the ready/busy information
- * is read from the chip status register.
- * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
- * commands to the chip.
- * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
- * ready.
- * @exec_op: controller specific method to execute NAND operations.
- * This method replaces ->cmdfunc(),
- * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
- * ->waifunc().
+ * @legacy: All legacy fields/hooks. If you develop a new driver,
+ * don't even try to use any of these fields/hooks, and if
+ * you're modifying an existing driver that is using those
+ * fields/hooks, you should consider reworking the driver
+ * to avoid using them.
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @dummy_controller: dummy controller implementation for drivers that can
- * only control a single chip
- * @erase: [REPLACEABLE] erase function
- * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
- * data from array to read regs (tR).
* @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
@@ -1259,13 +1029,11 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
+ * @cur_cs: currently selected target. -1 means no target selected,
+ * otherwise we should always have cur_cs >= 0 &&
+ * cur_cs < numchips. NAND Controller drivers should not
+ * modify this value, but they're allowed to read it.
* @read_retries: [INTERN] the number of read retry modes supported
- * @set_features: [REPLACEABLE] set the NAND chip features
- * @get_features: [REPLACEABLE] get the NAND chip features
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1283,35 +1051,11 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
struct nand_chip {
struct mtd_info mtd;
- void __iomem *IO_ADDR_R;
- void __iomem *IO_ADDR_W;
- uint8_t (*read_byte)(struct mtd_info *mtd);
- u16 (*read_word)(struct mtd_info *mtd);
- void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
- int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
- int page_addr);
- int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
- int (*exec_op)(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only);
- int (*erase)(struct mtd_info *mtd, int page);
- int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
- int feature_addr, uint8_t *subfeature_para);
- int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
- int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
- const struct nand_data_interface *conf);
+ struct nand_legacy legacy;
+
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
- int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -1340,6 +1084,8 @@ struct nand_chip {
struct nand_data_interface data_interface;
+ int cur_cs;
+
int read_retries;
flstate_t state;
@@ -1349,7 +1095,6 @@ struct nand_chip {
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1365,15 +1110,6 @@ struct nand_chip {
} manufacturer;
};
-static inline int nand_exec_op(struct nand_chip *chip,
- const struct nand_operation *op)
-{
- if (!chip->exec_op)
- return -ENOTSUPP;
-
- return chip->exec_op(chip, op, false);
-}
-
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1420,27 +1156,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
}
/*
- * NAND Flash Manufacturer ID Codes
- */
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_ESMT 0xc8
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-#define NAND_MFR_AMD 0x01
-#define NAND_MFR_MACRONIX 0xc2
-#define NAND_MFR_EON 0x92
-#define NAND_MFR_SANDISK 0x45
-#define NAND_MFR_INTEL 0x89
-#define NAND_MFR_ATO 0x9b
-#define NAND_MFR_WINBOND 0xef
-
-
-/*
* A helper for defining older NAND chips where the second ID byte fully
* defined the chip, including the geometry (chip size, eraseblock size, page
* size). All these chips have 512 bytes NAND page size.
@@ -1519,114 +1234,7 @@ struct nand_flash_dev {
int onfi_timing_mode_default;
};
-/**
- * struct nand_manufacturer - NAND Flash Manufacturer structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
- * @ops: manufacturer operations
-*/
-struct nand_manufacturer {
- int id;
- char *name;
- const struct nand_manufacturer_ops *ops;
-};
-
-const struct nand_manufacturer *nand_get_manufacturer(u8 id);
-
-static inline const char *
-nand_manufacturer_name(const struct nand_manufacturer *manufacturer)
-{
- return manufacturer ? manufacturer->name : "Unknown";
-}
-
-extern struct nand_flash_dev nand_flash_ids[];
-
-extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
-extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
-extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
-extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
-extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
-extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-
int nand_create_bbt(struct nand_chip *chip);
-int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
- int allowbbt);
-
-/**
- * struct platform_nand_chip - chip level device structure
- * @nr_chips: max. number of chips to scan for
- * @chip_offset: chip number offset
- * @nr_partitions: number of partitions pointed to by partitions (or zero)
- * @partitions: mtd partition list
- * @chip_delay: R/B delay value in us
- * @options: Option flags, e.g. 16bit buswidth
- * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @part_probe_types: NULL-terminated array of probe types
- */
-struct platform_nand_chip {
- int nr_chips;
- int chip_offset;
- int nr_partitions;
- struct mtd_partition *partitions;
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
- const char **part_probe_types;
-};
-
-/* Keep gcc happy */
-struct platform_device;
-
-/**
- * struct platform_nand_ctrl - controller level device structure
- * @probe: platform specific function to probe/setup hardware
- * @remove: platform specific function to remove/teardown hardware
- * @dev_ready: platform specific function to read ready/busy pin
- * @select_chip: platform specific chip select function
- * @cmd_ctrl: platform specific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @write_buf: platform specific function for write buffer
- * @read_buf: platform specific function for read buffer
- * @priv: private data to transport driver specific settings
- *
- * All fields are optional and depend on the hardware driver requirements
- */
-struct platform_nand_ctrl {
- int (*probe)(struct platform_device *pdev);
- void (*remove)(struct platform_device *pdev);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- void *priv;
-};
-
-/**
- * struct platform_nand_data - container structure for platform-specific data
- * @chip: chip level chip structure
- * @ctrl: controller level device structure
- */
-struct platform_nand_data {
- struct platform_nand_chip chip;
- struct platform_nand_ctrl ctrl;
-};
-
-/* return the supported asynchronous timing mode. */
-static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
-{
- if (!chip->parameters.onfi)
- return ONFI_TIMING_MODE_UNKNOWN;
-
- return chip->parameters.onfi->async_timing_mode;
-}
-
-int onfi_fill_data_interface(struct nand_chip *chip,
- enum nand_data_interface_type type,
- int timing_mode);
/*
* Check if it is a SLC nand.
@@ -1658,9 +1266,6 @@ static inline int nand_opcode_8bits(unsigned int command)
return 0;
}
-/* get timing characteristics from ONFI timing mode. */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
void *extraoob, int extraooblen,
@@ -1670,37 +1275,22 @@ int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
-
-/* Default write_oob syndrome implementation */
-int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+int nand_write_oob_std(struct nand_chip *chip, int page);
/* Default read_oob implementation */
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
-
-/* Default read_oob syndrome implementation */
-int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page);
+int nand_read_oob_std(struct nand_chip *chip, int page);
-/* Wrapper to use in order for controllers/vendors to GET/SET FEATURES */
-int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
-int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
/* Stub used by drivers that do not support GET/SET FEATURES operations */
-int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- int addr, u8 *subfeature_param);
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
/* Default read_page_raw implementation */
-int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
-int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- u8 *buf, int oob_required, int page);
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page);
/* Default write_page_raw implementation */
-int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int oob_required, int page);
-int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
- const u8 *buf, int oob_required, int page);
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1710,7 +1300,6 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
-int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
@@ -1734,21 +1323,37 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
+{
+ return nand_scan_with_ids(chip, max_chips, NULL);
+}
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct nand_chip *chip);
+
/*
* Free resources held by the NAND device, must be called on error after a
* successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct mtd_info *mtd);
-
-/* Default extended ID decoding function */
-void nand_decode_ext_id(struct nand_chip *chip);
+void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+struct gpio_desc;
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
#endif /* __LINUX_MTD_RAWNAND_H */
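A probe-path sketch against the reworked rawnand API, for orientation only: nand_scan() and nand_release() now take the nand_chip rather than the mtd_info, and board hooks live under chip->legacy ('priv' and the my_* helpers are hypothetical):

	struct nand_chip *chip = &priv->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	chip->legacy.cmd_ctrl = my_cmd_ctrl;
	chip->legacy.dev_ready = my_dev_ready;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret)
		nand_cleanup(chip);
	return ret;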
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index c759d403cbc0..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index c922e97f205a..fa2d89e38e40 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MTD_SPI_NOR_H
@@ -23,7 +19,8 @@
#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
#define SNOR_MFR_GIGADEVICE 0xc8
#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
+#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -236,6 +233,96 @@ enum spi_nor_option_flags {
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
SNOR_F_BROKEN_RESET = BIT(6),
+ SNOR_F_4B_OPCODES = BIT(7),
+ SNOR_F_HAS_4BAIT = BIT(8),
+};
+
+/**
+ * struct spi_nor_erase_type - Structure to describe a SPI NOR erase type
+ * @size: the size of the sector/block erased by the erase type.
+ * JEDEC JESD216B imposes erase sizes to be a power of 2.
+ * @size_shift: @size is a power of 2, the shift is stored in
+ * @size_shift.
+ * @size_mask: the size mask based on @size_shift.
+ * @opcode: the SPI command op code to erase the sector/block.
+ * @idx: Erase Type index as sorted in the Basic Flash Parameter
+ * Table. It will be used to synchronize the supported
+ * Erase Types with the ones identified in the SFDP
+ * optional tables.
+ */
+struct spi_nor_erase_type {
+ u32 size;
+ u32 size_shift;
+ u32 size_mask;
+ u8 opcode;
+ u8 idx;
+};
+
+/**
+ * struct spi_nor_erase_command - Used for non-uniform erases
+ * The structure is used to describe a list of erase commands to be executed
+ * once we validate that the erase can be performed. The elements in the list
+ * are run-length encoded.
+ * @list: for inclusion into the list of erase commands.
+ * @count: how many times the same erase command should be
+ * consecutively used.
+ * @size: the size of the sector/block erased by the command.
+ * @opcode: the SPI command op code to erase the sector/block.
+ */
+struct spi_nor_erase_command {
+ struct list_head list;
+ u32 count;
+ u32 size;
+ u8 opcode;
+};
+
+/**
+ * struct spi_nor_erase_region - Structure to describe a SPI NOR erase region
+ * @offset: the offset in the data array of the erase region start.
+ * The LSBs are used as a bitmask of flags encoding whether
+ * this region is overlaid, whether it is the last region in
+ * the SPI NOR flash memory, and which erase commands are
+ * supported inside this region.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size at BIT(0).
+ * @size: the size of the region in bytes.
+ */
+struct spi_nor_erase_region {
+ u64 offset;
+ u64 size;
+};
+
+#define SNOR_ERASE_TYPE_MAX 4
+#define SNOR_ERASE_TYPE_MASK GENMASK_ULL(SNOR_ERASE_TYPE_MAX - 1, 0)
+
+#define SNOR_LAST_REGION BIT(4)
+#define SNOR_OVERLAID_REGION BIT(5)
+
+#define SNOR_ERASE_FLAGS_MAX 6
+#define SNOR_ERASE_FLAGS_MASK GENMASK_ULL(SNOR_ERASE_FLAGS_MAX - 1, 0)
+
+/**
+ * struct spi_nor_erase_map - Structure to describe the SPI NOR erase map
+ * @regions: array of erase regions. The regions are consecutive in
+ * address space. Walking through the regions is done
+ * incrementally.
+ * @uniform_region: a pre-allocated erase region for SPI NOR with a uniform
+ * sector size (legacy implementation).
+ * @erase_type: an array of erase types shared by all the regions.
+ * The erase types are sorted in ascending order, with the
+ * smallest Erase Type size being the first member in the
+ * erase_type array.
+ * @uniform_erase_type: bitmask encoding the erase types that can erase the
+ * entire memory. This member is filled at init time, for
+ * both uniform and non-uniform SPI NOR flash memories, if
+ * they support at least one erase type that can erase the
+ * entire memory.
+ */
+struct spi_nor_erase_map {
+ struct spi_nor_erase_region *regions;
+ struct spi_nor_erase_region uniform_region;
+ struct spi_nor_erase_type erase_type[SNOR_ERASE_TYPE_MAX];
+ u8 uniform_erase_type;
};
/**
@@ -262,6 +349,7 @@ struct flash_info;
* @write_proto: the SPI protocol for write operations
* @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @cmd_buf: used by the write_reg
+ * @erase_map: the erase map of the SPI NOR
* @prepare: [OPTIONAL] do some preparations for the
* read/write/erase/lock/unlock operations
* @unprepare: [OPTIONAL] do some post work after the
@@ -297,6 +385,7 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ struct spi_nor_erase_map erase_map;
int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
@@ -317,6 +406,35 @@ struct spi_nor {
void *priv;
};
+static u64 __maybe_unused
+spi_nor_region_is_last(const struct spi_nor_erase_region *region)
+{
+ return region->offset & SNOR_LAST_REGION;
+}
+
+static u64 __maybe_unused
+spi_nor_region_end(const struct spi_nor_erase_region *region)
+{
+ return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
+}
+
+static void __maybe_unused
+spi_nor_region_mark_end(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_LAST_REGION;
+}
+
+static void __maybe_unused
+spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
+{
+ region->offset |= SNOR_OVERLAID_REGION;
+}
+
+static bool __maybe_unused spi_nor_has_uniform_erase(const struct spi_nor *nor)
+{
+ return !!nor->erase_map.uniform_erase_type;
+}
+
static inline void spi_nor_set_flash_node(struct spi_nor *nor,
struct device_node *np)
{
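
For orientation, a minimal sketch (not part of this patch) of how a caller could walk the new erase map using the helpers added above; sketch_dump_erase_map() is a hypothetical name and locking/error handling are omitted.

static void sketch_dump_erase_map(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->erase_map;
	struct spi_nor_erase_region *region = map->regions;
	unsigned int i;

	if (spi_nor_has_uniform_erase(nor)) {
		/* A single erase size covers the whole device (legacy case). */
		pr_info("uniform erase, types 0x%02x\n", map->uniform_erase_type);
		return;
	}

	/* Non-uniform: regions are consecutive in address space. */
	for (i = 0; ; i++, region++) {
		u64 start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		u64 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

		pr_info("region %u: [0x%llx-0x%llx) erase types 0x%02llx\n",
			i, (unsigned long long)start,
			(unsigned long long)spi_nor_region_end(region),
			(unsigned long long)erase_mask);

		if (spi_nor_region_is_last(region))
			break;
	}
}
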
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 088ff96c3eb6..b92e2aa955b6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -194,8 +194,10 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
/**
diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h
new file mode 100644
index 000000000000..cd5a293ce3ae
--- /dev/null
+++ b/include/linux/ndctl.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ * more details.
+ */
+#ifndef _LINUX_NDCTL_H
+#define _LINUX_NDCTL_H
+
+#include <uapi/linux/ndctl.h>
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
+};
+
+#endif /* _LINUX_NDCTL_H */
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index db99240d00bd..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -363,7 +363,6 @@ static inline void net_dim_sample(u16 event_ctr,
}
#define NET_DIM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static inline void net_dim_calc_stats(struct net_dim_sample *start,
@@ -407,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
+ net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+ &dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..1377d085ef99 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -535,6 +535,32 @@ static inline void napi_synchronize(const struct napi_struct *n)
barrier();
}
+/**
+ * napi_if_scheduled_mark_missed - if napi is running, set the
+ * NAPIF_STATE_MISSED
+ * @n: NAPI context
+ *
+ * If NAPI is running, set the NAPIF_STATE_MISSED bit and return true if
+ * NAPI is scheduled.
+ **/
+static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
+{
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ if (val & NAPIF_STATE_DISABLE)
+ return true;
+
+ if (!(val & NAPIF_STATE_SCHED))
+ return false;
+
+ new = val | NAPIF_STATE_MISSED;
+ } while (cmpxchg(&n->state, val, new) != val);
+
+ return true;
+}
+
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
__QUEUE_STATE_STACK_XOFF,
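
A hedged usage sketch for the helper above (struct my_ring, its napi member and my_hw_arm_tx_interrupt() are invented for illustration): a driver deciding whether to kick its hardware or let an already-scheduled NAPI poll pick up the work.

static void sketch_trigger_tx_clean(struct my_ring *ring)
{
	/*
	 * If NAPI is already scheduled (or being disabled), mark it missed
	 * and let the poll routine reap completions; otherwise kick HW.
	 */
	if (napi_if_scheduled_mark_missed(&ring->napi))
		return;

	my_hw_arm_tx_interrupt(ring);	/* hypothetical helper */
}
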
@@ -583,6 +609,9 @@ struct netdev_queue {
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xdp_umem *umem;
+#endif
/*
* write-mostly part
*/
@@ -712,6 +741,9 @@ struct netdev_rx_queue {
struct kobject kobj;
struct net_device *dev;
struct xdp_rxq_info xdp_rxq;
+#ifdef CONFIG_XDP_SOCKETS
+ struct xdp_umem *umem;
+#endif
} ____cacheline_aligned_in_smp;
/*
@@ -813,6 +845,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
+ TC_SETUP_QDISC_GRED,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -831,9 +865,6 @@ enum bpf_netdev_command {
XDP_QUERY_PROG,
XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
- BPF_OFFLOAD_VERIFIER_PREP,
- BPF_OFFLOAD_TRANSLATE,
- BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
XDP_QUERY_XSK_UMEM,
@@ -859,15 +890,6 @@ struct netdev_bpf {
/* flags with which program was installed */
u32 prog_flags;
};
- /* BPF_OFFLOAD_VERIFIER_PREP */
- struct {
- struct bpf_prog *prog;
- const struct bpf_prog_offload_ops *ops; /* callee set */
- } verifier;
- /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
- struct {
- struct bpf_prog *prog;
- } offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
@@ -1143,7 +1165,7 @@ struct dev_ifalias {
* entries to skb and update idx with the number of entries.
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
- * u16 flags)
+ * u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
@@ -1365,10 +1387,16 @@ struct net_device_ops {
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
-
+ int (*ndo_fdb_get)(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
@@ -1730,6 +1758,8 @@ enum netdev_priv_flags {
* switch driver and used to set the phys state of the
* switch port.
*
+ * @wol_enabled: Wake-on-LAN is enabled
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -1974,7 +2004,6 @@ struct net_device {
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
- struct pcpu_vstats __percpu *vstats;
};
#if IS_ENABLED(CONFIG_GARP)
@@ -2014,6 +2043,7 @@ struct net_device {
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
+ unsigned wol_enabled:1;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2317,6 +2347,7 @@ static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
+ bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
int (*func) (struct sk_buff *,
struct net_device *,
@@ -2353,7 +2384,13 @@ struct pcpu_sw_netstats {
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(4 * sizeof(u64));
+
+struct pcpu_lstats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
@@ -2418,7 +2455,8 @@ enum netdev_cmd {
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU, /* notify after mtu change happened */
- NETDEV_CHANGEADDR,
+ NETDEV_CHANGEADDR, /* notify after the address change */
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
@@ -2455,6 +2493,13 @@ struct netdev_notifier_info {
struct netlink_ext_ack *extack;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
@@ -2473,6 +2518,11 @@ struct netdev_notifier_changelowerstate_info {
void *lower_state_info; /* is lower dev state */
};
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info; /* must be first */
+ const unsigned char *dev_addr;
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
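
An illustrative consumer of the new NETDEV_PRE_CHANGEADDR event (a sketch; the handler name and validation policy are made up, and is_valid_ether_addr() comes from linux/etherdevice.h):

static int sketch_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct netdev_notifier_pre_changeaddr_info *prei = ptr;

	if (event != NETDEV_PRE_CHANGEADDR)
		return NOTIFY_DONE;

	/* Veto the prospective address before it is applied. */
	if (!is_valid_ether_addr(prei->dev_addr))
		return notifier_from_errno(-EADDRNOTAVAIL);

	return NOTIFY_DONE;
}
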
@@ -2567,7 +2617,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
-int dev_open(struct net_device *dev);
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
@@ -3142,6 +3192,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
@@ -3156,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
+static inline bool __netdev_sent_queue(struct net_device *dev,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+ xmit_more);
+}
+
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
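
A sketch of the calling pattern the new BQL helper is meant for (hypothetical driver code; my_hw_kick_tx() stands in for the driver's doorbell write):

static void sketch_post_tx_descriptor(struct netdev_queue *txq,
				      struct sk_buff *skb, bool xmit_more)
{
	/* ... descriptor already written to the hardware ring ... */

	/*
	 * Account the bytes; the helper only reports whether the doorbell
	 * must be rung (last skb of a batch, or the queue just stopped).
	 */
	if (__netdev_tx_sent_queue(txq, skb->len, xmit_more))
		my_hw_kick_tx(txq);
}
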
@@ -3545,8 +3623,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
-int __dev_change_flags(struct net_device *, unsigned int flags);
-int dev_change_flags(struct net_device *, unsigned int);
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
@@ -3559,7 +3639,10 @@ int dev_set_mtu_ext(struct net_device *dev, int mtu,
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
@@ -3597,6 +3680,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return 0;
}
+bool dev_nit_active(struct net_device *dev);
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
@@ -3999,6 +4083,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
@@ -4263,9 +4357,10 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
-static inline void netdev_rx_csum_fault(struct net_device *dev)
+static inline void netdev_rx_csum_fault(struct net_device *dev,
+ struct sk_buff *skb)
{
}
#endif
@@ -4291,7 +4386,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int rc;
+ netdev_tx_t rc;
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+ break;
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..f2e1e6b13ca4 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -303,18 +303,18 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_PROTO, /* ipset protocol */
IPSET_CB_DUMP, /* dump single set/all sets */
IPSET_CB_INDEX, /* set index */
IPSET_CB_PRIVATE, /* set private data */
IPSET_CB_ARG0, /* type specific */
- IPSET_CB_ARG1,
};
/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 03097fa70975..e142b2b5f1ea 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -19,7 +19,4 @@ struct ip_conntrack_stat {
unsigned int search_restart;
};
-/* call to create an explicit dependency on nf_conntrack. */
-void need_conntrack(void);
-
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..6989e2e4eabf 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct netns_proto_gre {
+ struct nf_proto_net nf;
+ rwlock_t keymap_lock;
+ struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
+};
+
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
@@ -28,7 +41,5 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-void nf_nat_need_gre(void);
-
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3304a2..cf09ab37b45b 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
}
#endif /* CONFIG_PROVE_LOCKING */
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index ecf7dab81e9e..c6000046c966 100644
--- a/include/linux/netfilter/nfnetlink_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -27,6 +27,7 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
const struct list_head *nf_osf_fingers);
const char *nf_osf_find(const struct sk_buff *skb,
- const struct list_head *nf_osf_fingers);
+ const struct list_head *nf_osf_fingers,
+ const int ttl_check);
#endif /* _NFOSF_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index fa0686500970..5f2614d02e03 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,43 +17,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
skb_dst_drop(skb);
}
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
+}
+
+static inline bool nf_bridge_info_exists(const struct sk_buff *skb)
+{
+ return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF);
+}
+
static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
static inline struct net_device *
nf_bridge_get_physindev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physindev : NULL;
}
static inline struct net_device *
nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physoutdev : NULL;
}
static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
{
- return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge && nf_bridge->in_prerouting;
}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 71f121b66ca8..4e8add270200 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,8 +34,8 @@ struct netlink_skb_parms {
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
-extern void netlink_table_grab(void);
-extern void netlink_table_ungrab(void);
+void netlink_table_grab(void);
+void netlink_table_ungrab(void);
#define NL_CFG_F_NONROOT_RECV (1 << 0)
#define NL_CFG_F_NONROOT_SEND (1 << 1)
@@ -51,7 +51,7 @@ struct netlink_kernel_cfg {
bool (*compare)(struct net *net, struct sock *sk);
};
-extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+struct sock *__netlink_kernel_create(struct net *net, int unit,
struct module *module,
struct netlink_kernel_cfg *cfg);
static inline struct sock *
@@ -110,24 +110,33 @@ struct netlink_ext_ack {
} \
} while (0)
-extern void netlink_kernel_release(struct sock *sk);
-extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
- const struct netlink_ext_ack *extack);
-extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
- __u32 group, gfp_t allocation);
-extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
-extern int netlink_register_notifier(struct notifier_block *nb);
-extern int netlink_unregister_notifier(struct notifier_block *nb);
+static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
+ u64 cookie)
+{
+ u64 __cookie = cookie;
+
+ memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+ extack->cookie_len = sizeof(__cookie);
+}
+
+void netlink_kernel_release(struct sock *sk);
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
+int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
+void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
+int netlink_has_listeners(struct sock *sk, unsigned int group);
+
+int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+ __u32 portid, __u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data);
+int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
+int netlink_register_notifier(struct notifier_block *nb);
+int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
@@ -176,8 +185,11 @@ struct netlink_callback {
void *data;
/* the module that dump function belong to */
struct module *module;
+ struct netlink_ext_ack *extack;
u16 family;
u16 min_dump_alloc;
+ bool strict_check;
+ u16 answer_flags;
unsigned int prev_seq, seq;
long args[6];
};
@@ -200,7 +212,7 @@ struct netlink_dump_control {
u16 min_dump_alloc;
};
-extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -219,8 +231,8 @@ struct netlink_tap {
struct list_head list;
};
-extern int netlink_add_tap(struct netlink_tap *nt);
-extern int netlink_remove_tap(struct netlink_tap *nt);
+int netlink_add_tap(struct netlink_tap *nt);
+int netlink_remove_tap(struct netlink_tap *nt);
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *ns, int cap);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..676f1ff161a9 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,8 +31,6 @@ struct netpoll {
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
-
- struct work_struct cleanup_work;
};
struct netpoll_info {
@@ -49,8 +47,9 @@ struct netpoll_info {
};
#ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
@@ -62,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
void __netpoll_cleanup(struct netpoll *np);
-void __netpoll_free_async(struct netpoll *np);
+void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 57ffaa20d564..1b06f0b28453 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -374,6 +374,13 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+enum change_attr_type4 {
+ NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
+ NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
+ NFS4_CHANGE_TYPE_IS_UNDEFINED = 4
+};
/* Mandatory Attributes */
#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
@@ -441,6 +448,7 @@ enum lock_type4 {
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
@@ -527,6 +535,7 @@ enum {
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+ NFSPROC4_CLNT_OFFLOAD_CANCEL,
NFSPROC4_CLNT_LOOKUPP,
};
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2f129bbfaae8..40e30376130b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -51,7 +51,7 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- struct rpc_cred * cred;
+ const struct cred * cred;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -62,6 +62,7 @@ struct nfs_lock_context {
struct nfs_open_context *open_context;
fl_owner_t lockowner;
atomic_t io_count;
+ struct rcu_head rcu_head;
};
struct nfs4_state;
@@ -69,7 +70,8 @@ struct nfs_open_context {
struct nfs_lock_context lock_context;
fl_owner_t flock_owner;
struct dentry *dentry;
- struct rpc_cred *cred;
+ const struct cred *cred;
+ struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
@@ -82,11 +84,12 @@ struct nfs_open_context {
struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct rcu_head rcu_head;
};
struct nfs_open_dir_context {
struct list_head list;
- struct rpc_cred *cred;
+ const struct cred *cred;
unsigned long attr_gencount;
__u64 dir_cookie;
__u64 dup_cookie;
@@ -185,6 +188,17 @@ struct nfs_inode {
struct inode vfs_inode;
};
+struct nfs4_copy_state {
+ struct list_head copies;
+ nfs4_stateid stateid;
+ struct completion completion;
+ uint64_t count;
+ struct nfs_writeverf verf;
+ int error;
+ int flags;
+ struct nfs4_state *parent_state;
+};
+
/*
* Access bit flags
*/
@@ -377,7 +391,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
@@ -448,7 +462,7 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
return filp->private_data;
}
-static inline struct rpc_cred *nfs_file_cred(struct file *file)
+static inline const struct cred *nfs_file_cred(struct file *file)
{
if (file != NULL) {
struct nfs_open_context *ctx =
@@ -477,7 +491,7 @@ extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
-extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
+extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
/*
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 74ae3e1d19a0..6aa8cc83c3b6 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -28,7 +28,6 @@ struct nfs41_impl_id;
struct nfs_client {
refcount_t cl_count;
atomic_t cl_mds_count;
- seqcount_t cl_callback_count;
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
@@ -59,7 +58,7 @@ struct nfs_client {
struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
- struct rpc_cred *cl_machine_cred;
+ const char * cl_principal; /* used for machine cred */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -122,6 +121,7 @@ struct nfs_client {
#endif
struct net *cl_net;
+ struct list_head pending_cb_stateids;
};
/*
@@ -209,6 +209,7 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ struct list_head ss_copies;
unsigned long mig_gen;
unsigned long mig_status;
@@ -227,6 +228,9 @@ struct nfs_server {
unsigned short mountd_port;
unsigned short mountd_protocol;
struct rpc_wait_queue uoc_rpcwaitq;
+
+ /* XDR related information */
+ unsigned int read_hdrsize;
};
/* Server capabilities */
@@ -256,5 +260,6 @@ struct nfs_server {
#define NFS_CAP_LAYOUTSTATS (1U << 22)
#define NFS_CAP_CLONE (1U << 23)
#define NFS_CAP_COPY (1U << 24)
+#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 712eed156d09..441a93ebcac0 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -270,8 +270,7 @@ struct nfs4_layoutget_res {
struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
- struct rpc_cred *cred;
- unsigned callback_count;
+ const struct cred *cred;
gfp_t gfp_flags;
};
@@ -310,7 +309,7 @@ struct nfs4_layoutcommit_data {
struct rpc_task task;
struct nfs_fattr fattr;
struct list_head lseg_list;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *inode;
struct nfs4_layoutcommit_args args;
struct nfs4_layoutcommit_res res;
@@ -335,7 +334,7 @@ struct nfs4_layoutreturn_res {
struct nfs4_layoutreturn {
struct nfs4_layoutreturn_args args;
struct nfs4_layoutreturn_res res;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_client *clp;
struct inode *inode;
int rpc_status;
@@ -609,8 +608,13 @@ struct nfs_pgio_args {
__u32 count;
unsigned int pgbase;
struct page ** pages;
- const u32 * bitmask; /* used by write */
- enum nfs3_stable_how stable; /* used by write */
+ union {
+ unsigned int replen; /* used by read */
+ struct {
+ const u32 * bitmask; /* used by write */
+ enum nfs3_stable_how stable; /* used by write */
+ };
+ };
};
struct nfs_pgio_res {
@@ -618,10 +622,16 @@ struct nfs_pgio_res {
struct nfs_fattr * fattr;
__u32 count;
__u32 op_status;
- int eof; /* used by read */
- struct nfs_writeverf * verf; /* used by write */
- const struct nfs_server *server; /* used by write */
-
+ union {
+ struct {
+ unsigned int replen; /* used by read */
+ int eof; /* used by read */
+ };
+ struct {
+ struct nfs_writeverf * verf; /* used by write */
+ const struct nfs_server *server; /* used by write */
+ };
+ };
};
/*
@@ -1389,9 +1399,11 @@ struct nfs42_copy_args {
u64 dst_pos;
u64 count;
+ bool sync;
};
struct nfs42_write_res {
+ nfs4_stateid stateid;
u64 count;
struct nfs_writeverf verifier;
};
@@ -1404,6 +1416,18 @@ struct nfs42_copy_res {
struct nfs_commitres commit_res;
};
+struct nfs42_offload_status_args {
+ struct nfs4_sequence_args osa_seq_args;
+ struct nfs_fh *osa_src_fh;
+ nfs4_stateid osa_stateid;
+};
+
+struct nfs42_offload_status_res {
+ struct nfs4_sequence_res osr_seq_res;
+ uint64_t osr_count;
+ int osr_status;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1445,7 +1469,7 @@ enum {
struct nfs_io_completion;
struct nfs_pgio_header {
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct list_head pages;
struct nfs_page *req;
struct nfs_writeverf verf; /* Used for writes */
@@ -1458,11 +1482,10 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
- spinlock_t lock;
- /* fields protected by lock */
+
int pnfs_error;
int error; /* merge with pnfs_error */
- unsigned long good_bytes; /* boundary of good data */
+ unsigned int good_bytes; /* boundary of good data */
unsigned long flags;
/*
@@ -1506,7 +1529,7 @@ struct nfs_commit_info {
struct nfs_commit_data {
struct rpc_task task;
struct inode *inode;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr fattr;
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
@@ -1537,7 +1560,7 @@ struct nfs_unlinkdata {
struct nfs_removeres res;
struct dentry *dentry;
wait_queue_head_t wq;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct nfs_fattr dir_attr;
long timeout;
};
@@ -1545,7 +1568,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
- struct rpc_cred *cred;
+ const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
struct nfs_fattr old_fattr;
@@ -1611,7 +1634,7 @@ struct nfs_rpc_ops {
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
- int (*readdir) (struct dentry *, struct rpc_cred *,
+ int (*readdir) (struct dentry *, const struct cred *,
u64, struct page **, unsigned int, bool);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
dev_t);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 1fbde8a880d9..5a30ad594ccc 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -518,7 +518,7 @@ static inline int node_random(const nodemask_t *mask)
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
-#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags) \
type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m) kfree(m)
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index f35c7bf76143..0096a05395e3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#ifdef CONFIG_TREE_SRCU
#define _SRCU_NOTIFIER_HEAD(name, mod) \
- static DEFINE_PER_CPU(struct srcu_data, \
- name##_head_srcu_data); \
+ static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
mod struct srcu_notifier_head name = \
SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 496ff759f84c..91745cc3704c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -403,7 +403,6 @@ struct nvme_fc_port_template {
void **handle);
void (*delete_queue)(struct nvme_fc_local_port *,
unsigned int qidx, void *handle);
- void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
int (*ls_req)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
@@ -649,22 +648,6 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
- NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
- /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the cmd rcv handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
- NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
- /* Bit 3: When 0, the LLDD is calling the op done handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the op done handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
};
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
new file mode 100644
index 000000000000..03d87c0550a9
--- /dev/null
+++ b/include/linux/nvme-tcp.h
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP protocol header.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+
+#ifndef _LINUX_NVME_TCP_H
+#define _LINUX_NVME_TCP_H
+
+#include <linux/nvme.h>
+
+#define NVME_TCP_DISC_PORT 8009
+#define NVME_TCP_ADMIN_CCSZ SZ_8K
+#define NVME_TCP_DIGEST_LENGTH 4
+
+enum nvme_tcp_pfv {
+ NVME_TCP_PFV_1_0 = 0x0,
+};
+
+enum nvme_tcp_fatal_error_status {
+ NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
+ NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
+ NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
+ NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
+ NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
+};
+
+enum nvme_tcp_digest_option {
+ NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
+ NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
+};
+
+enum nvme_tcp_pdu_type {
+ nvme_tcp_icreq = 0x0,
+ nvme_tcp_icresp = 0x1,
+ nvme_tcp_h2c_term = 0x2,
+ nvme_tcp_c2h_term = 0x3,
+ nvme_tcp_cmd = 0x4,
+ nvme_tcp_rsp = 0x5,
+ nvme_tcp_h2c_data = 0x6,
+ nvme_tcp_c2h_data = 0x7,
+ nvme_tcp_r2t = 0x9,
+};
+
+enum nvme_tcp_pdu_flags {
+ NVME_TCP_F_HDGST = (1 << 0),
+ NVME_TCP_F_DDGST = (1 << 1),
+ NVME_TCP_F_DATA_LAST = (1 << 2),
+ NVME_TCP_F_DATA_SUCCESS = (1 << 3),
+};
+
+/**
+ * struct nvme_tcp_hdr - nvme tcp pdu common header
+ *
+ * @type: pdu type
+ * @flags: pdu specific flags
+ * @hlen: pdu header length
+ * @pdo: pdu data offset
+ * @plen: pdu wire byte length
+ */
+struct nvme_tcp_hdr {
+ __u8 type;
+ __u8 flags;
+ __u8 hlen;
+ __u8 pdo;
+ __le32 plen;
+};
+
+/**
+ * struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu
+ *
+ * @hdr: pdu generic header
+ * @pfv: pdu version format
+ * @hpda: host pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxr2t: maximum r2ts per request supported
+ */
+struct nvme_tcp_icreq_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 hpda;
+ __u8 digest;
+ __le32 maxr2t;
+ __u8 rsvd2[112];
+};
+
+/**
+ * struct nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu
+ *
+ * @hdr: pdu common header
+ * @pfv: pdu version format
+ * @cpda: controller pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxdata: maximum data capsules per r2t supported
+ */
+struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 cpda;
+ __u8 digest;
+ __le32 maxdata;
+ __u8 rsvd[112];
+};
+
+/**
+ * struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu
+ *
+ * @hdr: pdu common header
+ * @fes: fatal error status
+ * @fei: fatal error information
+ */
+struct nvme_tcp_term_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 fes;
+ __le32 fei;
+ __u8 rsvd[8];
+};
+
+/**
+ * struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cmd: nvme command
+ */
+struct nvme_tcp_cmd_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_command cmd;
+};
+
+/**
+ * struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cqe: nvme completion queue entry
+ */
+struct nvme_tcp_rsp_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_completion cqe;
+};
+
+/**
+ * struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @r2t_offset: offset from the start of the command data
+ * @r2t_length: length the host is allowed to send
+ */
+struct nvme_tcp_r2t_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 r2t_offset;
+ __le32 r2t_length;
+ __u8 rsvd[4];
+};
+
+/**
+ * struct nvme_tcp_data_pdu - nvme tcp data pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @data_offset: offset from the start of the command data
+ * @data_length: length of the data stream
+ */
+struct nvme_tcp_data_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 data_offset;
+ __le32 data_length;
+ __u8 rsvd[4];
+};
+
+union nvme_tcp_pdu {
+ struct nvme_tcp_icreq_pdu icreq;
+ struct nvme_tcp_icresp_pdu icresp;
+ struct nvme_tcp_cmd_pdu cmd;
+ struct nvme_tcp_rsp_pdu rsp;
+ struct nvme_tcp_r2t_pdu r2t;
+ struct nvme_tcp_data_pdu data;
+};
+
+#endif /* _LINUX_NVME_TCP_H */
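
As an illustration of the PDU definitions above, a sketch of how a host might fill the initialize-connection request; the field values are examples, not mandated by this header, and the usual kernel byte-order helpers are assumed.

static void sketch_fill_icreq(struct nvme_tcp_icreq_pdu *icreq,
			      bool hdr_digest, bool data_digest)
{
	memset(icreq, 0, sizeof(*icreq));
	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(sizeof(*icreq));
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->hpda = 0;		/* no extra data alignment */
	icreq->maxr2t = 0;		/* 0's based: one outstanding R2T */
	if (hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
}
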
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 68e91ef5494c..bbcc83886899 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -52,15 +52,20 @@ enum {
enum {
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
+ NVMF_TRTYPE_TCP = 3, /* TCP/IP */
NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
NVMF_TRTYPE_MAX,
};
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
- NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
- NVMF_TREQ_REQUIRED = 1, /* Required */
- NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+ NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
+ NVMF_TREQ_REQUIRED = 1, /* Required */
+ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+#define NVME_TREQ_SECURE_CHANNEL_MASK \
+ (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
+
+ NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
@@ -198,6 +203,11 @@ enum {
NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};
+enum nvme_ctrl_attr {
+ NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
+ NVME_CTRL_ATTR_TBKAS = (1 << 6),
+};
+
struct nvme_id_ctrl {
__le16 vid;
__le16 ssvid;
@@ -214,7 +224,11 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[156];
+ __u8 rsvd100[28];
+ __le16 crdt1;
+ __le16 crdt2;
+ __le16 crdt3;
+ __u8 rsvd134[122];
__le16 oacs;
__u8 acl;
__u8 aerl;
@@ -481,12 +495,21 @@ enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
+ NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
};
enum {
- NVME_AEN_CFG_NS_ATTR = 1 << 8,
- NVME_AEN_CFG_FW_ACT = 1 << 9,
- NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
+ NVME_AEN_BIT_NS_ATTR = 8,
+ NVME_AEN_BIT_FW_ACT = 9,
+ NVME_AEN_BIT_ANA_CHANGE = 11,
+ NVME_AEN_BIT_DISC_CHANGE = 31,
+};
+
+enum {
+ NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
+ NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
+ NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
};
struct nvme_lba_range_type {
@@ -639,7 +662,12 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
- __le32 cdw10[6];
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
};
struct nvme_rw_command {
@@ -738,6 +766,15 @@ enum {
NVME_HOST_MEM_RETURN = (1 << 1),
};
+struct nvme_feat_host_behavior {
+ __u8 acre;
+ __u8 resv1[511];
+};
+
+enum {
+ NVME_ENABLE_ACRE = 1,
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -792,6 +829,7 @@ enum {
NVME_FEAT_RRL = 0x12,
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
+ NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1030,6 +1068,10 @@ struct nvmf_disc_rsp_page_hdr {
struct nvmf_disc_rsp_page_entry entries[0];
};
+enum {
+ NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
+};
+
struct nvmf_connect_command {
__u8 opcode;
__u8 resv1;
@@ -1126,6 +1168,20 @@ struct nvme_command {
};
};
+struct nvme_error_slot {
+ __le64 error_count;
+ __le16 sqid;
+ __le16 cmdid;
+ __le16 status_field;
+ __le16 param_error_location;
+ __le64 lba;
+ __le32 nsid;
+ __u8 vs;
+ __u8 resv[3];
+ __le64 cs;
+ __u8 resv2[24];
+};
+
static inline bool nvme_is_write(struct nvme_command *cmd)
{
/*
@@ -1241,7 +1297,9 @@ enum {
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447f7860..312bfa5efd80 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework consumer.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_CONSUMER_H
@@ -14,6 +11,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/notifier.h>
struct device;
struct device_node;
@@ -29,11 +27,36 @@ struct nvmem_cell_info {
unsigned int nbits;
};
+/**
+ * struct nvmem_cell_lookup - cell lookup entry
+ *
+ * @nvmem_name: Name of the provider.
+ * @cell_name: Name of the nvmem cell as defined in the name field of
+ * struct nvmem_cell_info.
+ * @dev_id: Name of the consumer device that will be associated with
+ * this cell.
+ * @con_id: Connector id for this cell lookup.
+ */
+struct nvmem_cell_lookup {
+ const char *nvmem_name;
+ const char *cell_name;
+ const char *dev_id;
+ const char *con_id;
+ struct list_head node;
+};
+
+enum {
+ NVMEM_ADD = 1,
+ NVMEM_REMOVE,
+ NVMEM_CELL_ADD,
+ NVMEM_CELL_REMOVE,
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
/* Cell based interface */
-struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
-struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id);
void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
@@ -55,18 +78,28 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
+const char *nvmem_dev_name(struct nvmem_device *nvmem);
+
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+
+int nvmem_register_notifier(struct notifier_block *nb);
+int nvmem_unregister_notifier(struct notifier_block *nb);
+
#else
static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void devm_nvmem_cell_put(struct device *dev,
@@ -80,31 +113,31 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
const char *buf, size_t len)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_cell_read_u32(struct device *dev,
const char *cell_id, u32 *val)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void nvmem_device_put(struct nvmem_device *nvmem)
@@ -120,47 +153,68 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_read(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
+
+static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return NULL;
+}
+
+static inline void
+nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+static inline void
+nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+
+static inline int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NVMEM */
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name);
+ const char *id);
struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name);
#else
static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_NVMEM && CONFIG_OF */
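
A board-file style sketch of the new lookup API (the provider, consumer, and cell names below are assumptions for illustration):

static struct nvmem_cell_lookup sketch_nvmem_lookups[] = {
	{
		.nvmem_name = "eeprom0",	/* provider's name */
		.cell_name = "mac-address",	/* cell defined by the provider */
		.dev_id = "ethernet.0",		/* consumer device */
		.con_id = "mac-address",	/* id used with nvmem_cell_get() */
	},
};

static void sketch_register_nvmem_lookups(void)
{
	nvmem_add_cell_lookups(sketch_nvmem_lookups,
			       ARRAY_SIZE(sketch_nvmem_lookups));
}

The consumer then obtains the cell with nvmem_cell_get(dev, "mac-address").
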
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6ad09bb..fe051323be0a 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework provider.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_PROVIDER_H
@@ -22,6 +19,13 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+enum nvmem_type {
+ NVMEM_TYPE_UNKNOWN = 0,
+ NVMEM_TYPE_EEPROM,
+ NVMEM_TYPE_OTP,
+ NVMEM_TYPE_BATTERY_BACKED,
+};
+
/**
* struct nvmem_config - NVMEM device configuration
*
@@ -31,8 +35,10 @@ typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @type: Type of the nvmem storage
* @read_only: Device is read-only.
* @root_only: Device is accessible to root only.
+ * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
* @size: Device size.
@@ -54,8 +60,10 @@ struct nvmem_config {
struct module *owner;
const struct nvmem_cell_info *cells;
int ncells;
+ enum nvmem_type type;
bool read_only;
bool root_only;
+ bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
@@ -67,30 +75,46 @@ struct nvmem_config {
struct device *base_dev;
};
+/**
+ * struct nvmem_cell_table - NVMEM cell definitions for given provider
+ *
+ * @nvmem_name: Provider name.
+ * @cells: Array of cell definitions.
+ * @ncells: Number of cell definitions in the array.
+ * @node: List node.
+ *
+ * This structure together with related helper functions is provided for users
+ * that can't access the nvmem provided structure but wish to register
+ * cell definitions for it, e.g. board files registering an EEPROM device.
+ */
+struct nvmem_cell_table {
+ const char *nvmem_name;
+ const struct nvmem_cell_info *cells;
+ size_t ncells;
+ struct list_head node;
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
-int nvmem_unregister(struct nvmem_device *nvmem);
+void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
-int nvmem_add_cells(struct nvmem_device *nvmem,
- const struct nvmem_cell_info *info,
- int ncells);
+void nvmem_add_cell_table(struct nvmem_cell_table *table);
+void nvmem_del_cell_table(struct nvmem_cell_table *table);
+
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int nvmem_unregister(struct nvmem_device *nvmem)
-{
- return -ENOSYS;
-}
+static inline void nvmem_unregister(struct nvmem_device *nvmem) {}
static inline struct nvmem_device *
devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
@@ -101,16 +125,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
static inline int
devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
- return nvmem_unregister(nvmem);
-
+ return -EOPNOTSUPP;
}
-static inline int nvmem_add_cells(struct nvmem_device *nvmem,
- const struct nvmem_cell_info *info,
- int ncells)
-{
- return -ENOSYS;
-}
+static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
#endif /* CONFIG_NVMEM */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
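
The new nvmem_cell_table replaces nvmem_add_cells() for code that cannot reach the provider's nvmem_config, e.g. board files. A registration sketch follows; the provider name "at24-eeprom0" and the cell layout are made-up examples, and the struct nvmem_cell_info fields are assumed from the consumer header.

#include <linux/init.h>
#include <linux/nvmem-provider.h>

/* Hedged sketch: board code registering one cell for a provider it does
 * not own. Provider name and offsets below are placeholders.
 */
static const struct nvmem_cell_info board_cells[] = {
	{
		.name	= "mac-address",
		.offset	= 0,
		.bytes	= 6,
	},
};

static struct nvmem_cell_table board_cell_table = {
	.nvmem_name	= "at24-eeprom0",	/* must match the provider's name */
	.cells		= board_cells,
	.ncells		= ARRAY_SIZE(board_cells),
};

static int __init board_nvmem_init(void)
{
	nvmem_add_cell_table(&board_cell_table);
	return 0;
}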
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
new file mode 100644
index 000000000000..34f38c186ea0
--- /dev/null
+++ b/include/linux/objagg.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _OBJAGG_H
+#define _OBJAGG_H
+
+struct objagg_ops {
+ size_t obj_size;
+ void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ void (*delta_destroy)(void *priv, void *delta_priv);
+ void * (*root_create)(void *priv, void *obj);
+ void (*root_destroy)(void *priv, void *root_priv);
+};
+
+struct objagg;
+struct objagg_obj;
+
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
+
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
+void objagg_destroy(struct objagg *objagg);
+
+struct objagg_obj_stats {
+ unsigned int user_count;
+ unsigned int delta_user_count; /* includes delta object users */
+};
+
+struct objagg_obj_stats_info {
+ struct objagg_obj_stats stats;
+ struct objagg_obj *objagg_obj; /* associated object */
+ bool is_root;
+};
+
+struct objagg_stats {
+ unsigned int stats_info_count;
+ struct objagg_obj_stats_info stats_info[];
+};
+
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
+void objagg_stats_put(const struct objagg_stats *objagg_stats);
+
+#endif
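
The header above is the whole public surface of the new object aggregation library: users supply root/delta callbacks and then get/put objects. A lifecycle sketch follows; struct my_obj and the trivial callback bodies are placeholders, and ERR_PTR-style returns from objagg_create()/objagg_obj_get() are an assumption.

#include <linux/err.h>
#include <linux/objagg.h>
#include <linux/types.h>

struct my_obj {
	u32 key;		/* placeholder object layout */
};

static void *my_root_create(void *priv, void *obj)
{
	return obj;		/* placeholder root private data */
}

static void my_root_destroy(void *priv, void *root_priv) {}

static void *my_delta_create(void *priv, void *parent_obj, void *obj)
{
	return obj;		/* placeholder delta private data */
}

static void my_delta_destroy(void *priv, void *delta_priv) {}

static const struct objagg_ops my_objagg_ops = {
	.obj_size	= sizeof(struct my_obj),
	.delta_create	= my_delta_create,
	.delta_destroy	= my_delta_destroy,
	.root_create	= my_root_create,
	.root_destroy	= my_root_destroy,
};

static int my_use_objagg(void *priv, struct my_obj *obj)
{
	struct objagg_obj *objagg_obj;
	struct objagg *objagg;

	objagg = objagg_create(&my_objagg_ops, priv);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);

	objagg_obj = objagg_obj_get(objagg, obj);
	if (IS_ERR(objagg_obj)) {
		objagg_destroy(objagg);
		return PTR_ERR(objagg_obj);
	}

	/* use objagg_obj_root_priv() / objagg_obj_delta_priv() here */

	objagg_obj_put(objagg, objagg_obj);
	objagg_destroy(objagg);
	return 0;
}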
diff --git a/include/linux/of.h b/include/linux/of.h
index 4d25e4f952d9..fe472e5195a9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -66,7 +66,6 @@ struct device_node {
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
- const char *path_component_name;
unsigned int unique_id;
struct of_irq_controller *irq_trans;
#endif
@@ -138,11 +137,16 @@ extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
extern raw_spinlock_t devtree_lock;
-/* flag descriptions (need to be visible even when !CONFIG_OF) */
-#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
-#define OF_DETACHED 2 /* node has been detached from the device tree */
-#define OF_POPULATED 3 /* device already created for the node */
-#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+/*
+ * struct device_node flag descriptions
+ * (need to be visible even when !CONFIG_OF)
+ */
+#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */
+#define OF_DETACHED 2 /* detached from the device tree */
+#define OF_POPULATED 3 /* device already created */
+#define OF_POPULATED_BUS 4 /* platform bus created for children */
+#define OF_OVERLAY 5 /* allocated for an overlay */
+#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */
#define OF_BAD_ADDR ((u64)-1)
@@ -247,15 +251,12 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#include <asm/prom.h>
#endif
-/* Default #address and #size cells. Allow arch asm/prom.h to override */
-#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -290,6 +291,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@@ -348,6 +351,8 @@ extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -387,6 +392,9 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
+extern int of_alias_get_alias_list(const struct of_device_id *matches,
+ const char *stem, unsigned long *bitmap,
+ unsigned int nbits);
extern int of_machine_is_compatible(const char *compat);
@@ -545,6 +553,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
+int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -561,6 +573,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
return NULL;
}
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ return false;
+}
+
static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
@@ -632,6 +654,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -733,6 +761,11 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+ return NULL;
+}
+
static inline int of_n_addr_cells(struct device_node *np)
{
return 0;
@@ -872,6 +905,13 @@ static inline int of_alias_get_highest_id(const char *stem)
return -ENOSYS;
}
+static inline int of_alias_get_alias_list(const struct of_device_id *matches,
+ const char *stem, unsigned long *bitmap,
+ unsigned int nbits)
+{
+ return -ENOSYS;
+}
+
static inline int of_machine_is_compatible(const char *compat)
{
return 0;
@@ -931,6 +971,13 @@ static inline int of_cpu_node_to_id(struct device_node *np)
return -ENODEV;
}
+static inline int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ return -EINVAL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -942,6 +989,12 @@ static inline int of_cpu_node_to_id(struct device_node *np)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
+static inline int of_prop_val_eq(struct property *p1, struct property *p2)
+{
+ return p1->length == p2->length &&
+ !memcmp(p1->value, p2->value, (size_t)p1->length);
+}
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
@@ -967,6 +1020,18 @@ static inline struct device_node *of_find_matching_node(
return of_find_matching_node_and_match(from, matches, NULL);
}
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+ return of_get_property(np, "device_type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+ const char *match = of_node_get_device_type(np);
+
+ return np && match && type && !strcmp(match, type);
+}
+
/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
@@ -1184,6 +1249,10 @@ static inline int of_property_read_s32(const struct device_node *np,
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
+#define for_each_of_cpu_node(cpu) \
+ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
+ cpu = of_get_next_cpu_node(cpu))
+
#define for_each_node_with_property(dn, prop_name) \
for (dn = of_find_node_with_property(NULL, prop_name); dn; \
dn = of_find_node_with_property(dn, prop_name))
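
The new of_node_name_eq()/of_node_is_type() accessors and the for_each_of_cpu_node() iterator let callers stop dereferencing np->name and np->type directly. A small usage sketch; the pr_info() output is purely illustrative.

#include <linux/of.h>
#include <linux/printk.h>

/* Hedged sketch: walk CPU nodes with the new iterator and use the name/type
 * accessors instead of touching np->name or np->type directly.
 */
static void example_scan_cpus(void)
{
	struct device_node *cpu;

	for_each_of_cpu_node(cpu) {
		/* the type check is redundant in this loop; shown for the API */
		if (!of_node_is_type(cpu, "cpu"))
			continue;
		if (of_node_name_eq(cpu, "cpu"))
			pr_info("found CPU node %pOF\n", cpu);
	}
}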
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 165fd302b442..8d31e39dd564 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
int of_dma_configure(struct device *dev,
struct device_node *np,
bool force_dma);
-void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
-static inline void of_dma_deconfigure(struct device *dev)
-{}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index b9cd9ebdf9b9..a713e5d156d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -76,6 +76,7 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
+extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 90d81ee9e6a0..9cd72aab76fe 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -13,7 +13,6 @@
struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
-extern int of_get_nvmem_mac_address(struct device_node *np, void *addr);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
@@ -26,11 +25,6 @@ static inline const void *of_get_mac_address(struct device_node *np)
return NULL;
}
-static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr)
-{
- return -ENODEV;
-}
-
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index e83d87fc5673..21a89c4880fa 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -14,9 +14,6 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
void of_pci_check_probe_only(void);
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out);
#else
static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn)
@@ -29,13 +26,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index d0b183ab65c6..89e4eb076a01 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -35,6 +35,4 @@ extern void *prom_early_alloc(unsigned long size);
/* for building the device tree */
extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
-extern void (*of_pdt_build_more)(struct device_node *dp);
-
#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6adac113e96d..d07992009265 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -15,6 +15,13 @@ struct notifier_block;
struct mem_cgroup;
struct task_struct;
+enum oom_constraint {
+ CONSTRAINT_NONE,
+ CONSTRAINT_CPUSET,
+ CONSTRAINT_MEMORY_POLICY,
+ CONSTRAINT_MEMCG,
+};
+
/*
* Details of the page allocation that triggered the oom killer; they are used to
* determine what should be killed.
@@ -42,6 +49,9 @@ struct oom_control {
unsigned long totalpages;
struct task_struct *chosen;
unsigned long chosen_points;
+
+ /* Used to print the constraint info. */
+ enum oom_constraint constraint;
};
extern struct mutex oom_lock;
@@ -88,14 +98,14 @@ static inline bool mm_is_oom_victim(struct mm_struct *mm)
*
 * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
*/
-static inline int check_stable_address_space(struct mm_struct *mm)
+static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
return VM_FAULT_SIGBUS;
return 0;
}
-void __oom_reap_task_mm(struct mm_struct *mm);
+bool __oom_reap_task_mm(struct mm_struct *mm);
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 7ec86bf31ce4..1dda31825ec4 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -82,6 +82,16 @@
#define LAST_CPUPID_WIDTH 0
#endif
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+ > BITS_PER_LONG - NR_PAGEFLAGS
+#error "KASAN: not enough bits in page flags for tag"
+#endif
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
/*
* We are going to use the flags for the page to node mapping if it's in
* there. This includes the case where there is no node, so it is implicit.
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 901943e4754b..39b4494e29f1 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -69,13 +69,14 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
- PG_error,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
+ PG_workingset,
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
+ PG_error,
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1,
@@ -162,6 +163,14 @@ static inline int PagePoisoned(const struct page *page)
return page->flags == PAGE_POISON_PATTERN;
}
+#ifdef CONFIG_DEBUG_VM
+void page_init_poison(struct page *page, size_t size);
+#else
+static inline void page_init_poison(struct page *page, size_t size)
+{
+}
+#endif
+
/*
* Page flags policies wrt compound pages
*
@@ -280,6 +289,8 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
TESTCLEARFLAG(Active, active, PF_HEAD)
+PAGEFLAG(Workingset, workingset, PF_HEAD)
+ TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -292,6 +303,7 @@ PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
+ __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
@@ -369,8 +381,13 @@ PAGEFLAG_FALSE(Uncached)
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
+extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
+static inline bool set_hwpoison_free_buddy_page(struct page *page)
+{
+ return false;
+}
#define __PG_HWPOISON 0
#endif
@@ -652,6 +669,7 @@ PAGEFLAG_FALSE(DoubleMap)
#define PAGE_TYPE_BASE 0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
+#define PAGE_MAPCOUNT_RESERVE -128
#define PG_buddy 0x00000080
#define PG_balloon 0x00000100
#define PG_kmemcg 0x00000200
@@ -660,6 +678,11 @@ PAGEFLAG_FALSE(DoubleMap)
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+static inline int page_has_type(struct page *page)
+{
+ return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
+}
+
#define PAGE_TYPE_OPS(uname, lname) \
static __always_inline int Page##uname(struct page *page) \
{ \
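
PG_workingset comes with the usual generated accessors (PageWorkingset(), SetPageWorkingset(), TestClearPageWorkingset()). A tiny sketch of how refault-tracking code might use them, purely for illustration:

#include <linux/page-flags.h>

/* Hedged sketch: the PAGEFLAG()/TESTCLEARFLAG() lines above generate these
 * helpers; the policy of when to set the bit belongs to the mm workingset
 * code, not to this example.
 */
static void example_mark_refault(struct page *page)
{
	if (!PageWorkingset(page))
		SetPageWorkingset(page);
}

static bool example_consume_refault(struct page *page)
{
	return TestClearPageWorkingset(page);
}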
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4ae347cbc36d..4eb26d278046 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
+#define SKIP_HWPOISON 0x1
+#define REPORT_FAILURE 0x2
+
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
- int migratetype, bool skip_hwpoisoned_pages);
+ int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
@@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page,
* For isolating all pages in the range finally, the caller has to
* free all pages in the range. test_page_isolated() can be used to
* test it.
+ *
+ * The following flags are allowed (they can be combined in a bit mask)
+ * SKIP_HWPOISON - ignore hwpoison pages
+ * REPORT_FAILURE - report details about the failure to isolate the range
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, bool skip_hwpoisoned_pages);
+ unsigned migratetype, int flags);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
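
With the bool parameter gone, callers pass a flag mask instead. A before/after sketch; the MIGRATE_MOVABLE call site is an assumption, roughly as in memory hot-remove paths.

#include <linux/mmzone.h>
#include <linux/page-isolation.h>

/* Hedged sketch: the old skip_hwpoisoned_pages=true becomes SKIP_HWPOISON,
 * and REPORT_FAILURE can be OR'ed in where diagnostics are wanted.
 */
static int example_isolate(unsigned long start_pfn, unsigned long end_pfn)
{
	/* old: start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true); */
	return start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
					SKIP_HWPOISON | REPORT_FAILURE);
}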
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 9132c5cb41f1..06a66327333d 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -25,10 +25,11 @@
#include <linux/types.h>
+#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
PB_migrate,
- PB_migrate_end = PB_migrate + 3 - 1,
+ PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
/* 3 bits required for migrate types */
PB_migrate_skip,/* If set the block is skipped by compaction */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b1bd2186e6d2..e2d7039af6a3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
typedef int filler_t(void *, struct page *);
-pgoff_t page_cache_next_hole(struct address_space *mapping,
+pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-pgoff_t page_cache_prev_hole(struct address_space *mapping,
+pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
@@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, int tag, unsigned int nr_pages,
+ pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
- pgoff_t *index, int tag, unsigned int nr_pages,
+ pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
struct page **pages)
{
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
- int tag, unsigned int nr_entries,
+ xa_mark_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
@@ -537,6 +537,8 @@ static inline int wait_on_page_locked_killable(struct page *page)
return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
+extern void put_and_wait_on_page_locked(struct page *page);
+
/*
* Wait for a page to complete writeback
*/
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 6dc456ac6136..081d934eda64 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -9,6 +9,8 @@
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
+#include <linux/xarray.h>
+
/* 15 pointers + header align the pagevec structure to a power of two */
#define PAGEVEC_SIZE 15
@@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
- int tag);
+ xa_mark_t tag);
unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
- int tag, unsigned max_pages);
+ xa_mark_t tag, unsigned max_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, int tag)
+ struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
{
return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
}
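
The tag parameters above change type from int to xa_mark_t as part of the XArray conversion; callers passing the PAGECACHE_TAG_* constants keep working unchanged. A sketch:

#include <linux/fs.h>
#include <linux/pagevec.h>

/* Hedged sketch: a dirty-page lookup; PAGECACHE_TAG_DIRTY is now an
 * xa_mark_t, so no change is needed at call sites like this one.
 */
static unsigned example_lookup_dirty(struct pagevec *pvec,
				     struct address_space *mapping,
				     pgoff_t *index)
{
	return pagevec_lookup_tag(pvec, mapping, index, PAGECACHE_TAG_DIRTY);
}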
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index c3f1b44ade29..cb1adf0b78a9 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -119,29 +119,11 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
return dma_set_coherent_mask(&dev->dev, mask);
}
-
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{
- return dma_set_max_seg_size(&dev->dev, size);
-}
-
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{
- return dma_set_seg_boundary(&dev->dev, mask);
-}
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{ return -EIO; }
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{ return -EIO; }
#endif
#endif
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
deleted file mode 100644
index 0f7aa7353ca3..000000000000
--- a/include/linux/pci-dma.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PCI_DMA_H
-#define _LINUX_PCI_DMA_H
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
-#define pci_unmap_addr dma_unmap_addr
-#define pci_unmap_addr_set dma_unmap_addr_set
-#define pci_unmap_len dma_unmap_len
-#define pci_unmap_len_set dma_unmap_len_set
-
-#endif
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
new file mode 100644
index 000000000000..bca9bc3e5be7
--- /dev/null
+++ b/include/linux/pci-p2pdma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#ifndef _LINUX_PCI_P2PDMA_H
+#define _LINUX_PCI_P2PDMA_H
+
+#include <linux/pci.h>
+
+struct block_device;
+struct scatterlist;
+
+#ifdef CONFIG_PCI_P2PDMA
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset);
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose);
+bool pci_has_p2pmem(struct pci_dev *pdev);
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length);
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma);
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma);
+#else /* CONFIG_PCI_P2PDMA */
+static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
+ size_t size, u64 offset)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
+ struct device **clients, int num_clients, bool verbose)
+{
+ return -1;
+}
+static inline bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return false;
+}
+static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
+ int num_clients)
+{
+ return NULL;
+}
+static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ return NULL;
+}
+static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
+ size_t size)
+{
+}
+static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
+ void *addr)
+{
+ return 0;
+}
+static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ return NULL;
+}
+static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
+ struct scatterlist *sgl)
+{
+}
+static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+}
+static inline int pci_p2pdma_map_sg(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ return 0;
+}
+static inline int pci_p2pdma_enable_store(const char *page,
+ struct pci_dev **p2p_dev, bool *use_p2pdma)
+{
+ *use_p2pdma = false;
+ return 0;
+}
+static inline ssize_t pci_p2pdma_enable_show(char *page,
+ struct pci_dev *p2p_dev, bool use_p2pdma)
+{
+ return sprintf(page, "none\n");
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+
+static inline int pci_p2pdma_distance(struct pci_dev *provider,
+ struct device *client, bool verbose)
+{
+ return pci_p2pdma_distance_many(provider, &client, 1, verbose);
+}
+
+static inline struct pci_dev *pci_p2pmem_find(struct device *client)
+{
+ return pci_p2pmem_find_many(&client, 1);
+}
+
+#endif /* _LINUX_PCI_P2PDMA_H */
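
The exported functions above split into a provider side (expose and publish a BAR) and a client side (find a provider and allocate from it). A sketch of both halves; the BAR index is an assumption, and passing size 0 is assumed to request the rest of the BAR.

#include <linux/pci-p2pdma.h>

/* Hedged sketch, provider side: expose BAR 4 (assumed) as p2p memory and
 * publish it so that unrelated clients may use it.
 */
static int example_provider_setup(struct pci_dev *pdev)
{
	int rc;

	rc = pci_p2pdma_add_resource(pdev, 4, 0 /* whole BAR, assumed */, 0);
	if (rc)
		return rc;

	pci_p2pmem_publish(pdev, true);
	return 0;
}

/* Hedged sketch, client side: pick a provider close to @client and
 * allocate a buffer from its p2p memory.
 */
static void *example_client_alloc(struct device *client, size_t size,
				  struct pci_dev **provider)
{
	*provider = pci_p2pmem_find(client);
	if (!*provider)
		return NULL;

	return pci_alloc_p2pmem(*provider, size);
}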
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9b87f1936906..1ab78a23ae08 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -281,6 +281,7 @@ struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
struct pci_ats;
+struct pci_p2pdma;
/* The pci_dev structure describes PCI devices */
struct pci_dev {
@@ -325,6 +326,7 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
@@ -394,6 +396,14 @@ struct pci_dev {
unsigned int is_hotplug_bridge:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
+ /*
+ * Devices marked as untrusted are those that can potentially execute
+ * DMA attacks and similar. They are typically connected through
+ * external ports such as Thunderbolt, but are not limited to that.
+ * When an IOMMU is enabled they should get full mappings to make
+ * sure they cannot access arbitrary memory.
+ */
+ unsigned int untrusted:1;
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
@@ -402,6 +412,7 @@ struct pci_dev {
unsigned int has_secondary_link:1;
unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
unsigned int is_probed:1; /* Device probing in progress */
+ unsigned int link_active_reporting:1;/* Device capable of reporting link active */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -439,6 +450,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_PASID
u16 pasid_features;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ struct pci_p2pdma *p2pdma;
+#endif
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
char *driver_override; /* Driver name to force a match */
@@ -1235,6 +1249,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
+/* Temporary until new and working PCI SBR API in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
#define pci_bus_for_each_resource(bus, res, i) \
for (i = 0; \
(res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
@@ -1339,7 +1356,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
/* kmem_cache style wrapper around pci_alloc_consistent() */
-#include <linux/pci-dma.h>
#include <linux/dmapool.h>
#define pci_pool dma_pool
@@ -1702,6 +1718,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{ return -EINVAL; }
+
+static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{ return NULL; }
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
@@ -1809,7 +1829,11 @@ struct pci_fixup {
u16 device; /* Or PCI_ANY_ID */
u32 class; /* Or PCI_ANY_ID */
unsigned int class_shift; /* should be 0, 8, 16 */
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ int hook_offset;
+#else
void (*hook)(struct pci_dev *dev);
+#endif
};
enum pci_fixup_pass {
@@ -1823,12 +1847,28 @@ enum pci_fixup_pass {
pci_fixup_suspend_late, /* pci_device_suspend_late() */
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __ADDRESSABLE(hook) \
+ asm(".section " #sec ", \"a\" \n" \
+ ".balign 16 \n" \
+ ".short " #vendor ", " #device " \n" \
+ ".long " #class ", " #class_shift " \n" \
+ ".long " #hook " - . \n" \
+ ".previous \n");
+#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
+#endif
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
@@ -1928,7 +1968,11 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
+#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
+#else
+static inline void pcibios_penalize_isa_irq(int irq, int active) {}
+#endif
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
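
With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the fixup table stores a 32-bit relative offset to the hook instead of a pointer, but the declaration side is unchanged; the usual DECLARE_PCI_FIXUP_* wrappers still take a plain function. A sketch with placeholder IDs; DECLARE_PCI_FIXUP_EARLY itself is not shown in this hunk and is assumed from the unmodified part of the header.

/* Hedged sketch: a quirk declared the ordinary way; only the section
 * encoding above differs between the two configurations. IDs are fake.
 */
static void example_quirk(struct pci_dev *dev)
{
	pci_info(dev, "example fixup applied\n");
}
DECLARE_PCI_FIXUP_EARLY(0x1234, PCI_ANY_ID, example_quirk);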
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a6d6650a0490..7acc9f91e72b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -16,8 +16,6 @@
/**
* struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
- * @owner: The module owner of this structure
- * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -25,17 +23,9 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_latch_status: Called to get the current latch status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_adapter_status: Called to see if an adapter is present in the slot or not.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @reset_slot: Optional interface to allow override of a bus reset for the
* slot for cases where a secondary bus reset can result in spurious
* hotplug events or where a slot can be reset independent of the bus.
@@ -46,8 +36,6 @@
* set an LED, enable / disable power, etc.)
*/
struct hotplug_slot_ops {
- struct module *owner;
- const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -60,37 +48,19 @@ struct hotplug_slot_ops {
};
/**
- * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power_status: if power is enabled or not (1/0)
- * @attention_status: if the attention light is enabled or not (1/0)
- * @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_status: if there is a pci board present in the slot or not (1/0)
- *
- * Used to notify the hotplug pci core of the status of a specific slot.
- */
-struct hotplug_slot_info {
- u8 power_status;
- u8 attention_status;
- u8 latch_status;
- u8 adapter_status;
-};
-
-/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @info: pointer to the &struct hotplug_slot_info for the initial values for
- * this slot.
- * @private: used by the hotplug pci controller driver to store whatever it
- * needs.
+ * @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
struct hotplug_slot {
- struct hotplug_slot_ops *ops;
- struct hotplug_slot_info *info;
- void *private;
+ const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
struct pci_slot *pci_slot;
+ struct module *owner;
+ const char *mod_name;
};
static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
@@ -110,9 +80,6 @@ void pci_hp_del(struct hotplug_slot *slot);
void pci_hp_destroy(struct hotplug_slot *slot);
void pci_hp_deregister(struct hotplug_slot *slot);
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info);
-
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
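
After this change a hotplug driver points at a const ops table and no longer fills a hotplug_slot_info; owner/mod_name are set by the pci_hp_register() wrapper via THIS_MODULE/KBUILD_MODNAME. A registration sketch with placeholder callbacks:

#include <linux/pci_hotplug.h>

/* Hedged sketch: minimal slot with const ops; real drivers would embed
 * struct hotplug_slot in their own per-slot state instead.
 */
static int example_enable_slot(struct hotplug_slot *slot)
{
	return 0;
}

static int example_disable_slot(struct hotplug_slot *slot)
{
	return 0;
}

static const struct hotplug_slot_ops example_slot_ops = {
	.enable_slot	= example_enable_slot,
	.disable_slot	= example_disable_slot,
};

static struct hotplug_slot example_slot = {
	.ops = &example_slot_ops,
};

static int example_register(struct pci_bus *pbus, int devnr)
{
	return pci_hp_register(&example_slot, pbus, devnr, "example-slot");
}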
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 99d366cb0e9f..d86d5a2477fc 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -117,6 +117,10 @@
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+#define PCI_CLASS_SERIAL_IPMI 0x0c07
+#define PCI_CLASS_SERIAL_IPMI_SMIC 0x0c0700
+#define PCI_CLASS_SERIAL_IPMI_KCS 0x0c0701
+#define PCI_CLASS_SERIAL_IPMI_BT 0x0c0702
#define PCI_BASE_CLASS_WIRELESS 0x0d
#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
@@ -541,6 +545,9 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -2355,6 +2362,8 @@
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_VENDOR_ID_USR 0x16ec
+
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
@@ -2539,8 +2548,6 @@
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
-#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
-#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
@@ -2561,6 +2568,8 @@
#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#define PCI_VENDOR_ID_HYGON 0x1d94
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -3084,4 +3093,6 @@
#define PCI_VENDOR_ID_OCZ 0x1b85
+#define PCI_VENDOR_ID_NCUBE 0x10ff
+
#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/pe.h b/include/linux/pe.h
index 143ce75be5f0..3482b18a48b5 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -166,7 +166,7 @@ struct mz_hdr {
uint16_t oem_info; /* oem specific */
uint16_t reserved1[10]; /* reserved */
uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
+ char message[]; /* message to print */
};
struct mz_reloc {
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 2d2096ba1cfe..1ce8e264a269 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -91,8 +91,7 @@
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
extern __PCPU_ATTRS(sec) __typeof__(type) name; \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
@@ -101,8 +100,7 @@
extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
- __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) name
+ __PCPU_ATTRS(sec) __typeof__(type) name
#endif
/*
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 009cdf3d65b6..b297cd1cd4f1 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
/**
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 79b99d653e03..71b75643c432 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
* cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable()
- * and that one the synchronize_sched() is done, the writer will see
+ * and that once the synchronize_rcu() is done, the writer will see
* anything we did within this RCU-sched read-size critical section.
*/
__this_cpu_inc(*sem->read_count);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 296bbe49d5d1..70b7123f38c7 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -149,4 +149,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+extern unsigned long pcpu_nr_pages(void);
+
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 10f92e1d8e7b..4641e850b204 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -99,10 +99,13 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
+ int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 53c500f0ca79..1d5c551a5add 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -262,8 +262,8 @@ struct pmu {
*/
int capabilities;
- int * __percpu pmu_disable_count;
- struct perf_cpu_context * __percpu pmu_cpu_context;
+ int __percpu *pmu_disable_count;
+ struct perf_cpu_context __percpu *pmu_cpu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 21713dc14ce2..7bb77850c65a 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -9,8 +9,10 @@
* PFN_SG_LAST - pfn references a page and is the last scatterlist entry
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
+ * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
+ * get_user_pages
*/
-#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
diff --git a/include/linux/phy.h b/include/linux/phy.h
index cd6f637cbbfb..3b051f761450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1,6 +1,6 @@
/*
* Framework and drivers for configuring and reading different PHYs
- * Based on code in sungem_phy.c and gianfar_phy.c
+ * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c
*
* Author: Andy Fleming
*
@@ -19,6 +19,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
@@ -41,13 +42,26 @@
#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
SUPPORTED_1000baseT_Full)
-#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \
- PHY_100BT_FEATURES | \
- PHY_DEFAULT_FEATURES)
-
-#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
- PHY_1000BT_FEATURES)
-
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+
+#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
+#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
+#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
+#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
+#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
+#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+
+extern const int phy_10_100_features_array[4];
+extern const int phy_basic_t1_features_array[2];
+extern const int phy_gbit_features_array[2];
+extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -57,9 +71,8 @@
#define PHY_POLL -1
#define PHY_IGNORE_INTERRUPT -2
-#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_IS_INTERNAL 0x00000002
-#define PHY_RST_AFTER_CLK_EN 0x00000004
+#define PHY_IS_INTERNAL 0x00000001
+#define PHY_RST_AFTER_CLK_EN 0x00000002
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -97,9 +110,9 @@ typedef enum {
* @speeds: buffer to store supported speeds in.
* @size: size of speeds buffer.
*
- * Description: Returns the number of supported speeds, and
- * fills the speeds * buffer with the supported speeds. If speeds buffer is
- * too small to contain * all currently supported speeds, will return as
+ * Description: Returns the number of supported speeds, and fills
+ * the speeds buffer with the supported speeds. If speeds buffer is
+ * too small to contain all currently supported speeds, will return as
* many speeds as can fit.
*/
unsigned int phy_supported_speeds(struct phy_device *phy,
@@ -107,7 +120,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int size);
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * phy_modes - map phy_interface_t enum to device tree binding of phy-mode
+ * @interface: enum phy_interface_t value
+ *
+ * Description: maps 'enum phy_interface_t' defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
* device drivers can get the PHY interface from the device tree.
*/
@@ -169,7 +185,6 @@ static inline const char *phy_modes(phy_interface_t interface)
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
#define PHY_FORCE_TIMEOUT 10
-#define PHY_AN_TIMEOUT 10
#define PHY_MAX_ADDR 32
@@ -255,57 +270,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED false
+#define PHY_INTERRUPT_ENABLED true
/* PHY state machine states:
*
* DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will, depending on the PHY, set
- * the state to STARTING or READY
- *
- * STARTING: PHY device is coming up, and the ethernet driver is
- * not ready. PHY drivers may set this in the probe function.
- * If they do, they are responsible for making sure the state is
- * eventually set to indicate whether the PHY is UP or READY,
- * depending on the state when the PHY is done starting up.
- * - PHY driver will set the state to READY
- * - start will set the state to PENDING
+ * - PHY driver probe function will set the state to READY
*
* READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
- * probe will be set to this state by phy_probe(). If the PHY
- * driver knows the PHY is ready, and the PHY state is STARTING,
- * then it sets this STATE.
+ * probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * PENDING: PHY device is coming up, but the ethernet driver is
- * ready. phy_start will set this state if the PHY state is
- * STARTING.
- * - PHY driver will set the state to UP when the PHY is ready
- *
* UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to AN
- *
- * AN: The PHY is currently negotiating the link state. Link is
- * therefore down for now. phy_timer will set this state when it
- * detects the state is UP. config_aneg will set this state
- * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
- * - If autonegotiation finishes, but there's no link, it sets
- * the state to NOLINK.
- * - If aneg finishes with link, it sets the state to RUNNING,
- * and calls adjust_link
- * - If autonegotiation did not finish after an arbitrary amount
- * of time, autonegotiation should be tried again if the PHY
- * supports "magic" autonegotiation (back to AN)
- * - If it didn't finish, and no magic_aneg, move to FORCING.
+ * - timer moves to NOLINK or RUNNING
*
* NOLINK: PHY is up, but not currently plugged in.
- * - If the timer notes that the link comes back, we move to RUNNING
- * - config_aneg moves to AN
+ * - irq or timer will set RUNNING if link comes back
* - phy_stop moves to HALTED
*
* FORCING: PHY is being configured with forced settings
@@ -316,11 +301,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*
* RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - timer will set CHANGELINK if we're polling (this ensures the
- * link state is polled every other cycle of this state machine,
- * which makes it every other second)
- * - irq will set CHANGELINK
- * - config_aneg will set AN
+ * - irq or timer will set NOLINK if link goes down
* - phy_stop moves to HALTED
*
* CHANGELINK: PHY experienced a change in link state
@@ -340,16 +321,13 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*/
enum phy_state {
PHY_DOWN = 0,
- PHY_STARTING,
PHY_READY,
- PHY_PENDING,
+ PHY_HALTED,
PHY_UP,
- PHY_AN,
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
PHY_CHANGELINK,
- PHY_HALTED,
PHY_RESUMING
};
@@ -381,7 +359,6 @@ struct phy_c45_device_ids {
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the phy_mac_interrupt
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -418,6 +395,9 @@ struct phy_device {
/* The most recently read link state */
unsigned link:1;
+ /* Interrupts are enabled */
+ unsigned interrupts:1;
+
enum phy_state state;
u32 dev_flags;
@@ -433,14 +413,11 @@ struct phy_device {
int pause;
int asym_pause;
- /* Enabled Interrupts */
- u32 interrupts;
-
- /* Union of PHY and Attached devices' supported modes */
- /* See mii.h for more info */
- u32 supported;
- u32 advertising;
- u32 lp_advertising;
+ /* Union of PHY and Attached devices' supported link modes */
+ /* See ethtool.h for more info */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
@@ -466,7 +443,6 @@ struct phy_device {
void *priv;
/* Interrupt and Polling infrastructure */
- struct work_struct phy_queue;
struct delayed_work state_queue;
struct mutex lock;
@@ -509,7 +485,7 @@ struct phy_driver {
u32 phy_id;
char *name;
u32 phy_id_mask;
- u32 features;
+ const unsigned long * const features;
u32 flags;
const void *driver_data;
@@ -665,6 +641,10 @@ struct phy_driver {
#define PHY_ANY_ID "MATCH ANY PHY"
#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -688,9 +668,31 @@ struct phy_setting {
const struct phy_setting *
phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact);
+ bool exact);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit);
+ unsigned long *mask);
+
+static inline bool __phy_is_started(struct phy_device *phydev)
+{
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ return phydev->state >= PHY_UP;
+}
+
+/**
+ * phy_is_started - Convenience function to check whether PHY is started
+ * @phydev: The phy_device struct
+ */
+static inline bool phy_is_started(struct phy_device *phydev)
+{
+ bool started;
+
+ mutex_lock(&phydev->lock);
+ started = __phy_is_started(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return started;
+}
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -967,6 +969,12 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_info(_phydev, format, args...) \
+ dev_info(&_phydev->mdio.dev, format, ##args)
+
+#define phydev_warn(_phydev, format, args...) \
+ dev_warn(&_phydev->mdio.dev, format, ##args)
+
#define phydev_dbg(_phydev, format, args...) \
dev_dbg(&_phydev->mdio.dev, format, ##args)
@@ -1035,11 +1043,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev, bool sync);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
@@ -1049,6 +1055,14 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
+void phy_support_sym_pause(struct phy_device *phydev);
+void phy_support_asym_pause(struct phy_device *phydev);
+void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
+ bool autoneg);
+void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
+bool phy_validate_pause(struct phy_device *phydev,
+ struct ethtool_pauseparam *pp);
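These helpers centralise flow-control handling that MAC drivers used to open-code. A rough sketch, assuming a hypothetical driver whose hardware supports asymmetric pause; the helper signatures are exactly those declared above:

static void example_mac_phy_attach(struct phy_device *phydev)
{
	/* Advertise symmetric and asymmetric pause towards the link partner. */
	phy_support_asym_pause(phydev);
}

static int example_mac_set_pauseparam(struct phy_device *phydev,
				      struct ethtool_pauseparam *pp)
{
	if (!phy_validate_pause(phydev, pp))
		return -EINVAL;

	phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);
	return 0;
}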
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h
new file mode 100644
index 000000000000..c08aacc0ac35
--- /dev/null
+++ b/include/linux/phy/phy-mipi-dphy.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ */
+
+#ifndef __PHY_MIPI_DPHY_H_
+#define __PHY_MIPI_DPHY_H_
+
+#include <video/videomode.h>
+
+/**
+ * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set
+ *
+ * This structure is used to represent the configuration state of a
+ * MIPI D-PHY phy.
+ */
+struct phy_configure_opts_mipi_dphy {
+ /**
+ * @clk_miss:
+ *
+ * Timeout, in picoseconds, for receiver to detect absence of
+ * Clock transitions and disable the Clock Lane HS-RX.
+ *
+ * Maximum value: 60000 ps
+ */
+ unsigned int clk_miss;
+
+ /**
+ * @clk_post:
+ *
+ * Time, in picoseconds, that the transmitter continues to
+ * send HS clock after the last associated Data Lane has
+ * transitioned to LP Mode. Interval is defined as the period
+ * from the end of @hs_trail to the beginning of @clk_trail.
+ *
+ * Minimum value: 60000 ps + 52 * @hs_clk_rate period in ps
+ */
+ unsigned int clk_post;
+
+ /**
+ * @clk_pre:
+ *
+ * Time, in UI, that the HS clock shall be driven by
+ * the transmitter prior to any associated Data Lane beginning
+ * the transition from LP to HS mode.
+ *
+ * Minimum value: 8 UI
+ */
+ unsigned int clk_pre;
+
+ /**
+ * @clk_prepare:
+ *
+ * Time, in picoseconds, that the transmitter drives the Clock
+ * Lane LP-00 Line state immediately before the HS-0 Line
+ * state starting the HS transmission.
+ *
+ * Minimum value: 38000 ps
+ * Maximum value: 95000 ps
+ */
+ unsigned int clk_prepare;
+
+ /**
+ * @clk_settle:
+ *
+ * Time interval, in picoseconds, during which the HS receiver
+ * should ignore any Clock Lane HS transitions, starting from
+ * the beginning of @clk_prepare.
+ *
+ * Minimum value: 95000 ps
+ * Maximum value: 300000 ps
+ */
+ unsigned int clk_settle;
+
+ /**
+ * @clk_term_en:
+ *
+ * Time, in picoseconds, for the Clock Lane receiver to enable
+ * the HS line termination.
+ *
+ * Maximum value: 38000 ps
+ */
+ unsigned int clk_term_en;
+
+ /**
+ * @clk_trail:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state after the last payload clock bit of a HS transmission
+ * burst.
+ *
+ * Minimum value: 60000 ps
+ */
+ unsigned int clk_trail;
+
+ /**
+ * @clk_zero:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state prior to starting the Clock.
+ */
+ unsigned int clk_zero;
+
+ /**
+ * @d_term_en:
+ *
+ * Time, in picoseconds, for the Data Lane receiver to enable
+ * the HS line termination.
+ *
+ * Maximum value: 35000 ps + 4 * @hs_clk_rate period in ps
+ */
+ unsigned int d_term_en;
+
+ /**
+ * @eot:
+ *
+ * Transmitted time interval, in picoseconds, from the start
+ * of @hs_trail or @clk_trail, to the start of the LP-11
+ * state following a HS burst.
+ *
+ * Maximum value: 105000 ps + 12 * @hs_clk_rate period in ps
+ */
+ unsigned int eot;
+
+ /**
+ * @hs_exit:
+ *
+ * Time, in picoseconds, that the transmitter drives LP-11
+ * following a HS burst.
+ *
+ * Minimum value: 100000 ps
+ */
+ unsigned int hs_exit;
+
+ /**
+ * @hs_prepare:
+ *
+ * Time, in picoseconds, that the transmitter drives the Data
+ * Lane LP-00 Line state immediately before the HS-0 Line
+ * state starting the HS transmission.
+ *
+ * Minimum value: 40000 ps + 4 * @hs_clk_rate period in ps
+ * Maximum value: 85000 ps + 6 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_prepare;
+
+ /**
+ * @hs_settle:
+ *
+ * Time interval, in picoseconds, during which the HS receiver
+ * shall ignore any Data Lane HS transitions, starting from
+ * the beginning of @hs_prepare.
+ *
+ * Minimum value: 85000 ps + 6 * @hs_clk_rate period in ps
+ * Maximum value: 145000 ps + 10 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_settle;
+
+ /**
+ * @hs_skip:
+ *
+ * Time interval, in picoseconds, during which the HS-RX
+ * should ignore any transitions on the Data Lane, following a
+ * HS burst. The end point of the interval is defined as the
+ * beginning of the LP-11 state following the HS burst.
+ *
+ * Minimum value: 40000 ps
+ * Maximum value: 55000 ps + 4 * @hs_clk_rate period in ps
+ */
+ unsigned int hs_skip;
+
+ /**
+ * @hs_trail:
+ *
+ * Time, in picoseconds, that the transmitter drives the
+ * flipped differential state after last payload data bit of a
+ * HS transmission burst
+ *
+ * Minimum value: max(8 * @hs_clk_rate period in ps,
+ * 60000 ps + 4 * @hs_clk_rate period in ps)
+ */
+ unsigned int hs_trail;
+
+ /**
+ * @hs_zero:
+ *
+ * Time, in picoseconds, that the transmitter drives the HS-0
+ * state prior to transmitting the Sync sequence.
+ */
+ unsigned int hs_zero;
+
+ /**
+ * @init:
+ *
+ * Time, in picoseconds, for the initialization period to
+ * complete.
+ *
+ * Minimum value: 100000000 ps
+ */
+ unsigned int init;
+
+ /**
+ * @lpx:
+ *
+ * Transmitted length, in picoseconds, of any Low-Power state
+ * period.
+ *
+ * Minimum value: 50000 ps
+ */
+ unsigned int lpx;
+
+ /**
+ * @ta_get:
+ *
+ * Time, in picoseconds, that the new transmitter drives the
+ * Bridge state (LP-00) after accepting control during a Link
+ * Turnaround.
+ *
+ * Value: 5 * @lpx
+ */
+ unsigned int ta_get;
+
+ /**
+ * @ta_go:
+ *
+ * Time, in picoseconds, that the transmitter drives the
+ * Bridge state (LP-00) before releasing control during a Link
+ * Turnaround.
+ *
+ * Value: 4 * @lpx
+ */
+ unsigned int ta_go;
+
+ /**
+ * @ta_sure:
+ *
+ * Time, in picoseconds, that the new transmitter waits after
+ * the LP-10 state before transmitting the Bridge state
+ * (LP-00) during a Link Turnaround.
+ *
+ * Minimum value: @lpx
+ * Maximum value: 2 * @lpx
+ */
+ unsigned int ta_sure;
+
+ /**
+ * @wakeup:
+ *
+ * Time, in picoseconds, that a transmitter drives a Mark-1
+ * state prior to a Stop state in order to initiate an exit
+ * from ULPS.
+ *
+ * Minimum value: 1000000000 ps
+ */
+ unsigned int wakeup;
+
+ /**
+ * @hs_clk_rate:
+ *
+ * Clock rate, in Hertz, of the high-speed clock.
+ */
+ unsigned long hs_clk_rate;
+
+ /**
+ * @lp_clk_rate:
+ *
+ * Clock rate, in Hertz, of the low-power clock.
+ */
+ unsigned long lp_clk_rate;
+
+ /**
+ * @lanes:
+ *
+ * Number of active data lanes used for the transmissions.
+ */
+ unsigned char lanes;
+};
+
+int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
+ unsigned int bpp,
+ unsigned int lanes,
+ struct phy_configure_opts_mipi_dphy *cfg);
+int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg);
+
+#endif /* __PHY_MIPI_DPHY_H_ */
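Combined with the generic PHY changes later in this diff (phy_validate(), phy_configure() and the mipi_dphy member of union phy_configure_opts), a display controller driver could derive and apply a D-PHY configuration roughly as sketched below. The pixel clock, bits per pixel and lane count are placeholders, not values taken from any real driver:

static int example_dsi_setup_dphy(struct phy *dphy)
{
	union phy_configure_opts opts = { };
	int ret;

	/* Placeholder timings: 148.5 MHz pixel clock, 24 bpp, 4 lanes. */
	ret = phy_mipi_dphy_get_default_config(148500000, 24, 4,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	/* Let the provider tune timings it cannot meet exactly. */
	ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, &opts);
	if (ret)
		return ret;

	return phy_configure(dphy, &opts);
}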
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
deleted file mode 100644
index 0a2c18a9771d..000000000000
--- a/include/linux/phy/phy-qcom-ufs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef PHY_QCOM_UFS_H_
-#define PHY_QCOM_UFS_H_
-
-#include "phy.h"
-
-/**
- * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
-void ufs_qcom_phy_save_controller_version(struct phy *phy,
- u8 major, u16 minor, u16 step);
-
-#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 9713aebdd348..e8e118d70fd7 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -20,6 +20,8 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/phy/phy-mipi-dphy.h>
+
struct phy;
enum phy_mode {
@@ -35,11 +37,21 @@ enum phy_mode {
PHY_MODE_USB_DEVICE_HS,
PHY_MODE_USB_DEVICE_SS,
PHY_MODE_USB_OTG,
- PHY_MODE_SGMII,
- PHY_MODE_2500SGMII,
- PHY_MODE_10GKR,
PHY_MODE_UFS_HS_A,
PHY_MODE_UFS_HS_B,
+ PHY_MODE_PCIE,
+ PHY_MODE_ETHERNET,
+ PHY_MODE_MIPI_DPHY,
+};
+
+/**
+ * union phy_configure_opts - Opaque generic phy configuration
+ *
+ * @mipi_dphy: Configuration set applicable for phys supporting
+ * the MIPI_DPHY phy mode.
+ */
+union phy_configure_opts {
+ struct phy_configure_opts_mipi_dphy mipi_dphy;
};
/**
@@ -58,7 +70,38 @@ struct phy_ops {
int (*exit)(struct phy *phy);
int (*power_on)(struct phy *phy);
int (*power_off)(struct phy *phy);
- int (*set_mode)(struct phy *phy, enum phy_mode mode);
+ int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode);
+
+ /**
+ * @configure:
+ *
+ * Optional.
+ *
+ * Used to change the PHY parameters. phy_init() must have
+ * been called on the phy.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*configure)(struct phy *phy, union phy_configure_opts *opts);
+
+ /**
+ * @validate:
+ *
+ * Optional.
+ *
+ * Used to check that the current set of parameters can be
+ * handled by the phy. Implementations are free to tune the
+ * parameters passed as arguments if needed by some
+ * implementation detail or constraints. It must not change
+ * any actual configuration of the PHY, so calling it as many
+ * times as deemed fit by the consumer must have no side
+ * effect.
+ *
+ * Returns: 0 if the configuration can be applied, a negative
+ * error code otherwise
+ */
+ int (*validate)(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts);
int (*reset)(struct phy *phy);
int (*calibrate)(struct phy *phy);
struct module *owner;
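On the provider side, .set_mode() gains a submode argument and the two new callbacks stay optional. A skeletal example with the hardware programming left out; the driver name is invented:

static int example_dphy_set_mode(struct phy *phy, enum phy_mode mode,
				 int submode)
{
	return mode == PHY_MODE_MIPI_DPHY ? 0 : -EINVAL;
}

static int example_dphy_configure(struct phy *phy,
				  union phy_configure_opts *opts)
{
	/* Program opts->mipi_dphy timings into the hardware (omitted). */
	return 0;
}

static const struct phy_ops example_dphy_ops = {
	.set_mode	= example_dphy_set_mode,
	.configure	= example_dphy_configure,
	.owner		= THIS_MODULE,
};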
@@ -67,6 +110,7 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
+ * @mode: PHY mode
*/
struct phy_attrs {
u32 bus_width;
@@ -78,7 +122,6 @@ struct phy_attrs {
* @dev: phy device
* @id: id of the phy device
* @ops: function pointers for performing phy operations
- * @init_data: list of PHY consumers (non-dt only)
* @mutex: mutex to protect phy_ops
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
@@ -162,7 +205,13 @@ int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
-int phy_set_mode(struct phy *phy, enum phy_mode mode);
+int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode);
+#define phy_set_mode(phy, mode) \
+ phy_set_mode_ext(phy, mode, 0)
+int phy_configure(struct phy *phy, union phy_configure_opts *opts);
+int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts);
+
static inline enum phy_mode phy_get_mode(struct phy *phy)
{
return phy->attrs.mode;
@@ -276,13 +325,17 @@ static inline int phy_power_off(struct phy *phy)
return -ENOSYS;
}
-static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
+static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode,
+ int submode)
{
if (!phy)
return 0;
return -ENOSYS;
}
+#define phy_set_mode(phy, mode) \
+ phy_set_mode_ext(phy, mode, 0)
+
static inline enum phy_mode phy_get_mode(struct phy *phy)
{
return PHY_MODE_INVALID;
@@ -302,6 +355,24 @@ static inline int phy_calibrate(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_configure(struct phy *phy,
+ union phy_configure_opts *opts)
+{
+ if (!phy)
+ return 0;
+
+ return -ENOSYS;
+}
+
+static inline int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
+ union phy_configure_opts *opts)
+{
+ if (!phy)
+ return 0;
+
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ee54453a40a0..9525567b1951 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,6 +13,7 @@ struct fixed_phy_status {
struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
+extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status,
int link_gpio);
@@ -47,6 +48,10 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index b37b05bfd1a6..4587ce362535 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -20,7 +20,7 @@ struct phy_device;
#include <linux/leds.h>
#include <linux/phy.h>
-#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 7633d55d9a24..14a9a39da9c7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -7,11 +7,10 @@
enum pid_type
{
PIDTYPE_PID,
+ PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
- /* only valid to __task_pid_nr_ns() */
- __PIDTYPE_TGID
};
/*
@@ -67,12 +66,6 @@ struct pid
extern struct pid init_struct_pid;
-struct pid_link
-{
- struct hlist_node node;
- struct pid *pid;
-};
-
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
@@ -177,7 +170,7 @@ pid_t pid_vnr(struct pid *pid);
do { \
if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), \
- &(pid)->tasks[type], pids[type].node) {
+ &(pid)->tasks[type], pid_links[type]) {
/*
* Both old and new leaders may be attached to
diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h
new file mode 100644
index 000000000000..0e0d3df9bf72
--- /dev/null
+++ b/include/linux/pl353-smc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM PL353 SMC Driver Header
+ *
+ * Copyright (C) 2012 - 2018 Xilinx, Inc
+ */
+
+#ifndef __LINUX_PL353_SMC_H
+#define __LINUX_PL353_SMC_H
+
+enum pl353_smc_ecc_mode {
+ PL353_SMC_ECCMODE_BYPASS = 0,
+ PL353_SMC_ECCMODE_APB = 1,
+ PL353_SMC_ECCMODE_MEM = 2
+};
+
+enum pl353_smc_mem_width {
+ PL353_SMC_MEM_WIDTH_8 = 0,
+ PL353_SMC_MEM_WIDTH_16 = 1
+};
+
+u32 pl353_smc_get_ecc_val(int ecc_reg);
+bool pl353_smc_ecc_is_busy(void);
+int pl353_smc_get_nand_int_status_raw(void);
+void pl353_smc_clr_nand_int(void);
+int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode);
+int pl353_smc_set_ecc_pg_size(unsigned int pg_sz);
+int pl353_smc_set_buswidth(unsigned int bw);
+void pl353_smc_set_cycles(u32 timings[]);
+#endif
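A hedged sketch of how a NAND controller driver might use these hooks when preparing ECC; the 2048-byte page size is a placeholder and error handling is reduced to the minimum:

static int example_nand_setup_smc_ecc(void)
{
	int ret;

	ret = pl353_smc_set_ecc_pg_size(2048);	/* placeholder page size */
	if (ret)
		return ret;

	return pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
}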
diff --git a/include/linux/platform_data/ad7879.h b/include/linux/platform_data/ad7879.h
deleted file mode 100644
index 6655cc8453ac..000000000000
--- a/include/linux/platform_data/ad7879.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* linux/platform_data/ad7879.h */
-
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-struct ad7879_platform_data {
- u16 model; /* 7879 */
- u16 x_plate_ohms;
- u16 x_min, x_max;
- u16 y_min, y_max;
- u16 pressure_min, pressure_max;
-
- bool swap_xy; /* swap x and y axes */
-
- /* [0..255] 0=OFF Starts at 1=550us and goes
- * all the way to 9.440ms in steps of 35us.
- */
- u8 pen_down_acc_interval;
- /* [0..15] Starts at 0=128us and goes all the
- * way to 4.096ms in steps of 128us.
- */
- u8 first_conversion_delay;
- /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
- u8 acquisition_time;
- /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */
- u8 averaging;
- /* [0..3] Perform X measurements 0 = OFF,
- * 1 = 4, 2 = 8, 3 = 16 (median > averaging)
- */
- u8 median;
- /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib
- * requires CONFIG_GPIOLIB
- */
- bool gpio_export;
- /* identifies the first GPIO number handled by this chip;
- * or, if negative, requests dynamic ID allocation.
- */
- s32 gpio_base;
-};
diff --git a/include/linux/platform_data/ams-delta-fiq.h b/include/linux/platform_data/ams-delta-fiq.h
new file mode 100644
index 000000000000..cf4589ccb720
--- /dev/null
+++ b/include/linux/platform_data/ams-delta-fiq.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * include/linux/platform_data/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+#define __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK 0
+#define FIQ_STATE 1
+#define FIQ_KEYS_CNT 2
+#define FIQ_TAIL_OFFSET 3
+#define FIQ_HEAD_OFFSET 4
+#define FIQ_BUF_LEN 5
+#define FIQ_KEY 6
+#define FIQ_MISSED_KEYS 7
+#define FIQ_BUFFER_START 8
+#define FIQ_GPIO_INT_MASK 9
+#define FIQ_KEYS_HICNT 10
+#define FIQ_IRQ_PEND 11
+#define FIQ_SIR_CODE_L1 12
+#define IRQ_SIR_CODE_L2 13
+
+#define FIQ_CNT_INT_00 14
+#define FIQ_CNT_INT_KEY 15
+#define FIQ_CNT_INT_MDM 16
+#define FIQ_CNT_INT_03 17
+#define FIQ_CNT_INT_HSW 18
+#define FIQ_CNT_INT_05 19
+#define FIQ_CNT_INT_06 20
+#define FIQ_CNT_INT_07 21
+#define FIQ_CNT_INT_08 22
+#define FIQ_CNT_INT_09 23
+#define FIQ_CNT_INT_10 24
+#define FIQ_CNT_INT_11 25
+#define FIQ_CNT_INT_12 26
+#define FIQ_CNT_INT_13 27
+#define FIQ_CNT_INT_14 28
+#define FIQ_CNT_INT_15 29
+
+#define FIQ_CIRC_BUFF 30 /* Start of circular buffer */
+
+#endif
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 85ad68f9206a..7fe80f1c7e08 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -79,6 +79,7 @@ struct davinci_mcasp_pdata {
/* McASP specific fields */
int tdm_slots;
u8 op_mode;
+ u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 896cb71a382c..1a1d58ebffbf 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -49,6 +49,7 @@ struct dw_dma_slave {
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
+ * @protctl: Protection control signals setting per channel.
*/
struct dw_dma_platform_data {
unsigned int nr_channels;
@@ -65,6 +66,11 @@ struct dw_dma_platform_data {
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+#define CHAN_PROTCTL_PRIVILEGED BIT(0)
+#define CHAN_PROTCTL_BUFFERABLE BIT(1)
+#define CHAN_PROTCTL_CACHEABLE BIT(2)
+#define CHAN_PROTCTL_MASK GENMASK(2, 0)
+ unsigned char protctl;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
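The new protctl field carries the per-channel protection control signalling. As an illustration only, a board file might fill it in like this; the platform data instance and channel count are invented:

static struct dw_dma_platform_data example_dw_dma_pdata = {
	.nr_channels	= 8,
	/* Mark transfers as privileged and bufferable. */
	.protctl	= CHAN_PROTCTL_PRIVILEGED | CHAN_PROTCTL_BUFFERABLE,
};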
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index f8f1f6b952a6..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..d718ccfa3421
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale eDMA platform data, ColdFire SoC's family.
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+
+struct dma_slave_map;
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
+
+#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
+
+/**
+ * struct mcf_edma_platform_data - platform specific data for eDMA engine
+ *
+ * @dma_channels: The number of eDMA channels.
+ * @slave_map: DMA slave map entries for this controller.
+ * @slavecnt: Number of entries in @slave_map.
+ */
+struct mcf_edma_platform_data {
+ int dma_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
index 5c15a738e116..219bd79dabfc 100644
--- a/include/linux/platform_data/ehci-sh.h
+++ b/include/linux/platform_data/ehci-sh.h
@@ -1,21 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* EHCI SuperH driver platform data
*
* Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
* Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __USB_EHCI_SH_H
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 57a5a35e0073..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -16,46 +16,14 @@
#ifndef __DAVINCI_GPIO_PLATFORM_H
#define __DAVINCI_GPIO_PLATFORM_H
-#include <linux/io.h>
-#include <linux/spinlock.h>
-
-#include <asm-generic/gpio.h>
-
-#define MAX_REGS_BANKS 5
-#define MAX_INT_PER_BANK 32
-
struct davinci_gpio_platform_data {
+ bool no_auto_base;
+ u32 base;
u32 ngpio;
u32 gpio_unbanked;
};
-struct davinci_gpio_irq_data {
- void __iomem *regs;
- struct davinci_gpio_controller *chip;
- int bank_num;
-};
-
-struct davinci_gpio_controller {
- struct gpio_chip chip;
- struct irq_domain *irq_domain;
- /* Serialize access to GPIO registers */
- spinlock_t lock;
- void __iomem *regs[MAX_REGS_BANKS];
- int gpio_unbanked;
- int irqs[MAX_INT_PER_BANK];
- unsigned int base;
-};
-
-/*
- * basic gpio routines
- */
-#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
-
/* Convert GPIO signal to GPIO pin number */
#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-static inline u32 __gpio_mask(unsigned gpio)
-{
- return 1 << (gpio % 32);
-}
#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 8612855691b2..6d07eebb3f75 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -24,8 +24,10 @@
#ifndef __ASM_ARCH_OMAP_GPIO_H
#define __ASM_ARCH_OMAP_GPIO_H
+#ifndef __ASSEMBLER__
#include <linux/io.h>
#include <linux/platform_device.h>
+#endif
#define OMAP1_MPUIO_BASE 0xfffb5000
@@ -157,6 +159,7 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
+#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
u16 direction;
@@ -197,23 +200,14 @@ struct omap_gpio_platform_data {
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;
+ u32 quirks; /* Version specific quirks mask */
+
struct omap_gpio_reg_offs *regs;
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
};
-#if IS_BUILTIN(CONFIG_GPIO_OMAP)
-extern void omap2_gpio_prepare_for_idle(int off_mode);
-extern void omap2_gpio_resume_after_idle(void);
-#else
-static inline void omap2_gpio_prepare_for_idle(int off_mode)
-{
-}
-
-static inline void omap2_gpio_resume_after_idle(void)
-{
-}
-#endif
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h
deleted file mode 100644
index b10d11c9bb49..000000000000
--- a/include/linux/platform_data/gpio-ts5500.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * GPIO (DIO) header for Technologic Systems TS-5500
- *
- * Copyright (c) 2012 Savoir-faire Linux Inc.
- * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _PDATA_GPIO_TS5500_H
-#define _PDATA_GPIO_TS5500_H
-
-/**
- * struct ts5500_dio_platform_data - TS-5500 pin block configuration
- * @base: The GPIO base number to use.
- * @strap: The only pin connected to an interrupt in a block is input-only.
- * If you need a bidirectional line which can trigger an IRQ, you
- * may strap it with an in/out pin. This flag indicates this case.
- */
-struct ts5500_dio_platform_data {
- int base;
- bool strap;
-};
-
-#endif /* _PDATA_GPIO_TS5500_H */
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 73d9098ada2d..85da11916bd5 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -70,9 +70,6 @@ struct omap_hsmmc_platform_data {
/* string specifying a particular variant of hardware */
char *version;
- int gpio_cd; /* gpio (card detect) */
- int gpio_cod; /* gpio (cover detect) */
- int gpio_wp; /* gpio (write protect) */
/* if we have special card, init it using this callback */
void (*init_card)(struct mmc_card *card);
diff --git a/include/linux/platform_data/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h
index 01edd96fe1f7..113d6b12f650 100644
--- a/include/linux/platform_data/i2c-ocores.h
+++ b/include/linux/platform_data/i2c-ocores.h
@@ -1,7 +1,7 @@
/*
* i2c-ocores.h - definitions for the i2c-ocores interface
*
- * Peter Korsgaard <jacmet@sunsite.dk>
+ * Peter Korsgaard <peter@korsgaard.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b..9f0aa1b48c78 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
new file mode 100644
index 000000000000..13874fa6e767
--- /dev/null
+++ b/include/linux/platform_data/mdio-gpio.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO-GPIO bus platform data structure
+ */
+
+#ifndef __LINUX_MDIO_GPIO_PDATA_H
+#define __LINUX_MDIO_GPIO_PDATA_H
+
+struct mdio_gpio_platform_data {
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
+};
+
+#endif /* __LINUX_MDIO_GPIO_PDATA_H */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 640dec8b5b0c..b606ca4197df 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -30,15 +30,11 @@ enum cd_types {
*
* ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
*
- * @wp_gpio: gpio for write_protect
- * @cd_gpio: gpio for card_detect interrupt
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
*/
struct esdhc_platform_data {
- unsigned int wp_gpio;
- unsigned int cd_gpio;
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 752f97c62ef2..7e44e84e7150 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -15,11 +15,7 @@ struct pxamci_platform_data {
int (*get_ro)(struct device *);
int (*setpower)(struct device *, unsigned int);
void (*exit)(struct device *, void *);
- int gpio_card_detect; /* gpio detecting card insertion */
- int gpio_card_ro; /* gpio detecting read only toggle */
bool gpio_card_ro_invert; /* gpio ro is inverted */
- int gpio_power; /* gpio powering up MMC bus */
- bool gpio_power_invert; /* gpio power is inverted */
};
extern void pxa_set_mci_info(struct pxamci_platform_data *info);
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
index b68d9f0bdd9e..33310b11cbdd 100644
--- a/include/linux/platform_data/mmc-s3cmci.h
+++ b/include/linux/platform_data/mmc-s3cmci.h
@@ -7,7 +7,6 @@
* @no_wprotect: Set this to indicate there is no write-protect switch.
* @no_detect: Set this if there is no detect switch.
* @wprotect_invert: Invert the default sense of the write protect switch.
- * @detect_invert: Invert the default sense of the write protect switch.
* @use_dma: Set to allow the use of DMA.
* @gpio_detect: GPIO number for the card detect line.
* @gpio_wprotect: GPIO number for the write protect line.
@@ -31,11 +30,8 @@ struct s3c24xx_mci_pdata {
unsigned int no_wprotect:1;
unsigned int no_detect:1;
unsigned int wprotect_invert:1;
- unsigned int detect_invert:1; /* set => detect active high */
unsigned int use_dma:1;
- unsigned int gpio_detect;
- unsigned int gpio_wprotect;
unsigned long ocr_avail;
void (*set_power)(unsigned char power_mode,
unsigned short vdd);
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
index 97948ac2bb9b..a403dd51dacc 100644
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -33,5 +33,4 @@ struct davinci_aemif_timing {
u8 ta;
};
-int davinci_aemif_setup(struct platform_device *pdev);
#endif
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 98b7925f1a2d..c0f624aca81c 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -48,6 +48,5 @@ struct mv_usb_platform_data {
int (*phy_init)(void __iomem *regbase);
void (*phy_deinit)(void __iomem *regbase);
int (*set_vbus)(unsigned int vbus);
- int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
#endif
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index 698d0d59db76..ee03d429742b 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -24,10 +24,11 @@
struct iio_channel;
enum ntc_thermistor_type {
- TYPE_NCPXXWB473,
- TYPE_NCPXXWL333,
TYPE_B57330V2103,
+ TYPE_B57891S0103,
+ TYPE_NCPXXWB473,
TYPE_NCPXXWF104,
+ TYPE_NCPXXWL333,
TYPE_NCPXXXH103,
};
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
index f9bed2a0af9d..fbf5ed73c7cc 100644
--- a/include/linux/platform_data/pm33xx.h
+++ b/include/linux/platform_data/pm33xx.h
@@ -12,6 +12,29 @@
#include <linux/kbuild.h>
#include <linux/types.h>
+/*
+ * WFI Flags for sleep code control
+ *
+ * These flags allow PM code to exclude certain operations from happening
+ * in the low level ASM code found in sleep33xx.S and sleep43xx.S
+ *
+ * WFI_FLAG_FLUSH_CACHE: Flush the ARM caches and disable caching. Only
+ * needed when MPU will lose context.
+ * WFI_FLAG_SELF_REFRESH: Let EMIF place DDR memory into self-refresh and
+ * disable EMIF.
+ * WFI_FLAG_SAVE_EMIF: Save context of all EMIF registers and restore in
+ * resume path. Only needed if PER domain loses context
+ * and must also have WFI_FLAG_SELF_REFRESH set.
+ * WFI_FLAG_WAKE_M3: Disable MPU clock or clockdomain to cause wkup_m3 to
+ * execute when WFI instruction executes.
+ * WFI_FLAG_RTC_ONLY: Configure the RTC to enter RTC+DDR mode.
+ */
+#define WFI_FLAG_FLUSH_CACHE BIT(0)
+#define WFI_FLAG_SELF_REFRESH BIT(1)
+#define WFI_FLAG_SAVE_EMIF BIT(2)
+#define WFI_FLAG_WAKE_M3 BIT(3)
+#define WFI_FLAG_RTC_ONLY BIT(4)
+
#ifndef __ASSEMBLER__
struct am33xx_pm_sram_addr {
void (*do_wfi)(void);
@@ -19,12 +42,15 @@ struct am33xx_pm_sram_addr {
unsigned long *resume_offset;
unsigned long *emif_sram_table;
unsigned long *ro_sram_data;
+ unsigned long resume_address;
};
struct am33xx_pm_platform_data {
int (*init)(void);
- int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long));
+ int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long),
+ unsigned long args);
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
+ void __iomem *(*get_rtc_base_addr)(void);
};
struct am33xx_pm_sram_data {
@@ -36,6 +62,7 @@ struct am33xx_pm_sram_data {
struct am33xx_pm_ro_sram_data {
u32 amx3_pm_sram_data_virt;
u32 amx3_pm_sram_data_phys;
+ void __iomem *rtc_base_virt;
} __packed __aligned(8);
#endif /* __ASSEMBLER__ */
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 9e20c2fb4ffd..4977c06d8a86 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -33,8 +33,6 @@
* 1: choose feedback clk + delay value
* 2: choose internal clk
* @clk_delay_enable: enable clk_delay or not, used on pxa910
- * @ext_cd_gpio: gpio pin used for external CD line
- * @ext_cd_gpio_invert: invert values for external CD gpio line
* @max_speed: the maximum speed supported
* @host_caps: Standard MMC host capabilities bit field.
* @quirks: quirks of platform
@@ -46,8 +44,6 @@ struct sdhci_pxa_platdata {
unsigned int clk_delay_cycles;
unsigned int clk_delay_sel;
bool clk_delay_enable;
- unsigned int ext_cd_gpio;
- bool ext_cd_gpio_invert;
unsigned int max_speed;
u32 host_caps;
u32 host_caps2;
diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h
deleted file mode 100644
index 39f7405cdac5..000000000000
--- a/include/linux/platform_data/sh_ipmmu.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* sh_ipmmu.h
- *
- * Copyright (C) 2012 Hideki EIRAKU
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#ifndef __SH_IPMMU_H__
-#define __SH_IPMMU_H__
-
-struct shmobile_ipmmu_platform_data {
- const char * const *dev_names;
- unsigned int num_dev_names;
-};
-
-#endif /* __SH_IPMMU_H__ */
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index ee495d707f17..fe815d7d9f58 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm.h -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Corporation
*
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_H__
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index f4edcb03c40c..0638fb6353bc 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -36,9 +36,6 @@ enum {
* @num_chipselect: number of chipselects supported by this SPI master
* @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
* controller within the SoC. Possible values are 0 and 1.
- * @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
- * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
- * to populate if all chip-selects are internal.
* @cshold_bug: set this to true if the SPI controller on your chip requires
* a write to CSHOLD bit in between transfers (like in DM355).
* @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
@@ -48,7 +45,6 @@ struct davinci_spi_platform_data {
u8 version;
u8 num_chipselect;
u8 intr_line;
- u8 *chip_sel;
u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index f8274b0c6888..728193111c2f 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -18,11 +18,13 @@
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
+ * @pullups: enable/disable i2c controller pullup resistors.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
bool spi_3wire;
+ bool pullups;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 990aad477458..1ea3aab972b4 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -14,6 +14,7 @@ enum ti_sysc_module_type {
TI_SYSC_OMAP4_SR,
TI_SYSC_OMAP4_MCASP,
TI_SYSC_OMAP4_USB_HOST_FS,
+ TI_SYSC_DRA7_MCAN,
};
struct ti_sysc_cookie {
@@ -45,7 +46,6 @@ struct sysc_regbits {
s8 emufree_shift;
};
-#define SYSC_QUIRK_RESOURCE_PROVIDER BIT(9)
#define SYSC_QUIRK_LEGACY_IDLE BIT(8)
#define SYSC_QUIRK_RESET_STATUS BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
new file mode 100644
index 000000000000..53dfc2541960
--- /dev/null
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_X86_ASUS_WMI_H
+#define __PLATFORM_DATA_X86_ASUS_WMI_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* WMI Methods */
+#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
+#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
+#define ASUS_WMI_METHODID_GLCD 0x44434C47 /* Get LCD status */
+#define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */
+#define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */
+#define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */
+#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */
+#define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */
+#define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */
+#define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */
+#define ASUS_WMI_METHODID_DEVP 0x50564544 /* DEVice Policy */
+#define ASUS_WMI_METHODID_OSVR 0x5256534F /* OS VeRsion */
+#define ASUS_WMI_METHODID_DSTS 0x53544344 /* Device STatuS */
+#define ASUS_WMI_METHODID_DSTS2 0x53545344 /* Device STatuS #2*/
+#define ASUS_WMI_METHODID_BSTS 0x53545342 /* Bios STatuS ? */
+#define ASUS_WMI_METHODID_DEVS 0x53564544 /* DEVice Set */
+#define ASUS_WMI_METHODID_CFVS 0x53564643 /* CPU Frequency Volt Set */
+#define ASUS_WMI_METHODID_KBFT 0x5446424B /* KeyBoard FilTer */
+#define ASUS_WMI_METHODID_INIT 0x54494E49 /* INITialize */
+#define ASUS_WMI_METHODID_HKEY 0x59454B48 /* Hot KEY ?? */
+
+#define ASUS_WMI_UNSUPPORTED_METHOD 0xFFFFFFFE
+
+/* Wireless */
+#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
+#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
+#define ASUS_WMI_DEVID_WLAN 0x00010011
+#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
+#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
+#define ASUS_WMI_DEVID_GPS 0x00010015
+#define ASUS_WMI_DEVID_WIMAX 0x00010017
+#define ASUS_WMI_DEVID_WWAN3G 0x00010019
+#define ASUS_WMI_DEVID_UWB 0x00010021
+
+/* Leds */
+/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
+
+/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
+#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
+#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
+#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
+#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+
+/* Misc */
+#define ASUS_WMI_DEVID_CAMERA 0x00060013
+
+/* Storage */
+#define ASUS_WMI_DEVID_CARDREADER 0x00080013
+
+/* Input */
+#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
+#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
+
+/* Fan, Thermal */
+#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
+#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
+
+/* Power */
+#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
+
+/* Deep S3 / Resume on LID open */
+#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
+
+/* DSTS masks */
+#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
+#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
+#define ASUS_WMI_DSTS_PRESENCE_BIT 0x00010000
+#define ASUS_WMI_DSTS_USER_BIT 0x00020000
+#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
+#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
+#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+
+#if IS_REACHABLE(CONFIG_ASUS_WMI)
+int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
+#else
+static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
+ u32 *retval)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */
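Other platform drivers can now query the ASUS WMI interface through this single helper. A hedged example that asks whether a WLAN device is present and enabled, using only the IDs and masks defined above; whether DSTS or DSTS2 is the right method depends on the firmware, so DSTS2 is chosen here purely for illustration:

static bool example_wlan_enabled(void)
{
	u32 retval;

	if (asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
				     ASUS_WMI_DEVID_WLAN, 0, &retval))
		return false;

	/* The device must be reported present and its status bit set. */
	return (retval & ASUS_WMI_DSTS_PRESENCE_BIT) &&
	       (retval & ASUS_WMI_DSTS_STATUS_BIT);
}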
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 1a9f38f27f65..c7c081dc6034 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -40,6 +40,7 @@ struct platform_device {
#define platform_get_device_id(pdev) ((pdev)->id_entry)
+#define dev_is_platform(dev) ((dev)->bus == &platform_bus_type)
#define to_platform_device(x) container_of((x), struct platform_device, dev)
extern int platform_device_register(struct platform_device *);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e723b78d8357..0bd9de116826 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/completion.h>
/*
@@ -608,7 +609,7 @@ struct dev_pm_info {
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
- struct timer_list suspend_timer;
+ struct hrtimer suspend_timer;
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
@@ -631,7 +632,7 @@ struct dev_pm_info {
enum rpm_status runtime_status;
int runtime_error;
int autosuspend_delay;
- unsigned long last_busy;
+ u64 last_busy;
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 776c546d581a..dd364abb649a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,11 +17,36 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
-/* Defines used for the flags field in the struct generic_pm_domain */
-#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
-#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
-#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
-#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */
+/*
+ * Flags to control the behaviour of a genpd.
+ *
+ * These flags may be set in the struct generic_pm_domain's flags field by a
+ * genpd backend driver. The flags must be set before it calls pm_genpd_init(),
+ * which initializes a genpd.
+ *
+ * GENPD_FLAG_PM_CLK: Instructs genpd to use the PM clk framework,
+ * while powering on/off attached devices.
+ *
+ * GENPD_FLAG_IRQ_SAFE: This informs genpd that its backend callbacks,
+ * ->power_on|off(), don't sleep. Hence, these
+ * can be invoked from within atomic context, which
+ * enables genpd to power on/off the PM domain,
+ * even when pm_runtime_is_irq_safe() returns true,
+ * for any of its attached devices. Note that a
+ * genpd having this flag set requires its
+ * master domains to also have it set.
+ *
+ * GENPD_FLAG_ALWAYS_ON: Instructs genpd to always keep the PM domain
+ * powered on.
+ *
+ * GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
+ * on, in case any of its attached devices is used
+ * in the wakeup path to serve system wakeups.
+ */
+#define GENPD_FLAG_PM_CLK (1U << 0)
+#define GENPD_FLAG_IRQ_SAFE (1U << 1)
+#define GENPD_FLAG_ALWAYS_ON (1U << 2)
+#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
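For a genpd provider the flags are simply OR'ed into the flags field before pm_genpd_init() is called, along the lines of the hypothetical sketch below (domain name invented, power callbacks omitted):

static struct generic_pm_domain example_pd = {
	.name	= "example-domain",
	/* .power_on / .power_off callbacks omitted for brevity */
};

static int example_pd_register(void)
{
	/* Use the PM clk framework; stay on while wakeup devices are active. */
	example_pd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;

	return pm_genpd_init(&example_pd, NULL, true);
}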
@@ -48,6 +73,7 @@ struct genpd_power_state {
struct genpd_lock_ops;
struct dev_pm_opp;
+struct opp_table;
struct generic_pm_domain {
struct device dev;
@@ -69,6 +95,7 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct opp_table *opp_table; /* OPP table of the genpd */
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
@@ -109,6 +136,10 @@ struct gpd_link {
struct list_head master_node;
struct generic_pm_domain *slave;
struct list_head slave_node;
+
+ /* Sub-domain's per-master domain performance state */
+ unsigned int performance_state;
+ unsigned int prev_performance_state;
};
struct gpd_timing_data {
@@ -233,8 +264,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np);
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -275,8 +306,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
}
static inline unsigned int
-of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
return 0;
}
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 099b31960dec..0a2a88e5a383 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -79,6 +79,7 @@ struct dev_pm_set_opp_data {
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
+struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
@@ -125,6 +126,9 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index);
+void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev);
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -136,6 +140,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
return ERR_PTR(-ENOTSUPP);
}
+static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
@@ -266,6 +275,18 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const
static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
+static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {}
+
+static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
+{
+ return -ENOTSUPP;
+}
+
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
return -ENOTSUPP;
@@ -299,8 +320,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
+int of_get_required_opp_performance_state(struct device_node *np, int index);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -335,13 +356,13 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
return NULL;
}
-static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
+static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
-static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
+static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return NULL;
+ return -ENOTSUPP;
}
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index f0fc4700b6ff..54af4eef169f 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -51,7 +51,7 @@ extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
-extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- WRITE_ONCE(dev->power.last_busy, jiffies);
+ WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
@@ -168,7 +168,7 @@ static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
-static inline unsigned long pm_runtime_autosuspend_expiration(
+static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
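From a driver's point of view the autosuspend idiom is unchanged by the switch to hrtimers; only the bookkeeping behind pm_runtime_mark_last_busy() moves from jiffies to nanoseconds. The conventional pattern, with a placeholder 50 ms delay:

static void example_runtime_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms, placeholder */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void example_request_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}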
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 9ac8fc60ad49..52453a24a24f 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_PMU_H
#define _LINUX_PMU_H
+#include <linux/rtc.h>
#include <uapi/linux/pmu.h>
@@ -36,6 +37,9 @@ static inline void pmu_resume(void)
extern void pmu_enable_irled(int on);
+extern time64_t pmu_get_time(void);
+extern int pmu_set_rtc_time(struct rtc_time *tm);
+
extern void pmu_restart(void);
extern void pmu_shutdown(void);
extern void pmu_unlock(void);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index ee7e987ea1b4..e96581ca7c9d 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -126,5 +126,5 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
-void posixtimer_rearm(struct siginfo *info);
+void posixtimer_rearm(struct kernel_siginfo *info);
#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index d6355f49fbae..507c5e214c42 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,6 +24,7 @@ enum bq27xxx_chip {
BQ27546,
BQ27742,
BQ27545, /* bq27545 */
+ BQ27411,
BQ27421, /* bq27421, bq27441, bq27621 */
BQ27425,
BQ27426,
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index c4fa907c8f14..2ce8d00c20de 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -119,7 +119,7 @@ struct charger_regulator {
struct charger_cable *cables;
int num_cables;
- struct attribute_group attr_g;
+ struct attribute_group attr_grp;
struct device_attribute attr_name;
struct device_attribute attr_state;
struct device_attribute attr_externally_control;
@@ -186,6 +186,7 @@ struct charger_desc {
int num_charger_regulators;
struct charger_regulator *charger_regulators;
+ const struct attribute_group **sysfs_groups;
const char *psy_fuel_gauge;
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 7b81dad712de..d0b37e937037 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OMAP Smartreflex Defines and Routines
*
@@ -11,10 +12,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __POWER_SMARTREFLEX_H
@@ -303,9 +300,6 @@ void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
-/* API to register the pmic specific data with the smartreflex driver. */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
-
/* Smartreflex driver hooks to be called from Smartreflex class driver */
int sr_enable(struct omap_sr *sr, unsigned long volt);
void sr_disable(struct omap_sr *sr);
@@ -320,7 +314,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable_reset_volt(
struct voltagedomain *voltdm) {}
-static inline void omap_sr_register_pmic(
- struct omap_sr_pmic_data *pmic_data) {}
#endif
#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b21c4bd96b84..57b2ab82b951 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -204,6 +204,9 @@ struct power_supply_config {
/* Driver private data */
void *drv_data;
+ /* Device specific sysfs attributes */
+ const struct attribute_group **attr_grp;
+
char **supplied_to;
size_t num_supplicants;
};
@@ -269,6 +272,7 @@ struct power_supply {
spinlock_t changed_lock;
bool changed;
bool initialized;
+ bool removing;
atomic_t use_cnt;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
@@ -308,6 +312,13 @@ struct power_supply_info {
int use_for_apm;
};
+struct power_supply_battery_ocv_table {
+ int ocv; /* microVolts */
+ int capacity; /* percent */
+};
+
+#define POWER_SUPPLY_OCV_TEMP_MAX 20
+
/*
* This is the recommended struct to manage static battery parameters,
* populated by power_supply_get_battery_info(). Most platform drivers should
@@ -325,6 +336,10 @@ struct power_supply_battery_info {
int charge_term_current_ua; /* microAmps */
int constant_charge_current_max_ua; /* microAmps */
int constant_charge_voltage_max_uv; /* microVolts */
+ int factory_internal_resistance_uohm; /* microOhms */
+ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
+ struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
+ int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
};
extern struct atomic_notifier_head power_supply_notifier;
@@ -348,6 +363,15 @@ devm_power_supply_get_by_phandle(struct device *dev, const char *property)
extern int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info);
+extern void power_supply_put_battery_info(struct power_supply *psy,
+ struct power_supply_battery_info *info);
+extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
+ int table_len, int ocv);
+extern struct power_supply_battery_ocv_table *
+power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
+ int temp, int *table_len);
+extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
+ int ocv, int temp);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_input_current_limit_from_supplier(
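
A sketch of how a fuel-gauge driver might consume the new OCV-table helpers declared above; the foo_fg_* naming is illustrative and error handling is kept minimal:

#include <linux/power_supply.h>

/* OCV in microvolts and temperature in degrees C in, capacity in percent
 * out; a negative value is returned when no table covers the request. */
static int foo_fg_ocv_to_capacity(struct power_supply *psy, int ocv_uv,
				  int temp)
{
	struct power_supply_battery_info info = { };
	int cap, ret;

	ret = power_supply_get_battery_info(psy, &info);
	if (ret)
		return ret;

	/* picks the OCV table closest to @temp and interpolates within it */
	cap = power_supply_batinfo_ocv2cap(&info, ocv_uv, temp);

	power_supply_put_battery_info(psy, &info);
	return cap;
}
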
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5bd3f151da78..dd92b1a93919 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -53,9 +53,6 @@
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
-
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
@@ -150,7 +147,7 @@
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cf3eccfe1543..77740a506ebb 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -166,11 +166,6 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
-asmlinkage __printf(5, 6) __cold
-int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...);
-
asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
@@ -269,7 +264,7 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
-static inline asmlinkage void dump_stack(void)
+static inline void dump_stack(void)
{
}
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 626fc65c4336..d0e1f1522a78 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -129,7 +129,7 @@ int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
/* get the associated pid namespace for a file in procfs */
-static inline struct pid_namespace *proc_pid_ns(struct inode *inode)
+static inline struct pid_namespace *proc_pid_ns(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
}
diff --git a/include/linux/property.h b/include/linux/property.h
index ac8a1ebc4c1b..3789ec755fb6 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -311,4 +311,16 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+/* -------------------------------------------------------------------------- */
+/* Software fwnode support - when HW description is incomplete or missing */
+
+bool is_software_node(const struct fwnode_handle *fwnode);
+
+int software_node_notify(struct device *dev, unsigned long action);
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent);
+void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+
#endif /* _LINUX_PROPERTY_H_ */
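
A sketch of the new software-node API, assuming a hypothetical foo device whose firmware description is incomplete and assuming the usual ERR_PTR() return convention for fwnode_create_software_node():

#include <linux/err.h>
#include <linux/property.h>

static const struct property_entry foo_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }
};

static struct fwnode_handle *foo_fwnode;

static int foo_create_node(void)
{
	foo_fwnode = fwnode_create_software_node(foo_props, NULL);
	if (IS_ERR(foo_fwnode))
		return PTR_ERR(foo_fwnode);

	/* ... point a device's fwnode at foo_fwnode before registering it ... */
	return 0;
}

static void foo_remove_node(void)
{
	fwnode_remove_software_node(foo_fwnode);
}
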
diff --git a/include/linux/psi.h b/include/linux/psi.h
new file mode 100644
index 000000000000..7006008d5b72
--- /dev/null
+++ b/include/linux/psi.h
@@ -0,0 +1,54 @@
+#ifndef _LINUX_PSI_H
+#define _LINUX_PSI_H
+
+#include <linux/jump_label.h>
+#include <linux/psi_types.h>
+#include <linux/sched.h>
+
+struct seq_file;
+struct css_set;
+
+#ifdef CONFIG_PSI
+
+extern struct static_key_false psi_disabled;
+
+void psi_init(void);
+
+void psi_task_change(struct task_struct *task, int clear, int set);
+
+void psi_memstall_tick(struct task_struct *task, int cpu);
+void psi_memstall_enter(unsigned long *flags);
+void psi_memstall_leave(unsigned long *flags);
+
+int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
+#endif
+
+#else /* CONFIG_PSI */
+
+static inline void psi_init(void) {}
+
+static inline void psi_memstall_enter(unsigned long *flags) {}
+static inline void psi_memstall_leave(unsigned long *flags) {}
+
+#ifdef CONFIG_CGROUPS
+static inline int psi_cgroup_alloc(struct cgroup *cgrp)
+{
+ return 0;
+}
+static inline void psi_cgroup_free(struct cgroup *cgrp)
+{
+}
+static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
+{
+ rcu_assign_pointer(p->cgroups, to);
+}
+#endif
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_H */
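
A sketch of the intended annotation pattern for the memstall helpers above, with the stalled work reduced to a placeholder comment:

#include <linux/psi.h>

static void foo_shrink_caches(void)
{
	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* ... work performed only because memory is short, e.g. reclaim ... */
	psi_memstall_leave(&pflags);
}
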
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
new file mode 100644
index 000000000000..2cf422db5d18
--- /dev/null
+++ b/include/linux/psi_types.h
@@ -0,0 +1,92 @@
+#ifndef _LINUX_PSI_TYPES_H
+#define _LINUX_PSI_TYPES_H
+
+#include <linux/seqlock.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_PSI
+
+/* Tracked task states */
+enum psi_task_count {
+ NR_IOWAIT,
+ NR_MEMSTALL,
+ NR_RUNNING,
+ NR_PSI_TASK_COUNTS,
+};
+
+/* Task state bitmasks */
+#define TSK_IOWAIT (1 << NR_IOWAIT)
+#define TSK_MEMSTALL (1 << NR_MEMSTALL)
+#define TSK_RUNNING (1 << NR_RUNNING)
+
+/* Resources that workloads could be stalled on */
+enum psi_res {
+ PSI_IO,
+ PSI_MEM,
+ PSI_CPU,
+ NR_PSI_RESOURCES,
+};
+
+/*
+ * Pressure states for each resource:
+ *
+ * SOME: Stalled tasks & working tasks
+ * FULL: Stalled tasks & no working tasks
+ */
+enum psi_states {
+ PSI_IO_SOME,
+ PSI_IO_FULL,
+ PSI_MEM_SOME,
+ PSI_MEM_FULL,
+ PSI_CPU_SOME,
+ /* Only per-CPU, to weigh the CPU in the global average: */
+ PSI_NONIDLE,
+ NR_PSI_STATES,
+};
+
+struct psi_group_cpu {
+ /* 1st cacheline updated by the scheduler */
+
+ /* Aggregator needs to know of concurrent changes */
+ seqcount_t seq ____cacheline_aligned_in_smp;
+
+ /* States of the tasks belonging to this group */
+ unsigned int tasks[NR_PSI_TASK_COUNTS];
+
+ /* Period time sampling buckets for each state of interest (ns) */
+ u32 times[NR_PSI_STATES];
+
+ /* Time of last task change in this group (rq_clock) */
+ u64 state_start;
+
+ /* 2nd cacheline updated by the aggregator */
+
+ /* Delta detection against the sampling buckets */
+ u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp;
+};
+
+struct psi_group {
+ /* Protects data updated during an aggregation */
+ struct mutex stat_lock;
+
+ /* Per-cpu task state & time tracking */
+ struct psi_group_cpu __percpu *pcpu;
+
+ /* Periodic aggregation state */
+ u64 total_prev[NR_PSI_STATES - 1];
+ u64 last_update;
+ u64 next_update;
+ struct delayed_work clock_work;
+
+ /* Total stall times and sampled pressure averages */
+ u64 total[NR_PSI_STATES - 1];
+ unsigned long avg[NR_PSI_STATES - 1][3];
+};
+
+#else /* CONFIG_PSI */
+
+struct psi_group { };
+
+#endif /* CONFIG_PSI */
+
+#endif /* _LINUX_PSI_TYPES_H */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..b146181e8709 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -26,27 +26,38 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/semaphore.h>
#include <linux/time.h>
#include <linux/types.h>
struct module;
-/* pstore record types (see fs/pstore/inode.c for filename templates) */
+/*
+ * pstore record types (see fs/pstore/platform.c for pstore_type_names[])
+ * These values may be written to storage (see EFI vars backend), so
+ * they are kind of an ABI. Be careful changing the mappings.
+ */
enum pstore_type_id {
+ /* Frontend storage types */
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
PSTORE_TYPE_CONSOLE = 2,
PSTORE_TYPE_FTRACE = 3,
- /* PPC64 partition types */
+
+ /* PPC64-specific partition types */
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_PPC_OPAL = 8,
- PSTORE_TYPE_UNKNOWN = 255
+
+ /* End of the list */
+ PSTORE_TYPE_MAX
};
+const char *pstore_type_to_name(enum pstore_type_id type);
+enum pstore_type_id pstore_name_to_type(const char *name);
+
struct pstore_info;
/**
* struct pstore_record - details of a pstore record entry
@@ -85,12 +96,15 @@ struct pstore_record {
/**
* struct pstore_info - backend pstore driver structure
*
- * @owner: module which is repsonsible for this backend driver
+ * @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: spinlock to serialize access to @buf
+ * @buf_lock: semaphore to serialize access to @buf
* @buf: preallocated crash dump buffer
- * @bufsize: size of @buf available for crash dump writes
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+ * backend entry, since compressed bytes don't take kindly
+ * to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
@@ -170,7 +184,7 @@ struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock;
+ struct semaphore buf_lock;
char *buf;
size_t bufsize;
@@ -189,14 +203,13 @@ struct pstore_info {
};
/* Supported frontends */
-#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_CONSOLE (1 << 1)
-#define PSTORE_FLAGS_FTRACE (1 << 2)
-#define PSTORE_FLAGS_PMSG (1 << 3)
+#define PSTORE_FLAGS_DMESG BIT(0)
+#define PSTORE_FLAGS_CONSOLE BIT(1)
+#define PSTORE_FLAGS_FTRACE BIT(2)
+#define PSTORE_FLAGS_PMSG BIT(3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
struct pstore_ftrace_record {
unsigned long ip;
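
A skeleton of a hypothetical backend using the reworked fields above (semaphore buf_lock, BIT()-based frontend flags); a real backend must also supply at least the write callback, which is omitted here:

#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/semaphore.h>

static char foo_buf[4096];

static struct pstore_info foo_pstore = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.flags		= PSTORE_FLAGS_DMESG | PSTORE_FLAGS_CONSOLE,
	.buf		= foo_buf,
	.bufsize	= sizeof(foo_buf),
	/* .write (mandatory), .read, .erase, ... omitted in this sketch */
};

static int __init foo_pstore_init(void)
{
	/* harmless even if the core also initializes the semaphore */
	sema_init(&foo_pstore.buf_lock, 1);
	return pstore_register(&foo_pstore);
}
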
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index e6d226464838..337971c41980 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/pstore.h>
#include <linux/types.h>
/*
@@ -30,6 +31,11 @@
* PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
*/
#define PRZ_FLAG_NO_LOCK BIT(0)
+/*
+ * If a PRZ should only have a single-boot lifetime, this marks it as
+ * getting wiped after its contents get copied out after boot.
+ */
+#define PRZ_FLAG_ZAP_OLD BIT(1)
struct persistent_ram_buffer;
struct rs_control;
@@ -42,16 +48,55 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
+/**
+ * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
+ * used as a pstore backend
+ *
+ * @paddr: physical address of the mapped RAM area
+ * @size: size of mapping
+ * @label: unique name of this PRZ
+ * @type: frontend type for this PRZ
+ * @flags: holds PRZ_FLAGS_* bits
+ *
+ * @buffer_lock:
+ * locks access to @buffer "size" bytes and "start" offset
+ * @buffer:
+ * pointer to actual RAM area managed by this PRZ
+ * @buffer_size:
+ * bytes in @buffer->data (not including any trailing ECC bytes)
+ *
+ * @par_buffer:
+ * pointer into @buffer->data containing ECC bytes for @buffer->data
+ * @par_header:
+ * pointer into @buffer->data containing ECC bytes for @buffer header
+ * (i.e. all fields up to @data)
+ * @rs_decoder:
+ * RSLIB instance for doing ECC calculations
+ * @corrected_bytes:
+ * ECC corrected bytes accounting since boot
+ * @bad_blocks:
+ * ECC uncorrectable bytes accounting since boot
+ * @ecc_info:
+ * ECC configuration details
+ *
+ * @old_log:
+ * saved copy of @buffer->data prior to most recent wipe
+ * @old_log_size:
+ * bytes contained in @old_log
+ *
+ */
struct persistent_ram_zone {
phys_addr_t paddr;
size_t size;
void *vaddr;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
+ char *label;
+ enum pstore_type_id type;
u32 flags;
+
raw_spinlock_t buffer_lock;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
- /* ECC correction */
char *par_buffer;
char *par_header;
struct rs_control *rs_decoder;
@@ -65,7 +110,7 @@ struct persistent_ram_zone {
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags);
+ unsigned int memtype, u32 flags, char *label);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz);
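
A sketch of creating a zone with the new label argument; the address, size, signature and ECC values are placeholders, and the label is passed as an allocation the zone can keep, mirroring what ramoops does with its per-zone labels:

#include <linux/err.h>
#include <linux/pstore_ram.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static struct persistent_ram_zone *foo_prz;

static int foo_prz_create(void)
{
	struct persistent_ram_ecc_info ecc = { .ecc_size = 16 };
	char *label = kstrdup("foo: console", GFP_KERNEL);

	if (!label)
		return -ENOMEM;

	foo_prz = persistent_ram_new(0x9ff00000, SZ_64K, 0x43474244, &ecc,
				     0 /* memtype */, PRZ_FLAG_ZAP_OLD, label);
	if (IS_ERR(foo_prz))
		return PTR_ERR(foo_prz);	/* label cleanup elided here */
	return 0;
}
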
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 51349d124ee5..7121bbe76979 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -39,6 +39,15 @@ struct ptp_clock_request {
};
struct system_device_crosststamp;
+
+/**
+ * struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ */
+struct ptp_system_timestamp {
+ struct timespec64 pre_ts;
+ struct timespec64 post_ts;
+};
+
/**
* struct ptp_clock_info - decribes a PTP hardware clock
*
@@ -73,8 +82,18 @@ struct system_device_crosststamp;
* parameter delta: Desired change in nanoseconds.
*
* @gettime64: Reads the current time from the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @gettimex64 method instead.
* parameter ts: Holds the result.
*
+ * @gettimex64: Reads the current time from the hardware clock and optionally
+ * also the system clock.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps from
+ * the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
* @getcrosststamp: Reads the current time from the hardware clock and
* system clock simultaneously.
* parameter cts: Contains timestamp (device,system) pair,
@@ -124,6 +143,8 @@ struct ptp_clock_info {
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
@@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
#endif
+static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->pre_ts);
+}
+
+static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->post_ts);
+}
+
#endif
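
A sketch of a driver-side gettimex64() implementation using the new helpers; foo_read_phc_ns() stands in for the device-specific counter read:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static u64 foo_read_phc_ns(struct ptp_clock_info *ptp)
{
	/* device-specific read of the free-running PHC counter goes here */
	return 0;
}

static int foo_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
			  struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* no-op when sts is NULL */
	ns = foo_read_phc_ns(ptp);
	ptp_read_system_postts(sts);

	*ts = ns_to_timespec64(ns);
	return 0;
}

static const struct ptp_clock_info foo_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "foo-phc",
	.gettimex64	= foo_gettimex64,
	/* .settime64, .adjtime, .adjfine, ... omitted */
};
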
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6894976b54e3..186cd8e970c7 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
+ if (producer >= size)
+ producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 037bf0ef1ae9..edb9b040c94c 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -62,8 +62,8 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
@@ -214,8 +214,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
else
sigaddset(&child->pending.signal, SIGSTOP);
-
- set_tsk_thread_flag(child, TIF_SIGPENDING);
}
else
child->ptracer_cred = NULL;
@@ -338,14 +336,19 @@ static inline void user_enable_block_step(struct task_struct *task)
extern void user_enable_block_step(struct task_struct *);
#endif /* arch_has_block_step */
-#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
-extern void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info);
+#ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
+extern void user_single_step_report(struct pt_regs *regs);
#else
-static inline void user_single_step_siginfo(struct task_struct *tsk,
- struct pt_regs *regs, siginfo_t *info)
+static inline void user_single_step_report(struct pt_regs *regs)
{
- info->si_signo = SIGTRAP;
+ kernel_siginfo_t info;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = 0;
+ info.si_uid = 0;
+ force_sig_info(info.si_signo, &info, current);
}
#endif
@@ -408,4 +411,5 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
+extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 56518adc31dd..d5199b507d79 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -349,42 +349,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
}
/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct pwm_state state;
-
- if (!pwm)
- return -EINVAL;
-
- pwm_get_state(pwm, &state);
- if (state.polarity == polarity)
- return 0;
-
- /*
- * Changing the polarity of a running PWM without adjusting the
- * dutycycle/period value is a bit risky (can introduce glitches).
- * Return -EBUSY in this case.
- * Note that this is allowed when using pwm_apply_state() because
- * the user specifies all the parameters.
- */
- if (state.enabled)
- return -EBUSY;
-
- state.polarity = polarity;
- return pwm_apply_state(pwm, &state);
-}
-
-/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
*
@@ -483,12 +447,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
return -EINVAL;
}
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- return -ENOTSUPP;
-}
-
static inline int pwm_enable(struct pwm_device *pwm)
{
return -EINVAL;
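
With pwm_set_polarity() removed, polarity is configured through the atomic state API instead; a sketch with placeholder timing values:

#include <linux/pwm.h>

static int foo_pwm_setup(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	state.period = 1000000;			/* 1 ms, in nanoseconds */
	state.duty_cycle = 250000;		/* 25 % duty */
	state.polarity = PWM_POLARITY_INVERSED;
	state.enabled = true;

	/* one atomic update instead of pwm_set_polarity() + pwm_config() */
	return pwm_apply_state(pwm, &state);
}
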
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 13b4244d44c1..979087e021f3 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -196,6 +196,7 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
+ MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
QUARK_X1000_SSP,
@@ -217,7 +218,7 @@ struct ssp_device {
const char *label;
int port_id;
- int type;
+ enum pxa_ssp_type type;
int use_count;
int irq;
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 5d6144977828..3bcd67fd5548 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -225,19 +225,14 @@ struct geni_se {
#define HW_VER_MINOR_SHFT 16
#define HW_VER_STEP_MASK GENMASK(15, 0)
+#define GENI_SE_VERSION_MAJOR(ver) ((ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT)
+#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
+#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
-#define geni_se_get_wrapper_version(se, major, minor, step) do { \
- u32 ver; \
-\
- ver = geni_se_get_qup_hw_version(se); \
- major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
- minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
- step = version & HW_VER_STEP_MASK; \
-} while (0)
-
/**
* geni_se_read_proto() - Read the protocol configured for a serial engine
* @se: Pointer to the concerned serial engine.
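
A sketch of the new version macros, which replace the removed helper macro (whose body referenced an undefined 'version' variable):

#include <linux/printk.h>
#include <linux/qcom-geni-se.h>

static void foo_log_se_version(struct geni_se *se)
{
	u32 ver = geni_se_get_qup_hw_version(se);

	pr_info("geni wrapper v%u.%u (step %u)\n",
		GENI_SE_VERSION_MAJOR(ver), GENI_SE_VERSION_MINOR(ver),
		GENI_SE_VERSION_STEP(ver));
}
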
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 5d65521260b3..1637385bcc17 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -33,6 +33,8 @@ struct qcom_scm_vmperm {
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
#define QCOM_SCM_PERM_READ 0x4
#define QCOM_SCM_PERM_WRITE 0x2
#define QCOM_SCM_PERM_EXEC 0x1
@@ -65,6 +67,9 @@ extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
#else
+
+#include <linux/errno.h>
+
static inline
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 0081fa6d1268..03f59a28fefd 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -110,7 +110,7 @@
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 37
-#define FW_REVISION_VERSION 2
+#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -931,12 +931,12 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index b34c573f2b30..66aba505ec56 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -896,7 +896,7 @@ struct e4_ustorm_iscsi_task_ag_ctx {
__le32 exp_cont_len;
__le32 total_data_acked;
__le32 exp_data_acked;
- u8 next_tid_valid;
+ u8 byte2;
u8 byte3;
__le16 word1;
__le16 next_tid;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8cd34645e892..91c536a01b56 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -47,6 +47,7 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -448,11 +449,24 @@ struct qed_mfw_tlv_iscsi {
bool tx_bytes_set;
};
+enum qed_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum qed_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
+ (void __iomem *)(reg_addr))
+
#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48
@@ -667,14 +681,35 @@ enum qed_link_mode_bits {
QED_LM_Autoneg_BIT = BIT(1),
QED_LM_Asym_Pause_BIT = BIT(2),
QED_LM_Pause_BIT = BIT(3),
- QED_LM_1000baseT_Half_BIT = BIT(4),
- QED_LM_1000baseT_Full_BIT = BIT(5),
+ QED_LM_1000baseT_Full_BIT = BIT(4),
+ QED_LM_10000baseT_Full_BIT = BIT(5),
QED_LM_10000baseKR_Full_BIT = BIT(6),
- QED_LM_25000baseKR_Full_BIT = BIT(7),
- QED_LM_40000baseLR4_Full_BIT = BIT(8),
- QED_LM_50000baseKR2_Full_BIT = BIT(9),
- QED_LM_100000baseKR4_Full_BIT = BIT(10),
- QED_LM_COUNT = 11
+ QED_LM_20000baseKR2_Full_BIT = BIT(7),
+ QED_LM_25000baseKR_Full_BIT = BIT(8),
+ QED_LM_40000baseLR4_Full_BIT = BIT(9),
+ QED_LM_50000baseKR2_Full_BIT = BIT(10),
+ QED_LM_100000baseKR4_Full_BIT = BIT(11),
+ QED_LM_2500baseX_Full_BIT = BIT(12),
+ QED_LM_Backplane_BIT = BIT(13),
+ QED_LM_1000baseKX_Full_BIT = BIT(14),
+ QED_LM_10000baseKX4_Full_BIT = BIT(15),
+ QED_LM_10000baseR_FEC_BIT = BIT(16),
+ QED_LM_40000baseKR4_Full_BIT = BIT(17),
+ QED_LM_40000baseCR4_Full_BIT = BIT(18),
+ QED_LM_40000baseSR4_Full_BIT = BIT(19),
+ QED_LM_25000baseCR_Full_BIT = BIT(20),
+ QED_LM_25000baseSR_Full_BIT = BIT(21),
+ QED_LM_50000baseCR2_Full_BIT = BIT(22),
+ QED_LM_100000baseSR4_Full_BIT = BIT(23),
+ QED_LM_100000baseCR4_Full_BIT = BIT(24),
+ QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
+ QED_LM_50000baseSR2_Full_BIT = BIT(26),
+ QED_LM_1000baseX_Full_BIT = BIT(27),
+ QED_LM_10000baseCR_Full_BIT = BIT(28),
+ QED_LM_10000baseSR_Full_BIT = BIT(29),
+ QED_LM_10000baseLR_Full_BIT = BIT(30),
+ QED_LM_10000baseLRM_Full_BIT = BIT(31),
+ QED_LM_COUNT = 32
};
struct qed_link_params {
@@ -994,6 +1029,33 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_is_32b - doorbell is 32b or 64b
+ * @param db_is_user - doorbell recovery addresses are user or kernel space
+ */
+ int (*db_recovery_add)(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+ int (*db_recovery_del)(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
/**
* @brief update_drv_state - API to inform the change in the driver state.
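
A sketch of how an upper-layer driver could hook a doorbell into the new recovery mechanism through qed_common_ops; the surrounding driver state is reduced to plain function parameters here:

#include <linux/qed/qed_if.h>

static int foo_db_register(struct qed_dev *cdev,
			   const struct qed_common_ops *ops,
			   void __iomem *db_addr, u32 *db_data)
{
	/* db_data is the key; the same pointer must be used for deletion */
	return ops->db_recovery_add(cdev, db_addr, db_data,
				    DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

static void foo_db_unregister(struct qed_dev *cdev,
			      const struct qed_common_ops *ops,
			      void __iomem *db_addr, u32 *db_data)
{
	ops->db_recovery_del(cdev, db_addr, db_data);
}
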
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index df4d13f7e191..d15f8e4815e3 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -39,15 +39,6 @@
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>
-enum qed_roce_ll2_tx_dest {
- /* Light L2 TX Destination to the Network */
- QED_ROCE_LL2_TX_DEST_NW,
-
- /* Light L2 TX Destination to the Loopback */
- QED_ROCE_LL2_TX_DEST_LB,
- QED_ROCE_LL2_TX_DEST_MAX
-};
-
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
/* rdma interface */
@@ -581,7 +572,7 @@ struct qed_roce_ll2_packet {
int n_seg;
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
- enum qed_roce_ll2_tx_dest tx_dest;
+ enum qed_ll2_tx_dest tx_dest;
};
enum qed_rdma_type {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index ca9772c8e48b..f32dd270b8e3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -408,13 +408,7 @@ struct qc_type_state {
struct qc_state {
unsigned int s_incoredqs; /* Number of dquots in core */
- /*
- * Per quota type information. The array should really have
- * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
- * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
- * supports project quotas, this can be changed to MAXQUOTAS
- */
- struct qc_type_state s_state[XQM_MAXQUOTAS];
+ struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */
};
/* Structure for communicating via ->set_info */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 34149e8b5f73..06c4c7a6c09c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -28,34 +28,30 @@
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/xarray.h>
+
+/* Keep unconverted code working */
+#define radix_tree_root xarray
+#define radix_tree_node xa_node
/*
* The bottom two bits of the slot determine how the remaining bits in the
* slot are interpreted:
*
* 00 - data pointer
- * 01 - internal entry
- * 10 - exceptional entry
- * 11 - this bit combination is currently unused/reserved
+ * 10 - internal entry
+ * x1 - value entry
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
* to another location in the tree and the lookup should be restarted. While
* NULL fits the 'data pointer' pattern, it means that there is no entry in
* the tree for this index (no matter what level of the tree it is found at).
- * This means that you cannot store NULL in the tree as a value for the index.
+ * This means that storing a NULL entry in the tree is the same as deleting
+ * the entry from the tree.
*/
#define RADIX_TREE_ENTRY_MASK 3UL
-#define RADIX_TREE_INTERNAL_NODE 1UL
-
-/*
- * Most users of the radix tree store pointers but shmem/tmpfs stores swap
- * entries in the same tree. They are marked as exceptional entries to
- * distinguish them from pointers to struct page.
- * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
- */
-#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
-#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
+#define RADIX_TREE_INTERNAL_NODE 2UL
static inline bool radix_tree_is_internal_node(void *ptr)
{
@@ -65,75 +61,32 @@ static inline bool radix_tree_is_internal_node(void *ptr)
/*** radix-tree API starts here ***/
-#define RADIX_TREE_MAX_TAGS 3
-
-#ifndef RADIX_TREE_MAP_SHIFT
-#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
-#endif
-
+#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
-#define RADIX_TREE_TAG_LONGS \
- ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
+#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
-/*
- * @count is the count of every non-NULL element in the ->slots array
- * whether that is an exceptional entry, a retry entry, a user pointer,
- * a sibling entry or a pointer to the next level of the tree.
- * @exceptional is the count of every element in ->slots which is
- * either radix_tree_exceptional_entry() or is a sibling entry for an
- * exceptional entry.
- */
-struct radix_tree_node {
- unsigned char shift; /* Bits remaining in each slot */
- unsigned char offset; /* Slot offset in parent */
- unsigned char count; /* Total entry count */
- unsigned char exceptional; /* Exceptional entry count */
- struct radix_tree_node *parent; /* Used when ascending tree */
- struct radix_tree_root *root; /* The tree we belong to */
- union {
- struct list_head private_list; /* For tree user */
- struct rcu_head rcu_head; /* Used when freeing node */
- };
- void __rcu *slots[RADIX_TREE_MAP_SIZE];
- unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
-/* The IDR tag is stored in the low bits of the GFP flags */
+/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR ((__force gfp_t)4)
-/* The top bits of gfp_mask are used to store the root tags */
+/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
-struct radix_tree_root {
- spinlock_t xa_lock;
- gfp_t gfp_mask;
- struct radix_tree_node __rcu *rnode;
-};
-
-#define RADIX_TREE_INIT(name, mask) { \
- .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
- .gfp_mask = (mask), \
- .rnode = NULL, \
-}
+#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
-#define INIT_RADIX_TREE(root, mask) \
-do { \
- spin_lock_init(&(root)->xa_lock); \
- (root)->gfp_mask = (mask); \
- (root)->rnode = NULL; \
-} while (0)
+#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
- return root->rnode == NULL;
+ return root->xa_head == NULL;
}
/**
@@ -143,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root)
* @next_index: one beyond the last index for this chunk
* @tags: bit-mask for tag-iterating
* @node: node that contains current slot
- * @shift: shift for the node that holds our slots
*
* This radix tree iterator works in terms of "chunks" of slots. A chunk is a
* subinterval of slots contained within one radix tree leaf node. It is
@@ -157,20 +109,8 @@ struct radix_tree_iter {
unsigned long next_index;
unsigned long tags;
struct radix_tree_node *node;
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- unsigned int shift;
-#endif
};
-static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
- return iter->shift;
-#else
- return 0;
-#endif
-}
-
/**
* Radix-tree synchronization
*
@@ -194,12 +134,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
* radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
- * radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
- * The first 8 functions are able to be called locklessly, using RCU. The
+ * The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
@@ -269,17 +208,6 @@ static inline int radix_tree_deref_retry(void *arg)
}
/**
- * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
- * @arg: value returned by radix_tree_deref_slot
- * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
- */
-static inline int radix_tree_exceptional_entry(void *arg)
-{
- /* Not unlikely because radix_tree_exception often tested first */
- return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
-}
-
-/**
* radix_tree_exception - radix_tree_deref_slot returned either exception?
* @arg: value returned by radix_tree_deref_slot
* Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
@@ -289,47 +217,28 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
-int __radix_tree_create(struct radix_tree_root *, unsigned long index,
- unsigned order, struct radix_tree_node **nodep,
- void __rcu ***slotp);
-int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
- unsigned order, void *);
-static inline int radix_tree_insert(struct radix_tree_root *root,
- unsigned long index, void *entry)
-{
- return __radix_tree_insert(root, index, 0, entry);
-}
+int radix_tree_insert(struct radix_tree_root *, unsigned long index,
+ void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
-typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
- void __rcu **slot, void *entry,
- radix_tree_update_node_t update_node);
+ void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
void __rcu **slot, void *entry);
-void __radix_tree_delete_node(struct radix_tree_root *,
- struct radix_tree_node *,
- radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
- void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
-unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
- void __rcu ***results, unsigned long *indices,
- unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
-int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
@@ -337,8 +246,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag);
-void radix_tree_iter_tag_set(struct radix_tree_root *,
- const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
@@ -354,12 +261,6 @@ static inline void radix_tree_preload_end(void)
preempt_enable();
}
-int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
-int radix_tree_split(struct radix_tree_root *, unsigned long index,
- unsigned new_order);
-int radix_tree_join(struct radix_tree_root *, unsigned long index,
- unsigned new_order, void *);
-
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
@@ -465,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
- return iter->index + (slots << iter_shift(iter));
+ return iter->index + slots;
}
/**
@@ -490,21 +391,9 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
- return (iter->next_index - iter->index) >> iter_shift(iter);
+ return iter->next_index - iter->index;
}
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags);
-#else
-/* Can't happen without sibling entries, but the compiler can't tell that */
-static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags)
-{
- return slot;
-}
-#endif
-
/**
* radix_tree_next_slot - find next slot in chunk
*
@@ -563,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
return NULL;
found:
- if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
- return __radix_tree_next_slot(slot, iter, flags);
return slot;
}
@@ -584,23 +471,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
slot = radix_tree_next_slot(slot, iter, 0))
/**
- * radix_tree_for_each_contig - iterate over contiguous slots
- *
- * @slot: the void** variable for pointer to slot
- * @root: the struct radix_tree_root pointer
- * @iter: the struct radix_tree_iter pointer
- * @start: iteration starting index
- *
- * @slot points to radix tree slot, @iter->index contains its index.
- */
-#define radix_tree_for_each_contig(slot, root, iter, start) \
- for (slot = radix_tree_iter_init(iter, start) ; \
- slot || (slot = radix_tree_next_chunk(root, iter, \
- RADIX_TREE_ITER_CONTIG)) ; \
- slot = radix_tree_next_slot(slot, iter, \
- RADIX_TREE_ITER_CONTIG))
-
-/**
* radix_tree_for_each_tagged - iterate over tagged slots
*
* @slot: the void** variable for pointer to slot
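
Since the radix tree is now a wrapper around the XArray, new code is expected to use the XArray API directly; a minimal store/load/erase sketch with illustrative foo_* names:

#include <linux/xarray.h>

static DEFINE_XARRAY(foo_array);

static int foo_store(unsigned long index, void *item)
{
	/* xa_store() returns the previous entry or an xa_err() pointer;
	 * storing NULL is equivalent to erasing, as noted above */
	return xa_err(xa_store(&foo_array, index, item, GFP_KERNEL));
}

static void *foo_find(unsigned long index)
{
	return xa_load(&foo_array, index);	/* RCU-protected lookup */
}

static void foo_remove(unsigned long index)
{
	xa_erase(&foo_array, index);
}
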
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index af8a61be2d8d..9510c677ac70 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node,
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
- * rb_augment_inserted() instead of the usual rb_insert_color() call.
- * If rb_augment_inserted() rebalances the rbtree, it will callback into
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4786c2235b98..e91ec9ddcd30 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync: synchronize_rcu, synchronize_rcu_expedited, ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding. One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends). Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding. One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked. Another example is when items are added to the list,
+ * but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75e5b393cf44..4db8bcacc51a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,23 +48,14 @@
#define ulong2long(a) (*(long *)(&(a)))
/* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
void rcu_barrier_tasks(void);
+void synchronize_rcu(void);
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void synchronize_rcu(void);
/*
* Defined as a macro as it is a very low level header included from
@@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void)
preempt_enable();
}
-static inline void synchronize_rcu(void)
-{
- synchronize_sched();
-}
-
static inline int rcu_preempt_depth(void)
{
return 0;
@@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
@@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
* @a: Code that RCU needs to pay attention to.
*
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
*
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
* will tell RCU that it needs to pay attention, invoke its argument
@@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { }
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
-#define rcu_note_voluntary_context_switch(t) \
- do { \
- rcu_all_qs(); \
- rcu_tasks_qs(t); \
- } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
#define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* Helper functions for rcu_dereference_check(), rcu_dereference_protected()
* and rcu_assign_pointer(). Some of these could be folded into their
* callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointer markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
*/
#ifdef __CHECKER__
@@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void)
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/*
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure. Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+ rhp->func = (rcu_callback_t)~0L;
+}
+
+/*
+ * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @func: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if the @rhp has been passed to call_rcu() with @func,
+ * and @false otherwise. Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation. One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+ if (READ_ONCE(rhp->func) == f)
+ return true;
+ WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+ return false;
+}
+
+
+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+ rcu_barrier();
+}
+
+static inline void synchronize_sched(void)
+{
+ synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+ synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+ return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
+
#endif /* __LINUX_RCUPDATE_H */
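
A sketch of the intended pairing of rcu_head_init() with rcu_head_after_call_rcu(), using a hypothetical foo structure and respecting the documented constraint that the check must not race with callback invocation:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rh;
	int payload;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

static struct foo *foo_alloc(void)
{
	struct foo *fp = kzalloc(sizeof(*fp), GFP_KERNEL);

	if (fp)
		rcu_head_init(&fp->rh);	/* required for the check below */
	return fp;
}

static void foo_free_deferred(struct foo *fp)
{
	/* queue the object only once; the check warns on anything else */
	if (!rcu_head_after_call_rcu(&fp->rh, foo_reclaim))
		call_rcu(&fp->rh, foo_reclaim);
}
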
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 57f371344152..c0578ba23c1a 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -31,21 +31,4 @@ do { \
#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
-/**
- * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
- *
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods. Waiting on a give SRCU
- * domain requires you to write a wrapper function for that SRCU domain's
- * call_srcu() function, supplying the corresponding srcu_struct.
- *
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
- */
-#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
-
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 8d9a0ea8f0b5..af65d1f36ddb 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,12 +27,6 @@
#include <linux/ktime.h>
-struct rcu_dynticks;
-static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
-{
- return 0;
-}
-
/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }
@@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
might_sleep();
}
-static inline unsigned long get_state_synchronize_sched(void)
-{
- return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
- might_sleep();
-}
-
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+extern void rcu_barrier(void);
static inline void synchronize_rcu_expedited(void)
{
- synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+ synchronize_rcu();
}
-static inline void rcu_barrier(void)
+static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
-}
-
-static inline void synchronize_rcu_bh(void)
-{
- synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched();
+ call_rcu(head, func);
}
-static inline void synchronize_sched_expedited(void)
-{
- synchronize_sched();
-}
+void rcu_qs(void);
-static inline void kfree_call_rcu(struct rcu_head *head,
- rcu_callback_t func)
+static inline void rcu_softirq_qs(void)
{
- call_rcu(head, func);
+ rcu_qs();
}
#define rcu_note_context_switch(preempt) \
do { \
- rcu_sched_qs(); \
+ rcu_qs(); \
rcu_tasks_qs(current); \
} while (0)
@@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
*/
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
@@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+ return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifndef CONFIG_SRCU */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 914655848ef6..7f83179177d1 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
+void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
@@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu)
rcu_note_context_switch(false);
}
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-
void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restrictions will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
- synchronize_sched_expedited();
-}
-
void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
bool rcu_eqs_special_set(int cpu);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
void rcu_idle_enter(void);
void rcu_idle_exit(void);
@@ -93,7 +66,9 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT
void rcu_all_qs(void);
+#endif
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 379505a53722..1781b6cb793c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @writeable_noinc_reg: Optional callback returning true if the register
+ * supports multiple write operations without incrementing
+ * the register number. If this field is NULL but
+ * wr_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * writeable if it belongs to one of the ranges specified
+ * by wr_noinc_table).
* @readable_noinc_reg: Optional callback returning true if the register
* supports multiple read operations without incrementing
* the register number. If this field is NULL but
@@ -302,6 +309,7 @@ typedef void (*regmap_unlock)(void *);
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @wr_noinc_table: As above, for no increment writeable registers.
* @rd_noinc_table: As above, for no increment readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
@@ -315,9 +323,12 @@ typedef void (*regmap_unlock)(void *);
* masks are used.
* @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
* if they are both empty.
- * @use_single_rw: If set, converts the bulk read and write operations into
- * a series of single read and write operations. This is useful
- * for device that does not support bulk read and write.
+ * @use_single_read: If set, converts the bulk read operation into a series of
+ * single read operations. This is useful for a device that
+ * does not support bulk read.
+ * @use_single_write: If set, converts the bulk write operation into a series of
+ * single write operations. This is useful for a device that
+ * does not support bulk write.
* @can_multi_write: If set, the device supports the multi write mode of bulk
* write operations, if clear multi write requests will be
* split into individual write operations
@@ -352,6 +363,7 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
@@ -369,6 +381,7 @@ struct regmap_config {
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
@@ -380,7 +393,8 @@ struct regmap_config {
unsigned long write_flag_mask;
bool zero_flag_mask;
- bool use_single_rw;
+ bool use_single_read;
+ bool use_single_write;
bool can_multi_write;
enum regmap_endian reg_format_endian;
@@ -979,6 +993,8 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
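
As a hypothetical illustration of this write-side counterpart to the existing no-increment read support, the fragment below streams a buffer into a single FIFO-style register. The register address, config values, and helper names are all invented.

        #include <linux/regmap.h>

        #define HYP_FIFO_REG    0x10    /* invented register address */

        static bool hyp_writeable_noinc_reg(struct device *dev, unsigned int reg)
        {
                return reg == HYP_FIFO_REG;
        }

        static const struct regmap_config hyp_regmap_config = {
                .reg_bits = 8,
                .val_bits = 8,
                .writeable_noinc_reg = hyp_writeable_noinc_reg,
        };

        static int hyp_send(struct regmap *map, const void *buf, size_t len)
        {
                /* All bytes land in HYP_FIFO_REG; the register number never increments. */
                return regmap_noinc_write(map, HYP_FIFO_REG, buf, len);
        }
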
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
@@ -1073,27 +1089,48 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_reg_mask: Mask of the bits used for the irq type setting in the type register.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
/**
* struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
*
* @reg_offset: Offset of the status/mask register within the bank
* @mask: Mask used to flag/control the register.
- * @type_reg_offset: Offset register for the irq type setting.
- * @type_rising_mask: Mask bit to configure RISING type irq.
- * @type_falling_mask: Mask bit to configure FALLING type irq.
+ * @type: IRQ trigger type setting details if supported.
*/
struct regmap_irq {
unsigned int reg_offset;
unsigned int mask;
- unsigned int type_reg_offset;
- unsigned int type_rising_mask;
- unsigned int type_falling_mask;
+ struct regmap_irq_type type;
};
#define REGMAP_IRQ_REG(_irq, _off, _mask) \
[_irq] = { .reg_offset = (_off), .mask = (_mask) }
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
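
A hypothetical interrupt table using both helpers above: REGMAP_IRQ_REG_LINE() for plain lines packed into 8-bit registers, and an explicit entry carrying a struct regmap_irq_type for a line with a configurable trigger. All register values are invented.

        #include <linux/regmap.h>
        #include <linux/irq.h>

        static const struct regmap_irq hyp_irqs[] = {
                REGMAP_IRQ_REG_LINE(0, 8),      /* [0] = { .reg_offset = 0, .mask = BIT(0) } */
                REGMAP_IRQ_REG_LINE(1, 8),      /* [1] = { .reg_offset = 0, .mask = BIT(1) } */
                [2] = {                         /* line with a configurable trigger type */
                        .reg_offset = 0,
                        .mask = BIT(2),
                        .type = {
                                .type_reg_offset  = 0x0,
                                .type_reg_mask    = 0x3,
                                .type_rising_val  = 0x1,
                                .type_falling_val = 0x2,
                                .types_supported  = IRQ_TYPE_EDGE_RISING |
                                                    IRQ_TYPE_EDGE_FALLING,
                        },
                },
        };
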
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
@@ -1115,6 +1152,12 @@ struct regmap_irq {
* @ack_invert: Inverted ack register: cleared bits for ack.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @type_invert: Invert the type flags.
+ * @type_in_mask: Use the mask registers for controlling irq type. For
+ * interrupts defining type_rising/falling_mask use mask_base
+ * for edge configuration and never update bits in type_base.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -1153,6 +1196,8 @@ struct regmap_irq_chip {
bool wake_invert:1;
bool runtime_pm:1;
bool type_invert:1;
+ bool type_in_mask:1;
+ bool clear_on_unmask:1;
int num_regs;
@@ -1222,6 +1267,13 @@ static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_count)
{
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 494cedaafdf2..a85c1707285c 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -376,7 +376,7 @@ static inline int copy_regset_to_user(struct task_struct *target,
if (!regset->get)
return -EOPNOTSUPP;
- if (!access_ok(VERIFY_WRITE, data, size))
+ if (!access_ok(data, size))
return -EFAULT;
return regset->get(target, regset, offset, size, NULL, data);
@@ -402,7 +402,7 @@ static inline int copy_regset_from_user(struct task_struct *target,
if (!regset->set)
return -EOPNOTSUPP;
- if (!access_ok(VERIFY_READ, data, size))
+ if (!access_ok(data, size))
return -EFAULT;
return regset->set(target, regset, offset, size, NULL, data);
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 25602afd4844..f3f76051e8b0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -508,7 +508,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator,
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 0fd8fbb74763..389bcaf7900f 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -15,11 +15,12 @@
#ifndef __LINUX_REGULATOR_DRIVER_H_
#define __LINUX_REGULATOR_DRIVER_H_
-#define MAX_COUPLED 4
+#define MAX_COUPLED 2
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
+#include <linux/ww_mutex.h>
struct gpio_desc;
struct regmap;
@@ -271,9 +272,16 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @n_linear_ranges: Number of entries in the @linear_ranges table.
+ * @linear_range_selectors: A constant table of voltage range selectors.
+ *				If pickable ranges are used, each range must
+ *				have a corresponding selector here.
+ * @n_linear_ranges: Number of entries in the @linear_ranges (and in
+ * linear_range_selectors if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
*
+ * @vsel_range_reg: Register for range selector when using pickable ranges
+ * and regulator_regmap_X_voltage_X_pickable functions.
+ * @vsel_range_mask: Mask for register bitfield used for range selector
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
* @csel_reg: Register for TPS65218 LS3 current regulator
@@ -338,10 +346,14 @@ struct regulator_desc {
int min_dropout_uV;
const struct regulator_linear_range *linear_ranges;
+ const unsigned int *linear_range_selectors;
+
int n_linear_ranges;
const unsigned int *volt_table;
+ unsigned int vsel_range_reg;
+ unsigned int vsel_range_mask;
unsigned int vsel_reg;
unsigned int vsel_mask;
unsigned int csel_reg;
@@ -451,7 +463,7 @@ struct regulator_dev {
struct coupling_desc coupling_desc;
struct blocking_notifier_head notifier;
- struct mutex mutex; /* consumer lock */
+ struct ww_mutex mutex; /* consumer lock */
struct task_struct *mutex_owner;
int ref_cnt;
struct module *owner;
@@ -462,7 +474,6 @@ struct regulator_dev {
struct regmap *regmap;
struct delayed_work disable_work;
- int deferred_disables;
void *reg_data; /* regulator_dev data */
@@ -498,18 +509,25 @@ int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
+int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ unsigned int selector);
int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
unsigned int selector);
int regulator_list_voltage_table(struct regulator_dev *rdev,
unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_ascend(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
+ unsigned int sel);
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
int regulator_is_enabled_regmap(struct regulator_dev *rdev);
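
To show how the pickable-range fields and helpers are meant to fit together, here is a hypothetical descriptor with two selectable ranges. Every register address, voltage value, and name is invented; only the field and helper names come from the header.

        #include <linux/module.h>
        #include <linux/regulator/driver.h>

        static const struct regulator_linear_range hyp_ranges[] = {
                REGULATOR_LINEAR_RANGE(600000, 0x0, 0x3f, 12500),       /* low range */
                REGULATOR_LINEAR_RANGE(1600000, 0x0, 0x3f, 25000),      /* high range */
        };

        /* One selector value per entry in hyp_ranges[]. */
        static const unsigned int hyp_range_selectors[] = { 0x0, 0x1 };

        static const struct regulator_ops hyp_ops = {
                .list_voltage    = regulator_list_voltage_pickable_linear_range,
                .map_voltage     = regulator_map_voltage_pickable_linear_range,
                .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap,
                .set_voltage_sel = regulator_set_voltage_sel_pickable_regmap,
        };

        static const struct regulator_desc hyp_desc = {
                .name                   = "hyp-buck",
                .ops                    = &hyp_ops,
                .type                   = REGULATOR_VOLTAGE,
                .owner                  = THIS_MODULE,
                .linear_ranges          = hyp_ranges,
                .linear_range_selectors = hyp_range_selectors,
                .n_linear_ranges        = ARRAY_SIZE(hyp_ranges),
                .n_voltages             = 0x80,         /* invented */
                .vsel_reg               = 0x20,         /* invented register layout */
                .vsel_mask              = 0x3f,
                .vsel_range_reg         = 0x20,
                .vsel_range_mask        = 0x40,
        };
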
@@ -527,4 +545,7 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+void regulator_lock(struct regulator_dev *rdev);
+void regulator_unlock(struct regulator_dev *rdev);
+
#endif
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 48918be649d4..1a4340ed8e2b 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -24,8 +24,6 @@ struct regulator_init_data;
* @supply_name: Name of the regulator supply
* @input_supply: Name of the input regulator supply
* @microvolts: Output voltage of regulator
- * @gpio: GPIO to use for enable control
- * set to -EINVAL if not used
* @startup_delay: Start-up time in microseconds
* @gpio_is_open_drain: Gpio pin is open drain or normal type.
* If it is open drain type then HIGH will be set
@@ -49,7 +47,6 @@ struct fixed_voltage_config {
const char *supply_name;
const char *input_supply;
int microvolts;
- int gpio;
unsigned startup_delay;
unsigned gpio_is_open_drain:1;
unsigned enable_high:1;
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3468703d663a..1d34a70ffda2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -48,9 +48,9 @@ struct regulator;
* DISABLE_IN_SUSPEND - turn off regulator in suspend states
* ENABLE_IN_SUSPEND - keep regulator on in suspend states
*/
-#define DO_NOTHING_IN_SUSPEND (-1)
-#define DISABLE_IN_SUSPEND 0
-#define ENABLE_IN_SUSPEND 1
+#define DO_NOTHING_IN_SUSPEND 0
+#define DISABLE_IN_SUSPEND 1
+#define ENABLE_IN_SUSPEND 2
/* Regulator active discharge flags */
enum regulator_active_discharge {
@@ -158,6 +158,9 @@ struct regulation_constraints {
/* used for coupled regulators */
int max_spread;
+ /* used for changing voltage in steps */
+ int max_uV_step;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index cb5aecd40f07..331d7d940c7a 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -33,7 +33,8 @@
#define PFUZE100_VGEN4 12
#define PFUZE100_VGEN5 13
#define PFUZE100_VGEN6 14
-#define PFUZE100_MAX_REGULATOR 15
+#define PFUZE100_COIN 15
+#define PFUZE100_MAX_REGULATOR 16
#define PFUZE200_SW1AB 0
#define PFUZE200_SW2 1
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e3c5d856b6da..507a2b524208 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -305,14 +305,22 @@ struct fw_rsc_vdev {
struct fw_rsc_vdev_vring vring[0];
} __packed;
+struct rproc;
+
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
* @dma: dma address
* @len: length, in bytes
* @da: device address
+ * @release: release associated memory
* @priv: associated data
+ * @name: associated memory region name (optional)
* @node: list node
+ * @rsc_offset: offset in resource table
+ * @flags: iommu protection flags
+ * @of_resm_idx: reserved memory phandle index
+ * @alloc: specific memory allocator function
*/
struct rproc_mem_entry {
void *va;
@@ -320,10 +328,15 @@ struct rproc_mem_entry {
int len;
u32 da;
void *priv;
+ char name[32];
struct list_head node;
+ u32 rsc_offset;
+ u32 flags;
+ u32 of_resm_idx;
+ int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
+ int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
};
-struct rproc;
struct firmware;
/**
@@ -399,6 +412,9 @@ enum rproc_crash_type {
* @node: list node related to the rproc segment list
* @da: device address of the segment
* @size: size of the segment
+ * @priv: private data associated with the dump_segment
+ * @dump: custom dump function to fill device memory segment associated
+ * with coredump
*/
struct rproc_dump_segment {
struct list_head node;
@@ -406,6 +422,9 @@ struct rproc_dump_segment {
dma_addr_t da;
size_t size;
+ void *priv;
+ void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
+ void *dest);
loff_t offset;
};
@@ -439,7 +458,9 @@ struct rproc_dump_segment {
* @cached_table: copy of the resource table
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
+ * @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
+ * @nb_vdev: number of vdevs currently handled by rproc
*/
struct rproc {
struct list_head node;
@@ -472,6 +493,7 @@ struct rproc {
bool has_iommu;
bool auto_boot;
struct list_head dump_segments;
+ int nb_vdev;
};
/**
@@ -499,7 +521,6 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @dma: dma address
* @len: length, in bytes
* @da: device address
* @align: vring alignment
@@ -509,7 +530,6 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- dma_addr_t dma;
int len;
u32 da;
u32 align;
@@ -528,6 +548,7 @@ struct rproc_vring {
* @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
+ * @index: vdev position among the other vdevs declared in the resource table
*/
struct rproc_vdev {
struct kref refcount;
@@ -540,6 +561,7 @@ struct rproc_vdev {
struct virtio_device vdev;
struct rproc_vring vring[RVDEV_NUM_VRINGS];
u32 rsc_offset;
+ u32 index;
};
struct rproc *rproc_get_by_phandle(phandle phandle);
@@ -553,10 +575,29 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
+
+struct rproc_mem_entry *
+rproc_mem_entry_init(struct device *dev,
+ void *va, dma_addr_t dma, int len, u32 da,
+ int (*alloc)(struct rproc *, struct rproc_mem_entry *),
+ int (*release)(struct rproc *, struct rproc_mem_entry *),
+ const char *name, ...);
+
+struct rproc_mem_entry *
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+ u32 da, const char *name, ...);
+
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest),
+ void *priv);
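
A hypothetical use of the custom-segment hook above: the dump function copies from a driver-provided kernel virtual address stashed in @priv at registration time. The hyp_* names and the trace-buffer arguments are invented.

        #include <linux/remoteproc.h>
        #include <linux/string.h>

        static void hyp_dump_trace(struct rproc *rproc,
                                   struct rproc_dump_segment *segment, void *dest)
        {
                /* @priv was supplied at registration time: a kernel VA for the region. */
                memcpy(dest, segment->priv, segment->size);
        }

        static int hyp_register_trace_dump(struct rproc *rproc, dma_addr_t da,
                                           size_t size, void *trace_va)
        {
                return rproc_coredump_add_custom_segment(rproc, da, size,
                                                         hyp_dump_trace, trace_va);
        }
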
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e815afb..2f0ffca35780 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -68,7 +68,6 @@ struct reservation_object_list {
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
*/
struct reservation_object {
struct ww_mutex lock;
@@ -76,7 +75,6 @@ struct reservation_object {
struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
- struct reservation_object_list *staged;
};
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
@@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj)
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
- obj->staged = NULL;
}
/**
@@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj)
kfree(fobj);
}
- kfree(obj->staged);
ww_mutex_destroy(&obj->lock);
}
@@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj)
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* Test shared fence slot reservation */
+ if (obj->fence)
+ obj->fence->shared_max = obj->fence->shared_count;
+#endif
ww_mutex_unlock(&obj->lock);
}
@@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-int reservation_object_reserve_shared(struct reservation_object *obj);
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences);
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence);
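
With num_fences now taken up front, a caller can reserve all shared slots in one go before publishing its fences. A minimal sketch under that assumption; the batching helper is invented and error handling is kept to the essentials.

        #include <linux/reservation.h>

        static int hyp_attach_fences(struct reservation_object *resv,
                                     struct dma_fence **fences, unsigned int count)
        {
                unsigned int i;
                int ret;

                ret = reservation_object_lock(resv, NULL);
                if (ret)
                        return ret;

                /* Reserve all shared slots at once instead of one call per fence. */
                ret = reservation_object_reserve_shared(resv, count);
                if (!ret)
                        for (i = 0; i < count; i++)
                                reservation_object_add_shared_fence(resv, fences[i]);

                reservation_object_unlock(resv);
                return ret;
        }
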
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 09732c36f351..29af6d6b2f4b 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -116,7 +116,7 @@ static inline int device_reset_optional(struct device *dev)
* @id: reset line name
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
- * If this function is called more then once for the same reset_control it will
+ * If this function is called more than once for the same reset_control it will
* return -EBUSY.
*
* See reset_control_get_shared for details on shared references to
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 5d83d0c1d06c..bba2920e9c05 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -10,7 +10,7 @@
#include <linux/time64.h>
struct timespec;
-struct compat_timespec;
+struct old_timespec32;
struct pollfd;
enum timespec_type {
@@ -40,7 +40,7 @@ struct restart_block {
enum timespec_type type;
union {
struct __kernel_timespec __user *rmtp;
- struct compat_timespec __user *compat_rmtp;
+ struct old_timespec32 __user *compat_rmtp;
};
u64 expires;
} nanosleep;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index eb7111039247..20f9c6af7473 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -75,8 +75,19 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/*
+ * NULLS_MARKER() expects a hash value with the low
+ * bits most likely to be significant, and it discards
+ * the msb.
+ * We give it an address, in which the bottom 2 bits are
+ * always 0, and the msb might be significant.
+ * So we shift the address down one bit to align with
+ * expectations and avoid losing a significant bit.
+ */
+#define RHT_NULLS_MARKER(ptr) \
+ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
- ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
+ ((ptr) = RHT_NULLS_MARKER(&(ptr)))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
@@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
+ struct rhash_head __rcu * const *head;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup(
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
- rht_for_each_rcu(he, tbl, hash) {
- if (params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, he)) :
- rhashtable_compare(&arg, rht_obj(ht, he)))
- continue;
- return he;
- }
+ head = rht_bucket(tbl, hash);
+ do {
+ rht_for_each_rcu_continue(he, *head, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ return he;
+ }
+ /* An object might have been moved to a different hash chain,
+ * while we walk along it - better check and retry.
+ */
+ } while (he != RHT_NULLS_MARKER(head));
/* Ensure we see any new tables. */
smp_rmb();
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 003d09ab308d..5b9ae62272bb 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
@@ -164,8 +164,8 @@ void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
-int ring_buffer_record_is_on(struct ring_buffer *buffer);
-int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
@@ -189,6 +189,8 @@ bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
size_t ring_buffer_page_len(void *page);
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6268208760e9..c1089fe5344a 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -87,20 +87,16 @@ struct rtc_class_ops {
int (*set_offset)(struct device *, long offset);
};
-typedef struct rtc_task {
- void (*func)(void *private_data);
- void *private_data;
-} rtc_task_t;
-
+struct rtc_device;
struct rtc_timer {
- struct rtc_task task;
struct timerqueue_node node;
ktime_t period;
+ void (*func)(struct rtc_device *rtc);
+ struct rtc_device *rtc;
int enabled;
};
-
/* flags */
#define RTC_DEV_BUSY 0
@@ -121,8 +117,6 @@ struct rtc_device {
wait_queue_head_t irq_queue;
struct fasync_struct *async_queue;
- struct rtc_task *irq_task;
- spinlock_t irq_task_lock;
int irq_freq;
int max_user_freq;
@@ -145,7 +139,6 @@ struct rtc_device {
bool registered;
- struct nvmem_device *nvmem;
/* Old ABI support */
bool nvram_old_abi;
struct bin_attribute *nvram;
@@ -174,19 +167,12 @@ struct rtc_device {
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
-extern struct rtc_device *rtc_device_register(const char *name,
- struct device *dev,
- const struct rtc_class_ops *ops,
- struct module *owner);
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner);
struct rtc_device *devm_rtc_allocate_device(struct device *dev);
int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
-extern void rtc_device_unregister(struct rtc_device *rtc);
-extern void devm_rtc_device_unregister(struct device *dev,
- struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
@@ -204,25 +190,20 @@ extern void rtc_update_irq(struct rtc_device *rtc,
extern struct rtc_device *rtc_class_open(const char *name);
extern void rtc_class_close(struct rtc_device *rtc);
-extern int rtc_irq_register(struct rtc_device *rtc,
- struct rtc_task *task);
-extern void rtc_irq_unregister(struct rtc_device *rtc,
- struct rtc_task *task);
-extern int rtc_irq_set_state(struct rtc_device *rtc,
- struct rtc_task *task, int enabled);
-extern int rtc_irq_set_freq(struct rtc_device *rtc,
- struct rtc_task *task, int freq);
+extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled);
+extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq);
extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
unsigned int enabled);
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
-void rtc_aie_update_irq(void *private);
-void rtc_uie_update_irq(void *private);
+void rtc_aie_update_irq(struct rtc_device *rtc);
+void rtc_uie_update_irq(struct rtc_device *rtc);
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
-void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
+ struct rtc_device *rtc);
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
ktime_t expires, ktime_t period);
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
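
With rtc_task gone, timer callbacks now receive the rtc_device directly. A hypothetical periodic timer using the new signatures; the names, the one-second period, and the choice of ktime_get_real() for the start time are all illustrative.

        #include <linux/rtc.h>
        #include <linux/ktime.h>

        static struct rtc_timer hyp_timer;

        static void hyp_timer_fn(struct rtc_device *rtc)
        {
                dev_info(&rtc->dev, "hypothetical RTC timer fired\n");
        }

        static int hyp_timer_setup(struct rtc_device *rtc)
        {
                rtc_timer_init(&hyp_timer, hyp_timer_fn, rtc);
                return rtc_timer_start(rtc, &hyp_timer, ktime_get_real(),
                                       ktime_set(1, 0));        /* fire every second */
        }
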
@@ -290,4 +271,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc,
static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
+#ifdef CONFIG_RTC_INTF_SYSFS
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp);
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps);
+#else
+static inline
+int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline
+int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
+{
+ return 0;
+}
+#endif
#endif /* _LINUX_RTC_H_ */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 5225832bd6ff..bb9cb84114c1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -6,6 +6,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
+#include <linux/refcount.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -34,6 +35,7 @@ extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
+extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
extern struct rw_semaphore pernet_ops_rwsem;
@@ -83,6 +85,11 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
return rtnl_dereference(dev->ingress_queue);
}
+static inline struct netdev_queue *dev_ingress_queue_rcu(struct net_device *dev)
+{
+ return rcu_dereference(dev->ingress_queue);
+}
+
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
#ifdef CONFIG_NET_INGRESS
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index ab93b6eae696..67dbb57508b1 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -45,10 +45,10 @@ struct rw_semaphore {
};
/*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
+ * Setting bit 1 of the owner field but not bit 0 will indicate
* that the rwsem is writer-owned with an unknown owner.
*/
-#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-1L)
+#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L)
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h
deleted file mode 100644
index 65839a58b8e5..000000000000
--- a/include/linux/sa11x0-dma.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * SA11x0 DMA Engine support
- *
- * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_SA11X0_DMA_H
-#define __LINUX_SA11X0_DMA_H
-
-struct dma_chan;
-
-#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE)
-bool sa11x0_dma_filter_fn(struct dma_chan *, void *);
-#else
-static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d)
-{
- return false;
-}
-#endif
-
-#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 804a50983ec5..14d558146aea 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -30,14 +30,24 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @word: The bitmap word itself.
+ * @depth: Number of bits being used in @word/@cleared
*/
- unsigned long word;
+ unsigned long depth;
/**
- * @depth: Number of bits being used in @word.
+ * @word: word holding free bits
*/
- unsigned long depth;
+ unsigned long word ____cacheline_aligned_in_smp;
+
+ /**
+ * @cleared: word holding cleared bits
+ */
+ unsigned long cleared ____cacheline_aligned_in_smp;
+
+ /**
+ * @swap_lock: Held while swapping word <-> cleared
+ */
+ spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -125,6 +135,11 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
+ /*
+ * @ws_active: count of currently active ws waitqueues
+ */
+ atomic_t ws_active;
+
/**
* @round_robin: Allocate bits in strict round-robin order.
*/
@@ -250,12 +265,14 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
nr = SB_NR_TO_BIT(sb, start);
while (scanned < sb->depth) {
- struct sbitmap_word *word = &sb->map[index];
- unsigned int depth = min_t(unsigned int, word->depth - nr,
+ unsigned long word;
+ unsigned int depth = min_t(unsigned int,
+ sb->map[index].depth - nr,
sb->depth - scanned);
scanned += depth;
- if (!word->word)
+ word = sb->map[index].word & ~sb->map[index].cleared;
+ if (!word)
goto next;
/*
@@ -265,7 +282,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
*/
depth += nr;
while (1) {
- nr = find_next_bit(&word->word, depth, nr);
+ nr = find_next_bit(&word, depth, nr);
if (nr >= depth)
break;
if (!fn(sb, (index << sb->shift) + nr, data))
@@ -310,6 +327,19 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+/*
+ * This one is special, since it doesn't actually clear the bit, rather it
+ * sets the corresponding bit in the ->cleared mask instead. Paired with
+ * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * will clear the previously freed entries in the corresponding ->word.
+ */
+static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
+
+ set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
+}
+
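
A minimal, hypothetical free path built on the helper above; folding ->cleared back into ->word is left to the allocation side, as the comment describes.

        #include <linux/sbitmap.h>

        static void hyp_free_tag(struct sbitmap *sb, unsigned int tag)
        {
                /*
                 * Record the freed bit in ->cleared; ->word is left untouched
                 * until the allocation side folds the cleared bits back in.
                 */
                sbitmap_deferred_clear_bit(sb, tag);
        }
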
static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
unsigned int bitnr)
{
@@ -321,8 +351,6 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
-unsigned int sbitmap_weight(const struct sbitmap *sb);
-
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
* @sb: Bitmap to show.
@@ -531,4 +559,45 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
*/
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+struct sbq_wait {
+ struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
+ struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name) \
+ struct sbq_wait name = { \
+ .sbq = NULL, \
+ .wait = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .entry = LIST_HEAD_INIT((name).wait.entry), \
+ } \
+ }
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Wrapper around add_wait_queue(), which maintains some extra internal state
+ */
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Must be paired with sbitmap_add_wait_queue()
+ */
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
+
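
A hypothetical blocking allocation loop using the accounted wait helpers declared above. The waitqueue choice is simplified to the first entry and the function name is invented.

        #include <linux/sbitmap.h>
        #include <linux/sched.h>

        static int hyp_get_tag_blocking(struct sbitmap_queue *sbq)
        {
                struct sbq_wait_state *ws = &sbq->ws[0];        /* simplified choice */
                DEFINE_SBQ_WAIT(wait);
                int tag;

                for (;;) {
                        sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
                        tag = __sbitmap_queue_get(sbq);
                        if (tag != -1)
                                break;
                        io_schedule();
                }
                sbitmap_finish_wait(sbq, ws, &wait);

                return tag;
        }
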
#endif /* __LINUX_SCALE_BITMAP_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 093aa57120b0..b96f0d0b5b8f 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
* Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
* is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
*/
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-#define SG_MAX_SEGMENTS 2048
-#else
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
+#else
+#define SG_MAX_SEGMENTS 2048
#endif
#ifdef CONFIG_SG_POOL
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 789923fbee3a..89541d248893 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,6 +25,7 @@
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
+#include <linux/psi_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>
@@ -175,7 +176,7 @@ struct task_group;
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
@@ -514,7 +515,7 @@ struct sched_dl_entity {
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
@@ -571,10 +572,8 @@ union rcu_special {
struct {
u8 blocked;
u8 need_qs;
- u8 exp_need_qs;
-
- /* Otherwise the compiler can store garbage here: */
- u8 pad;
+ u8 exp_hint; /* Hint for performance. */
+ u8 pad; /* No garbage from compiler! */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
@@ -710,6 +709,10 @@ struct task_struct {
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_PSI
+ unsigned sched_psi_wake_requeue:1;
+#endif
+
/* Force alignment to the next boundary: */
unsigned :0;
@@ -723,9 +726,6 @@ struct task_struct {
#endif
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
-#ifdef CONFIG_MEMCG_KMEM
- unsigned memcg_kmem_skip_account:1;
-#endif
#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
@@ -739,6 +739,12 @@ struct task_struct {
unsigned use_memdelay:1;
#endif
+ /*
+ * May usercopy functions fault on kernel addresses?
+ * This is not just a single bit because this can potentially nest.
+ */
+ unsigned int kernel_uaccess_faults_ok;
+
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -779,7 +785,8 @@ struct task_struct {
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
- struct pid_link pids[PIDTYPE_MAX];
+ struct pid *thread_pid;
+ struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;
@@ -853,6 +860,7 @@ struct task_struct {
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
unsigned long last_switch_count;
+ unsigned long last_switch_time;
#endif
/* Filesystem information: */
struct fs_struct *fs;
@@ -958,9 +966,13 @@ struct task_struct {
/* Ptrace state: */
unsigned long ptrace_message;
- siginfo_t *last_siginfo;
+ kernel_siginfo_t *last_siginfo;
struct task_io_accounting ioac;
+#ifdef CONFIG_PSI
+ /* Pressure stall state */
+ unsigned int psi_flags;
+#endif
#ifdef CONFIG_TASK_XACCT
/* Accumulated RSS usage: */
u64 acct_rss_mem1;
@@ -983,7 +995,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
u32 closid;
u32 rmid;
#endif
@@ -1106,6 +1118,7 @@ struct task_struct {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1190,6 +1203,11 @@ struct task_struct {
void *security;
#endif
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ unsigned long lowest_stack;
+ unsigned long prev_lowest_stack;
+#endif
+
/*
* New fields for task_struct should be added above here, so that
* they are included in the randomized portion of task_struct.
@@ -1209,27 +1227,7 @@ struct task_struct {
static inline struct pid *task_pid(struct task_struct *task)
{
- return task->pids[PIDTYPE_PID].pid;
-}
-
-static inline struct pid *task_tgid(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PID].pid;
-}
-
-/*
- * Without tasklist or RCU lock it is not safe to dereference
- * the result of task_pgrp/task_session even if task == current,
- * we can race with another thread doing sys_setsid/sys_setpgid.
- */
-static inline struct pid *task_pgrp(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PGID].pid;
-}
-
-static inline struct pid *task_session(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_SID].pid;
+ return task->thread_pid;
}
/*
@@ -1278,7 +1276,7 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
*/
static inline int pid_alive(const struct task_struct *p)
{
- return p->pids[PIDTYPE_PID].pid != NULL;
+ return p->thread_pid != NULL;
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
@@ -1304,12 +1302,12 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
- return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
- return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
@@ -1407,6 +1405,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1457,6 +1456,8 @@ static inline bool is_percpu_thread(void)
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1488,6 +1489,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 59667444669f..afa940cd50dc 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+
+static inline unsigned long map_util_freq(unsigned long util,
+ unsigned long freq, unsigned long cap)
+{
+ return (freq + (freq >> 2)) * util / cap;
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* _LINUX_SCHED_CPUFREQ_H */
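
A quick worked example of the headroom calculation in map_util_freq(), with invented numbers:

        /*
         * util = 512, cap = 1024, freq = 2000000:
         *
         *   map_util_freq(512, 2000000, 1024)
         *     = (2000000 + 2000000/4) * 512 / 1024
         *     = 2500000 * 512 / 1024
         *     = 1250000
         *
         * The built-in 25% margin means the frequency is picked so that the
         * current utilization would occupy roughly 80% of it.
         */
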
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 4a6582c27dea..b0fb1446fe04 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -16,7 +16,7 @@ enum hk_flags {
};
#ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
extern int housekeeping_any_cpu(enum hk_flags flags);
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
{
#ifdef CONFIG_CPU_ISOLATION
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
return housekeeping_test_cpu(cpu, flags);
#endif
return true;
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 80bc84ba5d2a..4859bea47a7b 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -22,10 +22,26 @@ extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */
-#define CALC_LOAD(load,exp,n) \
- load *= exp; \
- load += n*(FIXED_1-exp); \
- load >>= FSHIFT;
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
+static inline unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+ unsigned long newload;
+
+ newload = load * exp + active * (FIXED_1 - exp);
+ if (active >= load)
+ newload += FIXED_1-1;
+
+ return newload / FIXED_1;
+}
+
+extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
+ unsigned long active, unsigned int n);
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
extern void calc_global_load(unsigned long ticks);
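
A hypothetical caller tying the pieces above together: calc_load() updates a fixed-point average and LOAD_INT()/LOAD_FRAC() split it for display. The function and its arguments are invented.

        #include <linux/sched/loadavg.h>
        #include <linux/printk.h>

        static void hyp_update_and_print(unsigned long *avg1, long nr_active)
        {
                unsigned long active = nr_active > 0 ? nr_active * FIXED_1 : 0;

                /* avg1 = avg1 * e + active * (1 - e), all in FIXED_1 fixed point */
                *avg1 = calc_load(*avg1, EXP_1, active);

                pr_info("1-minute load: %lu.%02lu\n",
                        LOAD_INT(*avg1), LOAD_FRAC(*avg1));
        }
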
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aebb370a0006..3bfa6a0cbba4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
{
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precendence
+ * so always make sure it takes precedence
*/
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 113d1ad1ced7..13789d10a50e 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -69,6 +69,11 @@ struct thread_group_cputimer {
bool checking_timer;
};
+struct multiprocess_signals {
+ sigset_t signal;
+ struct hlist_node node;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -90,6 +95,9 @@ struct signal_struct {
/* shared signal handling: */
struct sigpending shared_pending;
+ /* For collecting multiprocess signals during fork */
+ struct hlist_head multiprocess;
+
/* thread group exit support */
int group_exit_code;
/* overloaded:
@@ -146,7 +154,8 @@ struct signal_struct {
#endif
- struct pid *leader_pid;
+ /* PID/PID hash table linkage. */
+ struct pid *pids[PIDTYPE_MAX];
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
@@ -261,16 +270,16 @@ static inline int signal_group_exit(const struct signal_struct *sig)
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info);
-static inline int kernel_dequeue_signal(siginfo_t *info)
+static inline int kernel_dequeue_signal(void)
{
struct task_struct *tsk = current;
- siginfo_t __info;
+ kernel_siginfo_t __info;
int ret;
spin_lock_irq(&tsk->sighand->siglock);
- ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
+ ret = dequeue_signal(tsk, &tsk->blocked, &__info);
spin_unlock_irq(&tsk->sighand->siglock);
return ret;
@@ -313,12 +322,12 @@ int force_sig_pkuerr(void __user *addr, u32 pkey);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
-extern int send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int force_sigsegv(int, struct task_struct *);
-extern int force_sig_info(int, struct siginfo *, struct task_struct *);
-extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
-extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
+extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern void force_sigsegv(int sig, struct task_struct *p);
+extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *,
const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
@@ -329,7 +338,7 @@ extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline int restart_syscall(void)
@@ -371,6 +380,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
*/
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
+extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
@@ -383,6 +393,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
+void task_join_group_stop(struct task_struct *task);
+
#ifdef TIF_RESTORE_SIGMASK
/*
* Legacy restore_sigmask accessors. These are inefficient on
@@ -463,9 +475,8 @@ static inline int kill_cad_pid(int sig, int priv)
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
-#define SEND_SIG_NOINFO ((struct siginfo *) 0)
-#define SEND_SIG_PRIV ((struct siginfo *) 1)
-#define SEND_SIG_FORCED ((struct siginfo *) 2)
+#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
+#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
/*
* True if we are on the alternate signal stack.
@@ -556,6 +567,37 @@ extern bool current_is_single_threaded(void);
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+{
+ struct pid *pid;
+ if (type == PIDTYPE_PID)
+ pid = task_pid(task);
+ else
+ pid = task->signal->pids[type];
+ return pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_TGID];
+}
+
+/*
+ * Without tasklist or RCU lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_PGID];
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_SID];
+}
+
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
@@ -574,7 +616,7 @@ static inline bool thread_group_leader(struct task_struct *p)
*/
static inline bool has_group_leader_pid(struct task_struct *p)
{
- return task_pid(p) == p->signal->leader_pid;
+ return task_pid(p) == task_tgid(p);
}
static inline
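The task_pid_type()/task_tgid()/task_pgrp()/task_session() accessors added above read the new per-signal_struct pids[] array. A minimal usage sketch (illustrative only, not part of the patch; it relies on kill_pgrp() declared earlier in this header and never dereferences the returned struct pid directly):

static int example_signal_own_pgrp(int sig)
{
	/* task_pgrp() returns the caller's process-group struct pid;
	 * kill_pgrp() takes it as-is and does its own locking.
	 */
	return kill_pgrp(task_pgrp(current), sig, 1);
}
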
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
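sched_smt_active() compiles down to a static branch when CONFIG_SCHED_SMT is set and to a constant false otherwise. A hypothetical caller (illustrative sketch, not part of the patch):

#include <linux/printk.h>
#include <linux/sched/smt.h>

static void example_pick_mitigation(void)
{
	/* Cheap check: patched at runtime via the sched_smt_present
	 * static key, no memory load on the common path.
	 */
	if (sched_smt_active())
		pr_info("SMT siblings online, enabling cross-thread mitigation\n");
	else
		pr_info("SMT off, skipping cross-thread mitigation\n");
}
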
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 04f1321d14c4..568286411b43 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -8,7 +8,7 @@
* Various counters maintained by the scheduler and fork(),
* exposed via /proc, sys.c or used by drivers via these APIs.
*
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
* so they can only be relied on in narrow circumstances. )
*/
@@ -20,7 +20,6 @@ extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
-extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
static inline int sched_info_on(void)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 913488d828cb..a9c32daeb9d8 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -10,6 +10,7 @@ struct ctl_table;
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_check_interval_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
void __user *buffer,
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 108ede99e533..44c6f15800ff 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -39,6 +39,8 @@ void __noreturn do_task_dead(void);
extern void proc_caches_init(void);
+extern void fork_init(void);
+
extern void release_task(struct task_struct * p);
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 26347741ba50..c31d3a47a47c 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -23,10 +23,10 @@
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
+#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
@@ -89,7 +89,6 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
- unsigned int smt_gain;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
# define SD_INIT_NAME(type)
#endif
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#endif /* !CONFIG_SMP */
static inline int task_node(const struct task_struct *p)
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 96fe289c4c6e..39ad98c09c58 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/ratelimit.h>
struct key;
@@ -12,7 +13,7 @@ struct key;
* Some day this will be a full-fledged user tracking system..
*/
struct user_struct {
- atomic_t __count; /* reference count */
+ refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_FANOTIFY
@@ -59,7 +60,7 @@ extern struct user_struct root_user;
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
- atomic_inc(&u->__count);
+ refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index f4c9fc0fc755..3105055c00a7 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -91,6 +91,8 @@ struct scmi_clk_ops {
* to sustained performance level mapping
* @freq_get: gets the frequency for a given device using sustained frequency
* to sustained performance level mapping
+ * @est_power_get: gets the estimated power cost for a given performance domain
+ * at a given frequency
*/
struct scmi_perf_ops {
int (*limits_set)(const struct scmi_handle *handle, u32 domain,
@@ -110,6 +112,8 @@ struct scmi_perf_ops {
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_handle *handle, u32 domain,
unsigned long *rate, bool poll);
+ int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
+ unsigned long *rate, unsigned long *power);
};
/**
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index e5320f6c8654..84868d37b35d 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -4,9 +4,10 @@
#include <uapi/linux/seccomp.h>
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
- SECCOMP_FILTER_FLAG_LOG | \
- SECCOMP_FILTER_FLAG_SPEC_ALLOW)
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
+ SECCOMP_FILTER_FLAG_LOG | \
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
+ SECCOMP_FILTER_FLAG_NEW_LISTENER)
#ifdef CONFIG_SECCOMP
@@ -43,7 +44,7 @@ extern void secure_computing_strict(int this_syscall);
#endif
extern long prctl_get_seccomp(void);
-extern long prctl_set_seccomp(unsigned long, char __user *);
+extern long prctl_set_seccomp(unsigned long, void __user *);
static inline int seccomp_mode(struct seccomp *s)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index 75f4156c84d7..dbfb5a66babb 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -35,7 +35,7 @@
struct linux_binprm;
struct cred;
struct rlimit;
-struct siginfo;
+struct kernel_siginfo;
struct sembuf;
struct kern_ipc_perm;
struct audit_context;
@@ -182,36 +182,10 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
#ifdef CONFIG_SECURITY
-struct security_mnt_opts {
- char **mnt_opts;
- int *mnt_opts_flags;
- int num_mnt_opts;
-};
-
int call_lsm_notifier(enum lsm_event event, void *data);
int register_lsm_notifier(struct notifier_block *nb);
int unregister_lsm_notifier(struct notifier_block *nb);
-static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
-{
- opts->mnt_opts = NULL;
- opts->mnt_opts_flags = NULL;
- opts->num_mnt_opts = 0;
-}
-
-static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
-{
- int i;
- if (opts->mnt_opts)
- for (i = 0; i < opts->num_mnt_opts; i++)
- kfree(opts->mnt_opts[i]);
- kfree(opts->mnt_opts);
- opts->mnt_opts = NULL;
- kfree(opts->mnt_opts_flags);
- opts->mnt_opts_flags = NULL;
- opts->num_mnt_opts = 0;
-}
-
/* prototypes */
extern int security_init(void);
@@ -248,9 +222,10 @@ void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
-int security_sb_copy_data(char *orig, char *copy);
-int security_sb_remount(struct super_block *sb, void *data);
-int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
+void security_free_mnt_opts(void **mnt_opts);
+int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
+int security_sb_remount(struct super_block *sb, void *mnt_opts);
+int security_sb_kern_mount(struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -258,14 +233,15 @@ int security_sb_mount(const char *dev_name, const struct path *path,
int security_sb_umount(struct vfsmount *mnt, int flags);
int security_sb_pivotroot(const struct path *old_path, const struct path *new_path);
int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags);
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
-int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
+int security_add_mnt_opt(const char *option, const char *val,
+ int len, void **mnt_opts);
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
@@ -361,7 +337,7 @@ int security_task_setrlimit(struct task_struct *p, unsigned int resource,
int security_task_setscheduler(struct task_struct *p);
int security_task_getscheduler(struct task_struct *p);
int security_task_movememory(struct task_struct *p);
-int security_task_kill(struct task_struct *p, struct siginfo *info,
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -403,8 +379,6 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
#else /* CONFIG_SECURITY */
-struct security_mnt_opts {
-};
static inline int call_lsm_notifier(enum lsm_event event, void *data)
{
@@ -421,11 +395,7 @@ static inline int unregister_lsm_notifier(struct notifier_block *nb)
return 0;
}
-static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
-{
-}
-
-static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -555,17 +525,19 @@ static inline int security_sb_alloc(struct super_block *sb)
static inline void security_sb_free(struct super_block *sb)
{ }
-static inline int security_sb_copy_data(char *orig, char *copy)
+static inline int security_sb_eat_lsm_opts(char *options,
+ void **mnt_opts)
{
return 0;
}
-static inline int security_sb_remount(struct super_block *sb, void *data)
+static inline int security_sb_remount(struct super_block *sb,
+ void *mnt_opts)
{
return 0;
}
-static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
+static inline int security_sb_kern_mount(struct super_block *sb)
{
return 0;
}
@@ -600,7 +572,7 @@ static inline int security_sb_pivotroot(const struct path *old_path,
}
static inline int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
@@ -615,7 +587,8 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
return 0;
}
-static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+static inline int security_add_mnt_opt(const char *option, const char *val,
+ int len, void **mnt_opts)
{
return 0;
}
@@ -1020,7 +993,7 @@ static inline int security_task_movememory(struct task_struct *p)
}
static inline int security_task_kill(struct task_struct *p,
- struct siginfo *info, int sig,
+ struct kernel_siginfo *info, int sig,
const struct cred *cred)
{
return 0;
@@ -1820,28 +1793,5 @@ static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
-#ifdef CONFIG_SECURITY
-
-static inline char *alloc_secdata(void)
-{
- return (char *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void free_secdata(void *secdata)
-{
- free_page((unsigned long)secdata);
-}
-
-#else
-
-static inline char *alloc_secdata(void)
-{
- return (char *)1;
-}
-
-static inline void free_secdata(void *secdata)
-{ }
-#endif /* CONFIG_SECURITY */
-
#endif /* ! __LINUX_SECURITY_H */
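With the opaque mnt_opts cookie replacing struct security_mnt_opts, a filesystem's mount path strips LSM-owned options from its option string and later hands the cookie back to the LSM. A rough sketch of that flow (editorial, error handling condensed; the exact call sites vary per filesystem):

static int example_fs_handle_lsm_opts(struct super_block *sb, char *options)
{
	void *lsm_opts = NULL;
	int err;

	/* Consume any LSM-owned options (e.g. "context=") from the string. */
	err = security_sb_eat_lsm_opts(options, &lsm_opts);
	if (err)
		return err;

	/* Hand the collected options to the LSM for this superblock. */
	err = security_sb_set_mnt_opts(sb, lsm_opts, 0, NULL);
	security_free_mnt_opts(&lsm_opts);
	return err;
}
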
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index f153b2c7f0cd..070bf4e92df7 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -210,7 +210,7 @@ void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 18e21427bce4..5a655ba8d273 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -134,6 +134,10 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *, int);
struct uart_8250_em485 *em485;
+
+ /* Serial port overrun backoff */
+ struct delayed_work overrun_backoff;
+ u32 overrun_backoff_time_ms;
};
static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 406edae44ca3..5fe2b037e833 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -22,6 +22,7 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
+#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
@@ -144,6 +145,8 @@ struct uart_port {
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
struct serial_rs485 *rs485);
+ int (*iso7816_config)(struct uart_port *,
+ struct serial_iso7816 *iso7816);
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -173,6 +176,7 @@ struct uart_port {
struct console *cons; /* struct console, if any */
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
unsigned long sysrq; /* sysrq timeout */
+ unsigned int sysrq_ch; /* char for sysrq */
#endif
/* flags must be updated while holding port mutex */
@@ -260,6 +264,7 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -482,8 +487,42 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
}
return 0;
}
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (port->sysrq) {
+ if (ch && time_before(jiffies, port->sysrq)) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ return 0;
+}
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ int sysrq_ch;
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
#else
-#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
+static inline int
+uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
#endif
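The uart_prepare_sysrq_char()/uart_unlock_and_check_sysrq() pair lets a driver record a sysrq character while holding port->lock and run handle_sysrq() only after the lock is dropped. A hypothetical interrupt handler showing the intended pattern (illustrative only; example_rx_ready() and example_rx_char() are made-up helpers, tty flip/push omitted):

static irqreturn_t example_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;
	unsigned int ch;

	spin_lock_irqsave(&port->lock, flags);
	while (example_rx_ready(port)) {	/* hypothetical FIFO check */
		ch = example_rx_char(port);	/* hypothetical register read */
		if (uart_prepare_sysrq_char(port, ch))
			continue;	/* swallow the sysrq trigger character */
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	/* Drops port->lock, then runs handle_sysrq() outside of it. */
	uart_unlock_and_check_sysrq(port, flags);
	return IRQ_HANDLED;
}
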
/*
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
SCIx_SH4_SCIF_FIFODATA_REGTYPE,
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
+ SCIx_RZ_SCIFA_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index da5178216da5..2a986d282a97 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -17,6 +17,20 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef set_mce_nospec
+static inline int set_mce_nospec(unsigned long pfn)
+{
+ return 0;
+}
+#endif
+
+#ifndef clear_mce_nospec
+static inline int clear_mce_nospec(unsigned long pfn)
+{
+ return 0;
+}
+#endif
+
#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
static inline int set_memory_encrypted(unsigned long addr, int numpages)
{
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index d37518e89db2..d9d9de3fcf8e 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
*
* See the SFF-8472 specification and related documents for the definition
* of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
*/
struct sfp_eeprom_id {
struct sfp_eeprom_base base;
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index d927647e6350..6dfd05ef5c2d 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
* extracted from shdma.c and headers
@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#ifndef SHDMA_BASE_H
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index b154fd2b084c..9443cafd1969 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -12,6 +12,9 @@
struct shrink_control {
gfp_t gfp_mask;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
+
/*
* How many objects scan_objects should scan and try to reclaim.
* This is reset before every call, so it is safe for callees
@@ -26,9 +29,6 @@ struct shrink_control {
*/
unsigned long nr_scanned;
- /* current node being shrunk (for NUMA aware shrinkers) */
- int nid;
-
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
};
@@ -63,9 +63,9 @@ struct shrinker {
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
- unsigned long flags;
+ int seeks; /* seeks to recreate an obj */
+ unsigned flags;
/* These are for internal use */
struct list_head list;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3c5200137b24..cc7e2c1cd444 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,17 +11,21 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
+static inline void copy_siginfo(kernel_siginfo_t *to,
+ const kernel_siginfo_t *from)
{
memcpy(to, from, sizeof(*to));
}
-static inline void clear_siginfo(struct siginfo *info)
+static inline void clear_siginfo(kernel_siginfo_t *info)
{
memset(info, 0, sizeof(*info));
}
-int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+#define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo))
+
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from);
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from);
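Here kernel_siginfo_t is the leading portion of the UAPI siginfo_t that the kernel actually uses, and SI_EXPANSION_SIZE is the user-visible tail beyond it. A copy-out would then look roughly like this (editorial sketch under that assumption, not the actual implementation):

static int example_copy_siginfo_to_user(siginfo_t __user *to,
					const kernel_siginfo_t *from)
{
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	/* Zero the tail that only exists in the user-space layout so no
	 * kernel stack contents leak through it.
	 */
	if (clear_user((char __user *)to + sizeof(struct kernel_siginfo),
		       SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}
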
enum siginfo_layout {
SIL_KILL,
@@ -36,7 +40,7 @@ enum siginfo_layout {
SIL_SYS,
};
-enum siginfo_layout siginfo_layout(int sig, int si_code);
+enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
/*
* Define some primitives to manipulate sigset_t.
@@ -125,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
+ /* fall through */ \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
+ /* fall through */ \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@@ -157,7 +163,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
+ /* fall through */ \
case 2: set->sig[1] = op(set->sig[1]); \
+ /* fall through */ \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@@ -178,6 +186,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
+ /* fall through */
case 1: set->sig[0] = 0;
break;
}
@@ -190,6 +199,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
+ /* fall through */
case 1: set->sig[0] = -1;
break;
}
@@ -254,18 +264,24 @@ static inline int valid_signal(unsigned long sig)
struct timespec;
struct pt_regs;
+enum pid_type;
extern int next_signal(struct sigpending *pending, sigset_t *mask);
-extern int do_send_sig_info(int sig, struct siginfo *info,
- struct task_struct *p, bool group);
-extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
-extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
+extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
+extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
+extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
+ sigset_t *oldset, size_t sigsetsize);
+extern void restore_user_sigmask(const void __user *usigmask,
+ sigset_t *sigsaved);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int get_signal(struct ksignal *ksig);
+extern bool get_signal(struct ksignal *ksig);
extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);
@@ -287,7 +303,7 @@ static inline void disallow_signal(int sig)
extern struct kmem_cache *sighand_cachep;
-int unhandled_signal(struct task_struct *tsk, int sig);
+extern bool unhandled_signal(struct task_struct *tsk, int sig);
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 222ae696000b..f8a90ae9c6ec 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -9,6 +9,10 @@
#include <linux/list.h>
#include <uapi/linux/signal.h>
+typedef struct kernel_siginfo {
+ __SIGINFO;
+} kernel_siginfo_t;
+
/*
* Real Time signals may be queued.
*/
@@ -16,7 +20,7 @@
struct sigqueue {
struct list_head list;
int flags;
- siginfo_t info;
+ kernel_siginfo_t info;
struct user_struct *user;
};
@@ -60,7 +64,7 @@ struct old_sigaction {
struct ksignal {
struct k_sigaction ka;
- siginfo_t info;
+ kernel_siginfo_t info;
int sig;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 17a13e4785fc..93f56fddd92a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -243,6 +243,9 @@ struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
+struct bpf_prog;
+union bpf_attr;
+struct skb_ext;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -252,7 +255,6 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
- refcount_t use;
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
@@ -479,10 +481,11 @@ static inline void sock_zerocopy_get(struct ubuf_info *uarg)
}
void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg);
@@ -613,6 +616,8 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @offload_fwd_mark: Packet was L2-forwarded in hardware
+ * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @tc_redirected: packet was redirected by a tc action
@@ -631,6 +636,7 @@ typedef unsigned char *sk_buff_data_t;
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -660,6 +666,7 @@ typedef unsigned char *sk_buff_data_t;
* @data: Data head pointer
* @truesize: Buffer size
* @users: User count - see {datagram,tcp}.c
+ * @extensions: allocated extensions, valid if active_extensions is nonzero
*/
struct sk_buff {
@@ -689,7 +696,7 @@ struct sk_buff {
union {
ktime_t tstamp;
- u64 skb_mstamp;
+ u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
@@ -707,15 +714,9 @@ struct sk_buff {
struct list_head tcp_tsorted_anchor;
};
-#ifdef CONFIG_XFRM
- struct sec_path *sp;
-#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
unsigned long _nfct;
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- struct nf_bridge_info *nf_bridge;
-#endif
unsigned int len,
data_len;
__u16 mac_len,
@@ -742,7 +743,9 @@ struct sk_buff {
head_frag:1,
xmit_more:1,
pfmemalloc:1;
-
+#ifdef CONFIG_SKB_EXTENSIONS
+ __u8 active_extensions;
+#endif
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
*/
@@ -775,6 +778,14 @@ struct sk_buff {
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT 7
+#else
+#define PKT_VLAN_PRESENT_BIT 0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+ __u8 __pkt_vlan_present_offset[0];
+ __u8 vlan_present:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
@@ -782,13 +793,13 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
- __u8 ipvs_property:1;
+ __u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
- __u8 offload_mr_fwd_mark:1;
+ __u8 offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
@@ -856,6 +867,11 @@ struct sk_buff {
*data;
unsigned int truesize;
refcount_t users;
+
+#ifdef CONFIG_SKB_EXTENSIONS
+ /* only useable after checking ->active_extensions != 0 */
+ struct skb_ext *extensions;
+#endif
};
#ifdef __KERNEL__
@@ -1080,11 +1096,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
}
#define dev_kfree_skb(a) consume_skb(a)
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
- int getfrag(void *from, char *to, int offset,
- int len, int odd, struct sk_buff *skb),
- void *from, int length);
-
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
int offset, size_t size);
@@ -1192,6 +1203,24 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
+#ifdef CONFIG_NET
+int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog);
+
+int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
+#else
+static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
@@ -1302,15 +1331,35 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
-static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool *have_ref)
{
if (skb && uarg && !skb_zcopy(skb)) {
- sock_zerocopy_get(uarg);
+ if (unlikely(have_ref && *have_ref))
+ *have_ref = false;
+ else
+ sock_zerocopy_get(uarg);
skb_shinfo(skb)->destructor_arg = uarg;
skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}
}
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+ return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+ return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@@ -1320,7 +1369,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
- } else {
+ } else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}
@@ -1334,11 +1383,22 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg);
+ sock_zerocopy_put_abort(uarg, false);
skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
}
}
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+ skb->next = NULL;
+}
+
+static inline void skb_list_del_init(struct sk_buff *skb)
+{
+ __list_del_entry(&skb->list);
+ skb_mark_not_on_list(skb);
+}
+
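skb_mark_not_on_list() and skb_list_del_init() cover the common case of pulling one skb off a list_head-based queue before handing it to code that expects skb->next to be NULL. For example (illustrative sketch only):

static struct sk_buff *example_dequeue_first(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return NULL;

	skb = list_first_entry(head, struct sk_buff, list);
	/* Unlink from the list and clear skb->next in one step. */
	skb_list_del_init(skb);
	return skb;
}
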
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
@@ -1593,6 +1653,17 @@ static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
}
/**
+ * __skb_peek - peek at the head of a non-empty &sk_buff_head
+ * @list_: list to peek at
+ *
+ * Like skb_peek(), but the caller knows that the list is not empty.
+ */
+static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
+{
+ return list_->next;
+}
+
+/**
* skb_peek_next - peek skb following the given one from a queue
* @skb: skb to start from
* @list_: list to peek at
@@ -1688,8 +1759,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
- struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
@@ -2471,10 +2540,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
- if (unlikely(skb_is_nonlinear(skb))) {
- WARN_ON(1);
+ if (WARN_ON(skb_is_nonlinear(skb)))
return;
- }
skb->len = len;
skb_set_tail_pointer(skb, len);
}
@@ -3272,6 +3339,9 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len,
+ struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -3292,7 +3362,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3468,13 +3537,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
case 32: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 24: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 16: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 8: diffs |= __it_diff(a, b, 64);
break;
case 28: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 20: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 12: diffs |= __it_diff(a, b, 64);
+ /* fall through */
case 4: diffs |= __it_diff(a, b, 32);
break;
}
@@ -3827,18 +3902,97 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
+
+#ifdef CONFIG_SKB_EXTENSIONS
+enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+ SKB_EXT_BRIDGE_NF,
+#endif
+#ifdef CONFIG_XFRM
+ SKB_EXT_SEC_PATH,
+#endif
+ SKB_EXT_NUM, /* must be last */
+};
+
+/**
+ * struct skb_ext - sk_buff extensions
+ * @refcnt: 1 on allocation, deallocated on 0
+ * @offset: offset to add to @data to obtain extension address
+ * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
+ * @data: start of extension data, variable sized
+ *
+ * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
+ * to use 'u8' types while allowing up to 2kb worth of extension data.
+ */
+struct skb_ext {
+ refcount_t refcnt;
+ u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
+ u8 chunks; /* same */
+ char data[0] __aligned(8);
+};
+
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_put(struct skb_ext *ext);
+
+static inline void skb_ext_put(struct sk_buff *skb)
+{
+ if (skb->active_extensions)
+ __skb_ext_put(skb->extensions);
+}
+
+static inline void __skb_ext_copy(struct sk_buff *dst,
+ const struct sk_buff *src)
+{
+ dst->active_extensions = src->active_extensions;
+
+ if (src->active_extensions) {
+ struct skb_ext *ext = src->extensions;
+
+ refcount_inc(&ext->refcnt);
+ dst->extensions = ext;
+ }
+}
+
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+ skb_ext_put(dst);
+ __skb_ext_copy(dst, src);
+}
+
+static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
+{
+ return !!ext->offset[i];
+}
+
+static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
+{
+ return skb->active_extensions & (1 << id);
+}
+
+static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
- if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
- kfree(nf_bridge);
+ if (skb_ext_exist(skb, id))
+ __skb_ext_del(skb, id);
}
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+
+static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
- if (nf_bridge)
- refcount_inc(&nf_bridge->use);
+ if (skb_ext_exist(skb, id)) {
+ struct skb_ext *ext = skb->extensions;
+
+ return (void *)ext + (ext->offset[id] << 3);
+ }
+
+ return NULL;
}
-#endif /* CONFIG_BRIDGE_NETFILTER */
+#else
+static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
+static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+#endif /* CONFIG_SKB_EXTENSIONS */
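skb extensions replace the old per-skb pointers (sec_path, nf_bridge) with one refcounted block addressed through small per-id offsets. A lookup-or-attach sketch (illustrative only; SKB_EXT_SEC_PATH is used purely as an example id):

static struct sec_path *example_secpath_attach(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	if (sp)
		return sp;
	/* Allocates (or unshares) skb->extensions; may fail under memory
	 * pressure, in which case NULL is returned.
	 */
	return skb_ext_add(skb, SKB_EXT_SEC_PATH);
}
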
+
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -3846,8 +4000,7 @@ static inline void nf_reset(struct sk_buff *skb)
skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(skb->nf_bridge);
- skb->nf_bridge = NULL;
+ skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
#endif
}
@@ -3865,7 +4018,7 @@ static inline void ipvs_reset(struct sk_buff *skb)
#endif
}
-/* Note: This doesn't put any conntrack and bridge info in dst. */
+/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
@@ -3873,10 +4026,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- dst->nf_bridge = src->nf_bridge;
- nf_bridge_get(src->nf_bridge);
-#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
@@ -3888,9 +4037,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(dst->nf_bridge);
-#endif
__nf_copy(dst, src, true);
}
@@ -3912,12 +4058,19 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+static inline int secpath_exists(const struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
+#else
+ return 0;
+#endif
+}
+
static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
return !skb->destructor &&
-#if IS_ENABLED(CONFIG_XFRM)
- !skb->sp &&
-#endif
+ !secpath_exists(skb) &&
!skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
@@ -3963,10 +4116,10 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
return skb->dst_pending_confirm != 0;
}
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- return skb->sp;
+ return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
return NULL;
#endif
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
new file mode 100644
index 000000000000..178a3933a71b
--- /dev/null
+++ b/include/linux/skmsg.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
+
+#ifndef _LINUX_SKMSG_H
+#define _LINUX_SKMSG_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/strparser.h>
+
+#define MAX_MSG_FRAGS MAX_SKB_FRAGS
+
+enum __sk_action {
+ __SK_DROP = 0,
+ __SK_PASS,
+ __SK_REDIRECT,
+ __SK_NONE,
+};
+
+struct sk_msg_sg {
+ u32 start;
+ u32 curr;
+ u32 end;
+ u32 size;
+ u32 copybreak;
+ bool copy[MAX_MSG_FRAGS];
+ /* The extra element is used for chaining the front and sections when
+ * the list becomes partitioned (e.g. end < start). The crypto APIs
+ * require the chaining.
+ */
+ struct scatterlist data[MAX_MSG_FRAGS + 1];
+};
+
+/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
+struct sk_msg {
+ struct sk_msg_sg sg;
+ void *data;
+ void *data_end;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 flags;
+ struct sk_buff *skb;
+ struct sock *sk_redir;
+ struct sock *sk;
+ struct list_head list;
+};
+
+struct sk_psock_progs {
+ struct bpf_prog *msg_parser;
+ struct bpf_prog *skb_parser;
+ struct bpf_prog *skb_verdict;
+};
+
+enum sk_psock_state_bits {
+ SK_PSOCK_TX_ENABLED,
+};
+
+struct sk_psock_link {
+ struct list_head list;
+ struct bpf_map *map;
+ void *link_raw;
+};
+
+struct sk_psock_parser {
+ struct strparser strp;
+ bool enabled;
+ void (*saved_data_ready)(struct sock *sk);
+};
+
+struct sk_psock_work_state {
+ struct sk_buff *skb;
+ u32 len;
+ u32 off;
+};
+
+struct sk_psock {
+ struct sock *sk;
+ struct sock *sk_redir;
+ u32 apply_bytes;
+ u32 cork_bytes;
+ u32 eval;
+ struct sk_msg *cork;
+ struct sk_psock_progs progs;
+ struct sk_psock_parser parser;
+ struct sk_buff_head ingress_skb;
+ struct list_head ingress_msg;
+ unsigned long state;
+ struct list_head link;
+ spinlock_t link_lock;
+ refcount_t refcnt;
+ void (*saved_unhash)(struct sock *sk);
+ void (*saved_close)(struct sock *sk, long timeout);
+ void (*saved_write_space)(struct sock *sk);
+ struct proto *sk_proto;
+ struct sk_psock_work_state work_state;
+ struct work_struct work;
+ union {
+ struct rcu_head rcu;
+ struct work_struct gc;
+ };
+};
+
+int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
+ int elem_first_coalesce);
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+ u32 off, u32 len);
+void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
+int sk_msg_free(struct sock *sk, struct sk_msg *msg);
+int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
+void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
+void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
+ u32 bytes);
+
+void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
+void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);
+
+int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
+ struct sk_msg *msg, u32 bytes);
+
+static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
+{
+ WARN_ON(i == msg->sg.end && bytes);
+}
+
+static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
+{
+ if (psock->apply_bytes) {
+ if (psock->apply_bytes < bytes)
+ psock->apply_bytes = 0;
+ else
+ psock->apply_bytes -= bytes;
+ }
+}
+
+#define sk_msg_iter_var_prev(var) \
+ do { \
+ if (var == 0) \
+ var = MAX_MSG_FRAGS - 1; \
+ else \
+ var--; \
+ } while (0)
+
+#define sk_msg_iter_var_next(var) \
+ do { \
+ var++; \
+ if (var == MAX_MSG_FRAGS) \
+ var = 0; \
+ } while (0)
+
+#define sk_msg_iter_prev(msg, which) \
+ sk_msg_iter_var_prev(msg->sg.which)
+
+#define sk_msg_iter_next(msg, which) \
+ sk_msg_iter_var_next(msg->sg.which)
+
+static inline void sk_msg_clear_meta(struct sk_msg *msg)
+{
+ memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
+}
+
+static inline void sk_msg_init(struct sk_msg *msg)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
+ memset(msg, 0, sizeof(*msg));
+ sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
+}
+
+static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+ int which, u32 size)
+{
+ dst->sg.data[which] = src->sg.data[which];
+ dst->sg.data[which].length = size;
+ dst->sg.size += size;
+ src->sg.data[which].length -= size;
+ src->sg.data[which].offset += size;
+}
+
+static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
+{
+ memcpy(dst, src, sizeof(*src));
+ sk_msg_init(src);
+}
+
+static inline bool sk_msg_full(const struct sk_msg *msg)
+{
+ return (msg->sg.end == msg->sg.start) && msg->sg.size;
+}
+
+static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
+{
+ if (sk_msg_full(msg))
+ return MAX_MSG_FRAGS;
+
+ return msg->sg.end >= msg->sg.start ?
+ msg->sg.end - msg->sg.start :
+ msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
+}
+
+static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
+{
+ return &msg->sg.data[which];
+}
+
+static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
+{
+ return msg->sg.data[which];
+}
+
+static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
+{
+ return sg_page(sk_msg_elem(msg, which));
+}
+
+static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
+{
+ return msg->flags & BPF_F_INGRESS;
+}
+
+static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
+{
+ struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
+
+ if (msg->sg.copy[msg->sg.start]) {
+ msg->data = NULL;
+ msg->data_end = NULL;
+ } else {
+ msg->data = sg_virt(sge);
+ msg->data_end = msg->data + sge->length;
+ }
+}
+
+static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
+ u32 len, u32 offset)
+{
+ struct scatterlist *sge;
+
+ get_page(page);
+ sge = sk_msg_elem(msg, msg->sg.end);
+ sg_set_page(sge, page, len, offset);
+ sg_unmark_end(sge);
+
+ msg->sg.copy[msg->sg.end] = true;
+ msg->sg.size += len;
+ sk_msg_iter_next(msg, end);
+}
+
+static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
+{
+ do {
+ msg->sg.copy[i] = copy_state;
+ sk_msg_iter_var_next(i);
+ if (i == msg->sg.end)
+ break;
+ } while (1);
+}
+
+static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
+{
+ sk_msg_sg_copy(msg, start, true);
+}
+
+static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
+{
+ sk_msg_sg_copy(msg, start, false);
+}
+
+static inline struct sk_psock *sk_psock(const struct sock *sk)
+{
+ return rcu_dereference_sk_user_data(sk);
+}
+
+static inline void sk_psock_queue_msg(struct sk_psock *psock,
+ struct sk_msg *msg)
+{
+ list_add_tail(&msg->list, &psock->ingress_msg);
+}
+
+static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
+{
+ return psock ? list_empty(&psock->ingress_msg) : true;
+}
+
+static inline void sk_psock_report_error(struct sk_psock *psock, int err)
+{
+ struct sock *sk = psock->sk;
+
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+}
+
+struct sk_psock *sk_psock_init(struct sock *sk, int node);
+
+int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+
+int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
+ struct sk_msg *msg);
+
+static inline struct sk_psock_link *sk_psock_init_link(void)
+{
+ return kzalloc(sizeof(struct sk_psock_link),
+ GFP_ATOMIC | __GFP_NOWARN);
+}
+
+static inline void sk_psock_free_link(struct sk_psock_link *link)
+{
+ kfree(link);
+}
+
+struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);
+#if defined(CONFIG_BPF_STREAM_PARSER)
+void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
+#else
+static inline void sk_psock_unlink(struct sock *sk,
+ struct sk_psock_link *link)
+{
+}
+#endif
+
+void __sk_psock_purge_ingress_msg(struct sk_psock *psock);
+
+static inline void sk_psock_cork_free(struct sk_psock *psock)
+{
+ if (psock->cork) {
+ sk_msg_free(psock->sk, psock->cork);
+ kfree(psock->cork);
+ psock->cork = NULL;
+ }
+}
+
+static inline void sk_psock_update_proto(struct sock *sk,
+ struct sk_psock *psock,
+ struct proto *ops)
+{
+ psock->saved_unhash = sk->sk_prot->unhash;
+ psock->saved_close = sk->sk_prot->close;
+ psock->saved_write_space = sk->sk_write_space;
+
+ psock->sk_proto = sk->sk_prot;
+ sk->sk_prot = ops;
+}
+
+static inline void sk_psock_restore_proto(struct sock *sk,
+ struct sk_psock *psock)
+{
+ if (psock->sk_proto) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
+}
+
+static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock) {
+ if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
+ psock = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ if (!refcount_inc_not_zero(&psock->refcnt))
+ psock = ERR_PTR(-EBUSY);
+ }
+out:
+ rcu_read_unlock();
+ return psock;
+}
+
+static inline struct sk_psock *sk_psock_get(struct sock *sk)
+{
+ struct sk_psock *psock;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (psock && !refcount_inc_not_zero(&psock->refcnt))
+ psock = NULL;
+ rcu_read_unlock();
+ return psock;
+}
+
+void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
+void sk_psock_destroy(struct rcu_head *rcu);
+void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
+
+static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
+{
+ if (refcount_dec_and_test(&psock->refcnt))
+ sk_psock_drop(sk, psock);
+}
+
+static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+{
+ if (psock->parser.enabled)
+ psock->parser.saved_data_ready(sk);
+ else
+ sk->sk_data_ready(sk);
+}
+
+static inline void psock_set_prog(struct bpf_prog **pprog,
+ struct bpf_prog *prog)
+{
+ prog = xchg(pprog, prog);
+ if (prog)
+ bpf_prog_put(prog);
+}
+
+static inline void psock_progs_drop(struct sk_psock_progs *progs)
+{
+ psock_set_prog(&progs->msg_parser, NULL);
+ psock_set_prog(&progs->skb_parser, NULL);
+ psock_set_prog(&progs->skb_verdict, NULL);
+}
+
+#endif /* _LINUX_SKMSG_H */
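The sg.start/sg.end indices of struct sk_msg_sg form a ring over MAX_MSG_FRAGS entries, so end < start is a valid (wrapped) state, which is why sk_msg_elem_used() special-cases it. A walker over the used elements might look like this (illustrative only, built solely from the helpers above):

#include <linux/skmsg.h>

static u32 example_sk_msg_total_len(struct sk_msg *msg)
{
	u32 i = msg->sg.start;
	u32 used = sk_msg_elem_used(msg);
	u32 len = 0;

	while (used--) {
		len += sk_msg_elem(msg, i)->length;
		sk_msg_iter_var_next(i);	/* wraps at MAX_MSG_FRAGS */
	}
	/* For a consistent message this matches msg->sg.size. */
	return len;
}
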
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ed9cbddeb4a6..11b45f7ae405 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -295,11 +295,42 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
+/*
+ * Whenever changing this, take care of that kmalloc_type() and
+ * create_kmalloc_caches() still work as intended.
+ */
+enum kmalloc_cache_type {
+ KMALLOC_NORMAL = 0,
+ KMALLOC_RECLAIM,
+#ifdef CONFIG_ZONE_DMA
+ KMALLOC_DMA,
+#endif
+ NR_KMALLOC_TYPES
+};
+
#ifndef CONFIG_SLOB
-extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache *
+kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+{
#ifdef CONFIG_ZONE_DMA
-extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+ /*
+ * The most common case is KMALLOC_NORMAL, so test for it
+ * with a single branch for both flags.
+ */
+ if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
+ return KMALLOC_NORMAL;
+
+ /*
+ * At least one of the flags has to be set. If both are, __GFP_DMA
+ * is more important.
+ */
+ return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
+#else
+ return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
+}
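In effect kmalloc_type() selects the row of the now two-dimensional kmalloc_caches[][] array; a few example mappings (editorial note, assuming CONFIG_ZONE_DMA=y):

/*
 *   kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *   kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *   kmalloc_type(GFP_ATOMIC | __GFP_DMA)         == KMALLOC_DMA
 *
 * so a constant-size allocation resolves to
 *   kmalloc_caches[kmalloc_type(flags)][kmalloc_index(size)]
 */
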
/*
* Figure out which kmalloc slab an allocation of a certain size
@@ -413,7 +444,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc(s, flags);
- kasan_kmalloc(s, ret, size, flags);
+ ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}
@@ -424,7 +455,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
- kasan_kmalloc(s, ret, size, gfpflags);
+ ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
#endif /* CONFIG_TRACING */
@@ -455,64 +486,65 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
*
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
- * For example, use this inside interrupt handlers.
+ * The @flags argument may be one of the GFP flags defined at
+ * include/linux/gfp.h and described at
+ * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
- * %GFP_HIGHUSER - Allocate pages from high memory.
+ * The recommended usage of the @flags is described at
+ * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
*
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ * Below is a brief outline of the most useful GFP flags
*
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ * %GFP_KERNEL
+ * Allocate normal kernel ram. May sleep.
*
- * %GFP_NOWAIT - Allocation will not sleep.
+ * %GFP_NOWAIT
+ * Allocation will not sleep.
*
- * %__GFP_THISNODE - Allocate node-local memory only.
+ * %GFP_ATOMIC
+ * Allocation will not sleep. May use emergency pools.
*
- * %GFP_DMA - Allocation suitable for DMA.
- * Should only be used for kmalloc() caches. Otherwise, use a
- * slab created with SLAB_DMA.
+ * %GFP_HIGHUSER
+ * Allocate memory from high memory on behalf of user.
*
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ * %__GFP_HIGH
+ * This allocation has high priority and may use emergency pools.
*
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- * (think twice before using).
+ * %__GFP_NOFAIL
+ * Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
*
- * %__GFP_NORETRY - If memory is not immediately available,
- * then give up at once.
+ * %__GFP_NORETRY
+ * If memory is not immediately available,
+ * then give up at once.
*
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ * %__GFP_NOWARN
+ * If allocation fails, don't issue any warnings.
*
- * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
- * eventually.
- *
- * There are other flags available as well, but these are not intended
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
+ * %__GFP_RETRY_MAYFAIL
+ * Try really hard to succeed the allocation but fail
+ * eventually.
*/
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
+#ifndef CONFIG_SLOB
+ unsigned int index;
+#endif
if (size > KMALLOC_MAX_CACHE_SIZE)
return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
- if (!(flags & GFP_DMA)) {
- unsigned int index = kmalloc_index(size);
+ index = kmalloc_index(size);
- if (!index)
- return ZERO_SIZE_PTR;
+ if (!index)
+ return ZERO_SIZE_PTR;
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
+ return kmem_cache_alloc_trace(
+ kmalloc_caches[kmalloc_type(flags)][index],
+ flags, size);
#endif
}
return __kmalloc(size, flags);
@@ -542,13 +574,14 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ size <= KMALLOC_MAX_CACHE_SIZE) {
unsigned int i = kmalloc_index(size);
if (!i)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ return kmem_cache_alloc_node_trace(
+ kmalloc_caches[kmalloc_type(flags)][i],
flags, node, size);
}
#endif
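
As a quick illustration of the flag guidance in the reworked kmalloc() kernel-doc above, here is a minimal sketch (not part of this patch; the struct and helpers are hypothetical) pairing each calling context with a suitable GFP flag:

#include <linux/slab.h>
#include <linux/gfp.h>

struct foo_ctx {
	int id;
	char name[16];
};

/* Process context: sleeping is allowed, so GFP_KERNEL is the normal choice. */
static struct foo_ctx *foo_ctx_create(int id)
{
	struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		ctx->id = id;
	return ctx;
}

/* Interrupt/atomic context: must not sleep, so use GFP_ATOMIC instead. */
static struct foo_ctx *foo_ctx_create_atomic(int id)
{
	struct foo_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

	if (ctx)
		ctx->id = id;
	return ctx;
}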
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 3485c58cfd1c..9a5eafb7145b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -104,4 +104,17 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
return object;
}
+/*
+ * We want to avoid an expensive divide : (offset / cache->size)
+ * Using the fact that size is a constant for a particular cache,
+ * we can replace (offset / cache->size) by
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct page *page, void *obj)
+{
+ u32 offset = (obj - page->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+}
+
#endif /* _LINUX_SLAB_DEF_H */
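
The comment added above motivates obj_to_index() by the cost of a runtime division. Below is a standalone, user-space sketch of the same multiply-and-shift trick (it deliberately does not use the kernel's reciprocal_div.h helpers); exactness is guaranteed here because slab offsets are always multiples of the object size:

#include <stdint.h>
#include <stdio.h>

/* Precompute m = ceil(2^32 / d) once per divisor (once per kmem_cache). */
static uint32_t reciprocal_of(uint32_t d)
{
	return (uint32_t)((((uint64_t)1 << 32) + d - 1) / d);
}

/* offset / d becomes one 64-bit multiply and a shift. */
static uint32_t reciprocal_div32(uint32_t a, uint32_t m)
{
	return (uint32_t)(((uint64_t)a * m) >> 32);
}

int main(void)
{
	uint32_t size = 192;			/* stand-in for cache->size */
	uint32_t m = reciprocal_of(size);	/* stand-in for reciprocal_buffer_size */
	uint32_t idx;

	for (idx = 0; idx < 100000; idx++) {
		uint32_t offset = idx * size;	/* objects sit at multiples of size */

		if (reciprocal_div32(offset, m) != idx) {
			printf("mismatch at index %u\n", idx);
			return 1;
		}
	}
	printf("all object indices recovered without a divide\n");
	return 0;
}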
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 9fb239e12b82..a56f08ff3097 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,6 +53,10 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
+void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags, const struct cpumask *mask);
+
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
#ifdef CONFIG_SMP
diff --git a/include/linux/soc/amlogic/meson-canvas.h b/include/linux/soc/amlogic/meson-canvas.h
new file mode 100644
index 000000000000..b4dde2fbeb3f
--- /dev/null
+++ b/include/linux/soc/amlogic/meson-canvas.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ */
+#ifndef __SOC_MESON_CANVAS_H
+#define __SOC_MESON_CANVAS_H
+
+#include <linux/kernel.h>
+
+#define MESON_CANVAS_WRAP_NONE 0x00
+#define MESON_CANVAS_WRAP_X 0x01
+#define MESON_CANVAS_WRAP_Y 0x02
+
+#define MESON_CANVAS_BLKMODE_LINEAR 0x00
+#define MESON_CANVAS_BLKMODE_32x32 0x01
+#define MESON_CANVAS_BLKMODE_64x64 0x02
+
+#define MESON_CANVAS_ENDIAN_SWAP16 0x1
+#define MESON_CANVAS_ENDIAN_SWAP32 0x3
+#define MESON_CANVAS_ENDIAN_SWAP64 0x7
+#define MESON_CANVAS_ENDIAN_SWAP128 0xf
+
+struct meson_canvas;
+
+/**
+ * meson_canvas_get() - get a canvas provider instance
+ *
+ * @dev: consumer device pointer
+ */
+struct meson_canvas *meson_canvas_get(struct device *dev);
+
+/**
+ * meson_canvas_alloc() - take ownership of a canvas
+ *
+ * @canvas: canvas provider instance retrieved from meson_canvas_get()
+ * @canvas_index: will be filled with the canvas ID
+ */
+int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index);
+
+/**
+ * meson_canvas_free() - release ownership of a canvas
+ *
+ * @canvas: canvas provider instance retrieved from meson_canvas_get()
+ * @canvas_index: canvas ID that was obtained via meson_canvas_alloc()
+ */
+int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index);
+
+/**
+ * meson_canvas_config() - configure a canvas
+ *
+ * @canvas: canvas provider instance retrieved from meson_canvas_get()
+ * @canvas_index: canvas ID that was obtained via meson_canvas_alloc()
+ * @addr: physical address of the pixel buffer
+ * @stride: width of the buffer
+ * @height: height of the buffer
+ * @wrap: wrap mode (MESON_CANVAS_WRAP_NONE, _X or _Y)
+ * @blkmode: block mode (linear, 32x32, 64x64)
+ * @endian: byte swapping (swap16, swap32, swap64, swap128)
+ */
+int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
+ u32 addr, u32 stride, u32 height,
+ unsigned int wrap, unsigned int blkmode,
+ unsigned int endian);
+
+#endif
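
A possible call sequence for the canvas API declared above, sketched from the kernel-doc; the consumer driver, buffer geometry, and the assumption that meson_canvas_get() reports failure via ERR_PTR() are illustrative, not taken from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/amlogic/meson-canvas.h>

static int example_canvas_setup(struct device *dev, u32 paddr)
{
	struct meson_canvas *canvas;
	u8 idx;
	int ret;

	canvas = meson_canvas_get(dev);		/* look up the provider */
	if (IS_ERR(canvas))
		return PTR_ERR(canvas);

	ret = meson_canvas_alloc(canvas, &idx);	/* idx is filled in on success */
	if (ret)
		return ret;

	/* 1920x1080, 4 bytes per pixel, linear layout, no wrap, no swap */
	ret = meson_canvas_config(canvas, idx, paddr, 1920 * 4, 1080,
				  MESON_CANVAS_WRAP_NONE,
				  MESON_CANVAS_BLKMODE_LINEAR, 0);
	if (ret)
		meson_canvas_free(canvas, idx);

	return ret;
}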
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
new file mode 100644
index 000000000000..54ade13a9b15
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ */
+
+#ifndef __MTK_CMDQ_H__
+#define __MTK_CMDQ_H__
+
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/timer.h>
+
+#define CMDQ_NO_TIMEOUT 0xffffffffu
+
+/** cmdq event maximum */
+#define CMDQ_MAX_EVENT 0x3ff
+
+struct cmdq_pkt;
+
+struct cmdq_client {
+ spinlock_t lock;
+ u32 pkt_cnt;
+ struct mbox_client client;
+ struct mbox_chan *chan;
+ struct timer_list timer;
+ u32 timeout_ms; /* in unit of millisecond */
+};
+
+/**
+ * cmdq_mbox_create() - create CMDQ mailbox client and channel
+ * @dev: device of CMDQ mailbox client
+ * @index: index of CMDQ mailbox channel
+ * @timeout: timeout of a pkt execution by GCE, in unit of millisecond, set
+ * CMDQ_NO_TIMEOUT if a timer is not used.
+ *
+ * Return: CMDQ mailbox client pointer
+ */
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index,
+ u32 timeout);
+
+/**
+ * cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel
+ * @client: the CMDQ mailbox client
+ */
+void cmdq_mbox_destroy(struct cmdq_client *client);
+
+/**
+ * cmdq_pkt_create() - create a CMDQ packet
+ * @client: the CMDQ mailbox client
+ * @size: required CMDQ buffer size
+ *
+ * Return: CMDQ packet pointer
+ */
+struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size);
+
+/**
+ * cmdq_pkt_destroy() - destroy the CMDQ packet
+ * @pkt: the CMDQ packet
+ */
+void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
+
+/**
+ * cmdq_pkt_write() - append write command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @value: the specified target register value
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset);
+
+/**
+ * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @value: the specified target register value
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value,
+ u32 subsys, u32 offset, u32 mask);
+
+/**
+ * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event type to "wait and CLEAR"
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event);
+
+/**
+ * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be cleared
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event);
+
+/**
+ * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
+ * packet and invoke the callback once the packet has completed
+ * @pkt: the CMDQ packet
+ * @cb: callback invoked when the packet has completed
+ * @data: data passed back to @cb
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to asynchronously execute the CMDQ packet and invoke the
+ * callback once the packet has completed. Note that this is an ASYNC
+ * function: when it returns, the packet may or may not have finished.
+ */
+int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
+ void *data);
+
+/**
+ * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet
+ * @pkt: the CMDQ packet
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Trigger CMDQ to execute the CMDQ packet. Note that this is a
+ * synchronous flush function: when it returns, the recorded commands
+ * have completed.
+ */
+int cmdq_pkt_flush(struct cmdq_pkt *pkt);
+
+#endif /* __MTK_CMDQ_H__ */
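
A rough sketch of the intended GCE flow (create a client, build a packet, flush it), assembled from the kernel-doc above; the subsystem code, register offset, event ID, and the assumption that the create helpers return ERR_PTR() on failure are all illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define EXAMPLE_SUBSYS		0x14	/* hypothetical CMDQ subsys code */
#define EXAMPLE_REG_OFFSET	0x040	/* hypothetical register offset */
#define EXAMPLE_EVENT		33	/* hypothetical GCE event ID */

static int example_cmdq_write(struct device *dev, u32 value)
{
	struct cmdq_client *client;
	struct cmdq_pkt *pkt;
	int ret;

	client = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
	if (IS_ERR(client))
		return PTR_ERR(client);

	pkt = cmdq_pkt_create(client, PAGE_SIZE);
	if (IS_ERR(pkt)) {
		ret = PTR_ERR(pkt);
		goto out_destroy_mbox;
	}

	/* wait for the hardware event, then update the register */
	cmdq_pkt_wfe(pkt, EXAMPLE_EVENT);
	cmdq_pkt_write(pkt, value, EXAMPLE_SUBSYS, EXAMPLE_REG_OFFSET);

	ret = cmdq_pkt_flush(pkt);	/* synchronous: commands done on return */

	cmdq_pkt_destroy(pkt);
out_destroy_mbox:
	cmdq_mbox_destroy(client);
	return ret;
}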
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 7e3b9c605ab2..69c285b1c990 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -70,25 +70,51 @@ struct llcc_slice_config {
/**
* llcc_drv_data - Data associated with the llcc driver
* @regmap: regmap associated with the llcc device
+ * @bcast_regmap: regmap associated with llcc broadcast offset
* @cfg: pointer to the data structure for slice configuration
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
- * @bcast_off: Offset of the broadcast bank
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
* @offsets: Pointer to the bank offsets array
+ * @ecc_irq: interrupt for llcc cache error detection and reporting
*/
struct llcc_drv_data {
struct regmap *regmap;
+ struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
- u32 bcast_off;
u32 num_banks;
unsigned long *bitmap;
u32 *offsets;
+ int ecc_irq;
+};
+
+/**
+ * llcc_edac_reg_data - llcc edac registers data for each error type
+ * @name: Name of the error
+ * @synd_reg: Syndrome register address
+ * @count_status_reg: Status register address to read the error count
+ * @ways_status_reg: Status register address to read the error ways
+ * @reg_cnt: Number of registers
+ * @count_mask: Mask value to get the error count
+ * @ways_mask: Mask value to get the error ways
+ * @count_shift: Shift value to get the error count
+ * @ways_shift: Shift value to get the error ways
+ */
+struct llcc_edac_reg_data {
+ char *name;
+ u64 synd_reg;
+ u64 count_status_reg;
+ u64 ways_status_reg;
+ u32 reg_cnt;
+ u32 count_mask;
+ u32 ways_mask;
+ u8 count_shift;
+ u8 ways_shift;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index f4de33654a60..5efa2b67fa55 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -166,7 +166,7 @@ struct qmi_ops {
struct qmi_txn {
struct qmi_handle *qmi;
- int id;
+ u16 id;
struct mutex lock;
struct completion completion;
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
index 8a6086d2e9c3..00fae6fd234d 100644
--- a/include/linux/soc/renesas/rcar-sysc.h
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -2,16 +2,7 @@
#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__
#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__
-#include <linux/types.h>
-
-struct rcar_sysc_ch {
- u16 chan_offs;
- u8 chan_bit;
- u8 isr_bit;
-};
-
-int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
-int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
-void rcar_sysc_init(phys_addr_t base, u32 syscier);
+int rcar_sysc_power_down_cpu(unsigned int cpu);
+int rcar_sysc_power_up_cpu(unsigned int cpu);
#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7ed4713d5337..ab2041a00e01 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -286,6 +286,7 @@ struct ucred {
#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do not apply policy */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
@@ -348,7 +349,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct timespec;
+struct __kernel_timespec;
+struct old_timespec32;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
@@ -357,8 +359,10 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
-extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec *timeout);
+extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout,
+ struct old_timespec32 __user *timeout32);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 962971e6a9c7..df313913e856 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -678,6 +678,9 @@ struct sdw_master_ops {
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
+ * @multi_link: Bus property that indicates whether multiple links are
+ * supported. This flag is populated by drivers after reading the
+ * appropriate firmware (ACPI/DT).
*/
struct sdw_bus {
struct device *dev;
@@ -694,6 +697,7 @@ struct sdw_bus {
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
+ bool multi_link;
};
int sdw_add_bus_master(struct sdw_bus *bus);
@@ -768,14 +772,18 @@ struct sdw_stream_params {
* @params: Stream parameters
* @state: Current state of the stream
* @type: Stream type PCM or PDM
- * @m_rt: Master runtime
+ * @master_list: List of Master runtime(s) in this stream.
+ * master_list can contain only one m_rt per Master instance
+ * for a stream
+ * @m_rt_count: Count of Master runtime(s) in this stream
*/
struct sdw_stream_runtime {
char *name;
struct sdw_stream_params params;
enum sdw_stream_state state;
enum sdw_stream_type type;
- struct sdw_master_runtime *m_rt;
+ struct list_head master_list;
+ int m_rt_count;
};
struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index bfde741a543d..778ae8eb1f3e 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -8,11 +8,6 @@
struct device;
struct mmc_host;
-#define MMC_SPI_USE_CD_GPIO (1 << 0)
-#define MMC_SPI_USE_RO_GPIO (1 << 1)
-#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2)
-#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3)
-
/* Put this in platform_data of a device being used to manage an MMC/SD
* card slot. (Modeled after PXA mmc glue; see that for usage examples.)
*
@@ -27,16 +22,6 @@ struct mmc_spi_platform_data {
void *);
void (*exit)(struct device *, void *);
- /*
- * Card Detect and Read Only GPIOs. To enable debouncing on the card
- * detect GPIO, set the cd_debounce to the debounce time in
- * microseconds.
- */
- unsigned int flags;
- unsigned int cd_gpio;
- unsigned int cd_debounce;
- unsigned int ro_gpio;
-
/* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
unsigned long caps;
unsigned long caps2;
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 9ec4c147abbc..b0674e330ef6 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_master {
u16 num_chipselect;
u8 enable_dma;
+ bool is_slave;
/* DMA engine specific config */
bool (*dma_filter)(struct dma_chan *chan, void *param);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index b2bd4b4127c4..3fe24500c5ee 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -57,10 +57,12 @@
/**
* enum spi_mem_data_dir - describes the direction of a SPI memory data
* transfer from the controller perspective
+ * @SPI_MEM_NO_DATA: no data transferred
* @SPI_MEM_DATA_IN: data coming from the SPI memory
- * @SPI_MEM_DATA_OUT: data sent the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
*/
enum spi_mem_data_dir {
+ SPI_MEM_NO_DATA,
SPI_MEM_DATA_IN,
SPI_MEM_DATA_OUT,
};
@@ -81,8 +83,10 @@ enum spi_mem_data_dir {
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ * operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
*/
struct spi_mem_op {
struct {
@@ -105,7 +109,6 @@ struct spi_mem_op {
u8 buswidth;
enum spi_mem_data_dir dir;
unsigned int nbytes;
- /* buf.{in,out} must be DMA-able. */
union {
void *in;
const void *out;
@@ -122,6 +125,49 @@ struct spi_mem_op {
}
/**
+ * struct spi_mem_dirmap_info - Direct mapping information
+ * @op_tmpl: operation template that should be used by the direct mapping when
+ * the memory device is accessed
+ * @offset: absolute offset this direct mapping is pointing to
+ * @length: length in bytes of this direct mapping
+ *
+ * This information is used by the controller-specific implementation to know
+ * the portion of memory that is directly mapped and the spi_mem_op that should
+ * be used to access the device.
+ * A direct mapping is only valid for one direction (read or write) and this
+ * direction is directly encoded in the ->op_tmpl.data.dir field.
+ */
+struct spi_mem_dirmap_info {
+ struct spi_mem_op op_tmpl;
+ u64 offset;
+ u64 length;
+};
+
+/**
+ * struct spi_mem_dirmap_desc - Direct mapping descriptor
+ * @mem: the SPI memory device this direct mapping is attached to
+ * @info: information passed at direct mapping creation time
+ * @nodirmap: set to 1 if the SPI controller does not implement
+ * ->mem_ops->dirmap_create() or when this function returned an
+ * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}()
+ * calls will use spi_mem_exec_op() to access the memory. This is a
+ * degraded mode that allows spi_mem drivers to use the same code
+ * no matter whether the controller supports direct mapping or not
+ * @priv: field pointing to controller specific data
+ *
+ * Common part of a direct mapping descriptor. This object is created by
+ * spi_mem_dirmap_create() and the controller implementation of ->dirmap_create()
+ * can create/attach direct mapping resources to the descriptor in the ->priv
+ * field.
+ */
+struct spi_mem_dirmap_desc {
+ struct spi_mem *mem;
+ struct spi_mem_dirmap_info info;
+ unsigned int nodirmap;
+ void *priv;
+};
+
+/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
* @drvpriv: spi_mem_driver private data
@@ -176,10 +222,32 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* Note that if the implementation of this function allocates memory
* dynamically, then it should do so with devm_xxx(), as we don't
* have a ->free_name() function.
+ * @dirmap_create: create a direct mapping descriptor that can later be used to
+ * access the memory device. This method is optional
+ * @dirmap_destroy: destroy a memory descriptor previously created by
+ * ->dirmap_create()
+ * @dirmap_read: read data from the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_read() is responsible for calling it again in
+ * this case.
+ * @dirmap_write: write data to the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_write() is responsible for calling it again in
+ * this case.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
* case for QSPI controllers.
+ *
+ * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct
+ * mapping from the CPU because doing that can stall the CPU waiting for the
+ * SPI mem transaction to finish, and this will make real-time maintainers
+ * unhappy and might make your system less reactive. Instead, drivers should
+ * use DMA to access this direct mapping.
*/
struct spi_controller_mem_ops {
int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
@@ -188,6 +256,12 @@ struct spi_controller_mem_ops {
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
const char *(*get_name)(struct spi_mem *mem);
+ int (*dirmap_create)(struct spi_mem_dirmap_desc *desc);
+ void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc);
+ ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
};
/**
@@ -248,6 +322,15 @@ int spi_mem_exec_op(struct spi_mem *mem,
const char *spi_mem_get_name(struct spi_mem *mem);
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info);
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc);
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
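
To make the dirmap flow above concrete, here is a minimal sketch of a reader built on the new descriptor API; the opcode, mapping length, and the assumption that spi_mem_dirmap_create() returns an ERR_PTR() on failure are illustrative, and the SPI_MEM_OP_* helpers come from earlier in this header:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

static ssize_t example_dirmap_read(struct spi_mem *mem, u64 offs,
				   size_t len, void *buf)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),	/* NOR READ */
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,	/* map the whole hypothetical device */
	};
	struct spi_mem_dirmap_desc *desc;
	ssize_t ret;

	desc = spi_mem_dirmap_create(mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* may return less than @len; real callers loop, as documented above */
	ret = spi_mem_dirmap_read(desc, offs, len, buf);

	spi_mem_dirmap_destroy(desc);
	return ret;
}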
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a64235e05321..314d922ca607 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (C) 2005 David Brownell
+/* SPDX-License-Identifier: GPL-2.0-or-later
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2005 David Brownell
*/
#ifndef __LINUX_SPI_H
@@ -163,10 +154,15 @@ struct spi_device {
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
+#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
+#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
+#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
+#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
+ const char *driver_override;
int cs_gpio; /* chip select gpio */
/* the statistics */
@@ -177,7 +173,6 @@ struct spi_device {
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
- * - drop chipselect after each word
* - chipselect delays
* - ...
*/
@@ -711,6 +706,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
+ * @word_delay: number of clock cycles to delay between words (word size set
+ * by bits_per_word) during transmission.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
@@ -793,6 +790,7 @@ struct spi_transfer {
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
+ u16 word_delay;
struct list_head transfer_list;
};
@@ -1277,7 +1275,6 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
-
/* If you're hotplugging an adapter with devices (parport, usb, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
@@ -1309,6 +1306,22 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
+/* OF support code */
+#if IS_ENABLED(CONFIG_OF)
+
+/* must call put_device() when done with returned spi_device device */
+extern struct spi_device *
+of_find_spi_device_by_node(struct device_node *node);
+
+#else
+
+static inline struct spi_device *
+of_find_spi_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_OF) */
/* Compatibility layer */
#define spi_master spi_controller
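
A short sketch of a client using the two per-word additions above, SPI_CS_WORD and spi_transfer.word_delay; the device, sample buffer and delay value are made up, and a controller that cannot honour SPI_CS_WORD is expected to fail spi_setup():

#include <linux/spi/spi.h>

static int example_word_stream(struct spi_device *spi, const u16 *samples,
			       unsigned int count)
{
	struct spi_transfer xfer = {
		.tx_buf = samples,
		.len = count * sizeof(*samples),
		.bits_per_word = 16,
		.word_delay = 4,	/* clock cycles inserted between words */
	};
	int ret;

	spi->mode |= SPI_CS_WORD;	/* toggle chipselect after each word */
	ret = spi_setup(spi);
	if (ret)
		return ret;

	return spi_sync_transfer(spi, &xfer, 1);
}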
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 3e72a291c401..c614375cd264 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -38,20 +38,20 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key);
-#define init_srcu_struct(sp) \
+#define init_srcu_struct(ssp) \
({ \
static struct lock_class_key __srcu_key; \
\
- __init_srcu_struct((sp), #sp, &__srcu_key); \
+ __init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-int init_srcu_struct(struct srcu_struct *sp);
+int init_srcu_struct(struct srcu_struct *ssp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
struct srcu_struct { };
#endif
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
-int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
-void synchronize_srcu(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void synchronize_srcu(struct srcu_struct *ssp);
/**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, false);
+ _cleanup_srcu_struct(ssp, false);
}
/**
* cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. Also,
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
* (with high probability, anyway), and will also cause the srcu_struct
* to be leaked.
*/
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, true);
+ _cleanup_srcu_struct(ssp, true);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
- * @sp: The srcu_struct structure to check
+ * @ssp: The srcu_struct structure to check
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- return lock_is_held(&sp->dep_map);
+ return lock_is_held(&ssp->dep_map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
return 1;
}
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
* @c: condition to check for update-side use
*
@@ -154,24 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*
* Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
* is enabled, invoking this outside of an RCU read-side critical
* section will result in an RCU-lockdep splat.
*/
-#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
+
+/**
+ * srcu_dereference_notrace - no tracing and no lockdep calls from here
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @ssp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ */
+#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
+ * @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
* critical sections may be nested. However, it is illegal to
@@ -186,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
* was invoked in process context.
*/
-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
- rcu_lock_acquire(&(sp)->dep_map);
+ retval = __srcu_read_lock(ssp);
+ rcu_lock_acquire(&(ssp)->dep_map);
return retval;
}
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
+ retval = __srcu_read_lock(ssp);
return retval;
}
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
+ * @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
*
* Exit an SRCU read-side critical section.
*/
-static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
- __releases(sp)
+static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
{
- rcu_lock_release(&(sp)->dep_map);
- __srcu_read_unlock(sp, idx);
+ rcu_lock_release(&(ssp)->dep_map);
+ __srcu_read_unlock(ssp, idx);
}
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
- __srcu_read_unlock(sp, idx);
+ __srcu_read_unlock(ssp, idx);
}
/**
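
A compact sketch of the read/update pattern these renamed SRCU functions document, using the new ssp-style naming; the data structure, the DEFINE_SRCU() instance and the updater are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_config {
	int value;
};

static struct my_config __rcu *cur_config;
DEFINE_SRCU(config_srcu);

static int config_read_value(void)
{
	struct my_config *cfg;
	int idx, val = -1;

	idx = srcu_read_lock(&config_srcu);	/* readers may sleep inside an SRCU section */
	cfg = srcu_dereference(cur_config, &config_srcu);
	if (cfg)
		val = cfg->value;
	srcu_read_unlock(&config_srcu, idx);	/* must pass back the same idx */

	return val;
}

static void config_update(struct my_config *new_cfg)
{
	struct my_config *old;

	old = rcu_dereference_protected(cur_config, 1);	/* updater-side access */
	rcu_assign_pointer(cur_config, new_cfg);
	synchronize_srcu(&config_srcu);		/* wait for all SRCU readers, may sleep */
	kfree(old);
}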
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index f41d2fb09f87..b19216aaaef2 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
-void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu(struct srcu_struct *ssp);
/*
* Counts the new reader in the appropriate per-CPU element of the
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
* __srcu_read_unlock() must be in the same handler instance. Returns an
* index that must be passed to the matching srcu_read_unlock().
*/
-static inline int __srcu_read_lock(struct srcu_struct *sp)
+static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx);
- WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+ idx = READ_ONCE(ssp->srcu_idx);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx;
}
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
-static inline void srcu_barrier(struct srcu_struct *sp)
+static inline void srcu_barrier(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
/* Defined here to avoid size increase for non-torture kernels. */
-static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx,
- READ_ONCE(sp->srcu_lock_nesting[!idx]),
- READ_ONCE(sp->srcu_lock_nesting[idx]));
+ READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+ READ_ONCE(ssp->srcu_lock_nesting[idx]));
}
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 745d4ca4dd50..6f292bd3e7db 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -51,7 +51,7 @@ struct srcu_data {
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
int cpu;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
};
/*
@@ -105,12 +105,13 @@ struct srcu_struct {
#define SRCU_STATE_SCAN2 2
#define __SRCU_STRUCT_INIT(name, pcpu_name) \
- { \
- .sda = &pcpu_name, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = 0 - 1, \
- __SRCU_DEP_MAP_INIT(name) \
- }
+{ \
+ .sda = &pcpu_name, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .srcu_gp_seq_needed = -1UL, \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
+ __SRCU_DEP_MAP_INIT(name) \
+}
/*
* Define and initialize a srcu struct at build time.
@@ -137,8 +138,8 @@ struct srcu_struct {
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
+void synchronize_srcu_expedited(struct srcu_struct *ssp);
+void srcu_barrier(struct srcu_struct *ssp);
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
#endif
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
new file mode 100644
index 000000000000..3d5c3271a9a8
--- /dev/null
+++ b/include/linux/stackleak.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STACKLEAK_H
+#define _LINUX_STACKLEAK_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define STACKLEAK_POISON -0xBEEF
+#define STACKLEAK_SEARCH_DEPTH 128
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#include <asm/stacktrace.h>
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+# ifdef CONFIG_STACKLEAK_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
index 4b268d86a784..8b369a41c03c 100644
--- a/include/linux/start_kernel.h
+++ b/include/linux/start_kernel.h
@@ -9,5 +9,7 @@
up something else. */
extern asmlinkage void __init start_kernel(void);
+extern void __init arch_call_rest_init(void);
+extern void __ref rest_init(void);
#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
+#define STMMAC_CH_MAX 8
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/string.h b/include/linux/string.h
index 4a5a0eb7df51..7927b875f80c 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -131,6 +131,13 @@ static inline void *memset_p(void **p, void *v, __kernel_size_t n)
return memset64((uint64_t *)p, (uintptr_t)v, n);
}
+extern void **__memcat_p(void **a, void **b);
+#define memcat_p(a, b) ({ \
+ BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \
+ "type mismatch in memcat_p()"); \
+ (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \
+})
+
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
@@ -449,4 +456,24 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len,
memcpy(dest, src, dest_len);
}
+/**
+ * str_has_prefix - Test if a string has a given prefix
+ * @str: The string to test
+ * @prefix: The string to see if @str starts with
+ *
+ * A common way to test a prefix of a string is to do:
+ * strncmp(str, prefix, sizeof(prefix) - 1)
+ *
+ * But this can lead to bugs due to typos, or if prefix is a pointer
+ * and not a constant. Instead use str_has_prefix().
+ *
+ * Returns: 0 if @str does not start with @prefix
+ *	    strlen(@prefix) if @str does start with @prefix
+ */
+static __always_inline size_t str_has_prefix(const char *str, const char *prefix)
+{
+ size_t len = strlen(prefix);
+ return strncmp(str, prefix, len) == 0 ? len : 0;
+}
+
#endif /* _LINUX_STRING_H_ */
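
The kernel-doc above suggests str_has_prefix() as a safer replacement for the strncmp()/sizeof idiom; here is a small sketch (the option string is made up) showing how the returned length doubles as the amount to skip past the matched prefix:

#include <linux/string.h>

static const char *example_parse(const char *cmd)
{
	size_t len;

	len = str_has_prefix(cmd, "trace=");
	if (len)
		return cmd + len;	/* points at the value after "trace=" */

	return NULL;			/* prefix not present */
}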
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index d9af474a857d..eed3cb16ccf1 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -37,21 +37,9 @@
struct rpcsec_gss_info;
-/* auth_cred ac_flags bits */
-enum {
- RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
- RPC_CRED_NOTIFY_TIMEOUT = 2, /* nofity generic cred when underlying
- key will expire soon */
-};
-
-/* Work around the lack of a VFS credential */
struct auth_cred {
- kuid_t uid;
- kgid_t gid;
- struct group_info *group_info;
- const char *principal;
- unsigned long ac_flags;
- unsigned char machine_cred : 1;
+ const struct cred *cred;
+ const char *principal; /* If present, this is a machine credential */
};
/*
@@ -67,9 +55,8 @@ struct rpc_cred {
const struct rpc_credops *cr_ops;
unsigned long cr_expire; /* when to gc */
unsigned long cr_flags; /* various flags */
- atomic_t cr_count; /* ref count */
-
- kuid_t cr_uid;
+ refcount_t cr_count; /* ref count */
+ const struct cred *cr_cred;
/* per-flavor data */
};
@@ -78,8 +65,7 @@ struct rpc_cred {
#define RPCAUTH_CRED_HASHED 2
#define RPCAUTH_CRED_NEGATIVE 3
-/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */
+const struct cred *rpc_machine_cred(void);
/*
* Client authentication handle
@@ -100,7 +86,7 @@ struct rpc_auth {
* differ from the flavor in
* au_ops->au_flavor in gss
* case) */
- atomic_t au_count; /* Reference counter */
+ refcount_t au_count; /* Reference counter */
struct rpc_cred_cache * au_credcache;
/* per-flavor data */
@@ -116,7 +102,6 @@ struct rpc_auth_create_args {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
-#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */
/*
* Client authentication ops
@@ -125,7 +110,8 @@ struct rpc_authops {
struct module *owner;
rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */
char * au_name;
- struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
+ struct rpc_auth * (*create)(const struct rpc_auth_create_args *,
+ struct rpc_clnt *);
void (*destroy)(struct rpc_auth *);
int (*hash_cred)(struct auth_cred *, unsigned int);
@@ -145,7 +131,6 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
- struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int);
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
@@ -154,27 +139,21 @@ struct rpc_credops {
int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
void *, __be32 *, void *);
int (*crkey_timeout)(struct rpc_cred *);
- bool (*crkey_to_expire)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
+ bool (*crneed_reencode)(struct rpc_task *);
};
extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
int __init rpc_init_authunix(void);
-int __init rpc_init_generic_auth(void);
int __init rpcauth_init_module(void);
void rpcauth_remove_module(void);
-void rpc_destroy_generic_auth(void);
void rpc_destroy_authunix(void);
-struct rpc_cred * rpc_lookup_cred(void);
-struct rpc_cred * rpc_lookup_cred_nonblock(void);
-struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
-struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
-struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *,
+struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *,
struct rpc_clnt *);
void rpcauth_release(struct rpc_auth *);
rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
@@ -185,45 +164,24 @@ int rpcauth_list_flavors(rpc_authflavor_t *, int);
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
-struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
void put_rpccred(struct rpc_cred *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
+bool rpcauth_xmit_need_reencode(struct rpc_task *task);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
int rpcauth_uptodatecred(struct rpc_task *);
int rpcauth_init_credcache(struct rpc_auth *);
void rpcauth_destroy_credcache(struct rpc_auth *);
void rpcauth_clear_credcache(struct rpc_cred_cache *);
-int rpcauth_key_timeout_notify(struct rpc_auth *,
- struct rpc_cred *);
-bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
-struct rpc_cred * get_rpccred(struct rpc_cred *cred)
-{
- if (cred != NULL)
- atomic_inc(&cred->cr_count);
- return cred;
-}
-
-/**
- * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer
- * @cred: cred of which to take a reference
- *
- * In some cases, we may have a pointer to a credential to which we
- * want to take a reference, but don't already have one. Because these
- * objects are freed using RCU, we can access the cr_count while its
- * on its way to destruction and only take a reference if it's not already
- * zero.
- */
-static inline struct rpc_cred *
-get_rpccred_rcu(struct rpc_cred *cred)
+struct rpc_cred *get_rpccred(struct rpc_cred *cred)
{
- if (atomic_inc_not_zero(&cred->cr_count))
+ if (cred != NULL && refcount_inc_not_zero(&cred->cr_count))
return cred;
return NULL;
}
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 0c9eac351aab..30427b729070 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -70,6 +70,7 @@ struct gss_cl_ctx {
refcount_t count;
enum rpc_gss_proc gc_proc;
u32 gc_seq;
+ u32 gc_seq_xmit;
spinlock_t gc_seq_lock;
struct gss_ctx *gc_gss_ctx;
struct xdr_netobj gc_wire_ctx;
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 4397a4824c81..d4229a78524a 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
@@ -46,11 +47,14 @@ void xprt_free_bc_rqst(struct rpc_rqst *req);
/*
* Determine if a shared backchannel is in use
*/
-static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
+static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
{
- if (rqstp->rq_server->sv_bc_xprt)
- return 1;
- return 0;
+ return rqstp->rq_server->sv_bc_enabled;
+}
+
+static inline void set_bc_enabled(struct svc_serv *serv)
+{
+ serv->sv_bc_enabled = true;
}
#else /* CONFIG_SUNRPC_BACKCHANNEL */
static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
@@ -59,9 +63,13 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
return 0;
}
-static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
+static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
+{
+ return false;
+}
+
+static inline void set_bc_enabled(struct svc_serv *serv)
{
- return 0;
}
static inline void xprt_free_bc_request(struct rpc_rqst *req)
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 40d2822f0e2f..5a3e95017fc6 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -67,7 +67,7 @@ struct cache_detail {
struct module * owner;
int hash_size;
struct hlist_head * hash_table;
- rwlock_t hash_lock;
+ spinlock_t hash_lock;
char *name;
void (*cache_put)(struct kref *);
@@ -168,8 +168,8 @@ extern const struct file_operations content_file_operations_pipefs;
extern const struct file_operations cache_flush_operations_pipefs;
extern struct cache_head *
-sunrpc_cache_lookup(struct cache_detail *detail,
- struct cache_head *key, int hash);
+sunrpc_cache_lookup_rcu(struct cache_detail *detail,
+ struct cache_head *key, int hash);
extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
@@ -186,6 +186,12 @@ static inline struct cache_head *cache_get(struct cache_head *h)
return h;
}
+static inline struct cache_head *cache_get_rcu(struct cache_head *h)
+{
+ if (kref_get_unless_zero(&h->ref))
+ return h;
+ return NULL;
+}
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
@@ -224,9 +230,9 @@ extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *);
/* Must store cache_detail in seq_file->private if using next three functions */
-extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
-extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
-extern void cache_seq_stop(struct seq_file *file, void *p);
+extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop_rcu(struct seq_file *file, void *p);
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 9b11b6a0978c..1c441714d569 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -66,6 +66,7 @@ struct rpc_clnt {
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
const struct rpc_program *cl_program;
+ const char * cl_principal; /* use for machine cred */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
@@ -127,8 +128,8 @@ struct rpc_create_args {
};
struct rpc_add_xprt_test {
- int (*add_xprt_test)(struct rpc_clnt *,
- struct rpc_xprt *,
+ void (*add_xprt_test)(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt,
void *calldata);
void *data;
};
@@ -156,6 +157,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
+void rpc_task_release_transport(struct rpc_task *);
void rpc_task_release_client(struct rpc_task *);
int rpcb_create_local(struct net *);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 7df625d41e35..02c0412e368c 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_skcipher *tfm,
+ u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* encryption function */
- u32 (*decrypt) (struct crypto_skcipher *tfm,
+ u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* decryption function */
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,17 +98,17 @@ struct krb5_ctx {
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_skcipher *enc;
- struct crypto_skcipher *seq;
- struct crypto_skcipher *acceptor_enc;
- struct crypto_skcipher *initiator_enc;
- struct crypto_skcipher *acceptor_enc_aux;
- struct crypto_skcipher *initiator_enc_aux;
+ struct crypto_sync_skcipher *enc;
+ struct crypto_sync_skcipher *seq;
+ struct crypto_sync_skcipher *acceptor_enc;
+ struct crypto_sync_skcipher *initiator_enc;
+ struct crypto_sync_skcipher *acceptor_enc_aux;
+ struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
s32 endtime;
- u32 seq_send;
- u64 seq_send64;
+ atomic_t seq_send;
+ atomic64_t seq_send64;
struct xdr_netobj mech_used;
u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
@@ -118,8 +118,6 @@ struct krb5_ctx {
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
};
-extern spinlock_t krb5_seq_lock;
-
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -262,24 +260,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
u32
-krb5_encrypt(struct crypto_skcipher *key,
+krb5_encrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
u32
-krb5_decrypt(struct crypto_skcipher *key,
+krb5_decrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_skcipher *key,
+ struct crypto_sync_skcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
@@ -320,12 +318,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
unsigned char *cksum);
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 9baed7b355b2..1b3751327575 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -82,7 +82,7 @@ void rpc_count_iostats(const struct rpc_task *,
struct rpc_iostats *);
void rpc_count_iostats_metrics(const struct rpc_task *,
struct rpc_iostats *);
-void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
+void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *);
void rpc_free_iostats(struct rpc_iostats *);
#else /* CONFIG_PROC_FS */
@@ -95,7 +95,7 @@ static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
{
}
-static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
+static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {}
static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
#endif /* CONFIG_PROC_FS */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 592653becd91..219aa3910a0c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -26,7 +26,7 @@ struct rpc_message {
const struct rpc_procinfo *rpc_proc; /* Procedure information */
void * rpc_argp; /* Arguments */
void * rpc_resp; /* Result */
- struct rpc_cred * rpc_cred; /* Credentials */
+ const struct cred * rpc_cred; /* Credentials */
};
struct rpc_call_ops;
@@ -71,6 +71,7 @@ struct rpc_task {
struct rpc_clnt * tk_client; /* RPC client */
struct rpc_xprt * tk_xprt; /* Transport */
+ struct rpc_cred * tk_op_cred; /* cred being operated on */
struct rpc_rqst * tk_rqstp; /* RPC request */
@@ -105,6 +106,7 @@ struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
struct rpc_xprt *rpc_xprt;
+ struct rpc_cred *rpc_op_cred; /* credential being operated on */
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
@@ -118,6 +120,7 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
@@ -131,7 +134,6 @@ struct rpc_task_setup {
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
-#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
@@ -140,8 +142,9 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_MSG_RECV 3
-#define RPC_TASK_MSG_RECV_WAIT 4
+#define RPC_TASK_NEED_XMIT 3
+#define RPC_TASK_NEED_RECV 4
+#define RPC_TASK_MSG_PIN_WAIT 5
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -188,7 +191,6 @@ struct rpc_timer {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
- pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
@@ -204,7 +206,6 @@ struct rpc_wait_queue {
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
-#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
@@ -234,6 +235,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
+void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
+ struct rpc_task *,
+ int);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 574368e8a16f..e52385340b3b 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -109,7 +109,7 @@ struct svc_serv {
spinlock_t sv_cb_lock; /* protects the svc_cb_list */
wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
* entries in the svc_cb_list */
- struct svc_xprt *sv_bc_xprt; /* callback on fore channel */
+ bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
@@ -295,9 +295,12 @@ struct svc_rqst {
struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
spinlock_t rq_lock; /* per-request lock */
+ struct net *rq_bc_net; /* pointer to backchannel's
+ * net namespace
+ */
};
-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
/*
* Rigorous type checking on sockaddr type conversions
@@ -496,9 +499,11 @@ void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct page **pages,
struct kvec *first, size_t total);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
- struct kvec *first, size_t total);
+ struct kvec *first, void *p,
+ size_t total);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index fd78f78df5c6..981f0d726ad4 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -113,13 +113,14 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
-#define RPCRDMA_LISTEN_BACKLOG 10
-#define RPCRDMA_MAX_REQUESTS 32
-
-/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
- * current NFSv4.1 implementation supports one backchannel slot.
+/*
+ * Default connection parameters
*/
-#define RPCRDMA_MAX_BC_REQUESTS 2
+enum {
+ RPCRDMA_LISTEN_BACKLOG = 10,
+ RPCRDMA_MAX_REQUESTS = 64,
+ RPCRDMA_MAX_BC_REQUESTS = 2,
+};
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
@@ -134,6 +135,7 @@ struct svc_rdma_recv_ctxt {
u32 rc_byte_len;
unsigned int rc_page_count;
unsigned int rc_hdr_count;
+ u32 rc_inv_rkey;
struct page *rc_pages[RPCSVC_MAXPAGES];
};
@@ -191,7 +193,6 @@ extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
-extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
extern struct svc_xprt_class svc_rdma_class;
#ifdef CONFIG_SUNRPC_BACKCHANNEL
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index c3d72066d4b1..b3f9577e17d6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -20,7 +20,6 @@ struct svc_xprt_ops {
struct svc_xprt *(*xpo_accept)(struct svc_xprt *);
int (*xpo_has_wspace)(struct svc_xprt *);
int (*xpo_recvfrom)(struct svc_rqst *);
- void (*xpo_prep_reply_hdr)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
@@ -84,7 +83,6 @@ struct svc_xprt {
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
- struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */
struct net *xpt_net;
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 7c3656505847..3e53a6e2ada7 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -31,6 +31,7 @@ struct svc_cred {
/* name of form servicetype@hostname, passed down by
* rpc.svcgssd, or computed from the above: */
char *cr_principal;
+ char *cr_targ_princ;
struct gss_api_mech *cr_gss_mech;
};
@@ -39,6 +40,7 @@ static inline void init_svc_cred(struct svc_cred *cred)
cred->cr_group_info = NULL;
cred->cr_raw_principal = NULL;
cred->cr_principal = NULL;
+ cred->cr_targ_princ = NULL;
cred->cr_gss_mech = NULL;
}
@@ -48,6 +50,7 @@ static inline void free_svc_cred(struct svc_cred *cred)
put_group_info(cred->cr_group_info);
kfree(cred->cr_raw_principal);
kfree(cred->cr_principal);
+ kfree(cred->cr_targ_princ);
gss_mech_put(cred->cr_gss_mech);
init_svc_cred(cred);
}
@@ -79,6 +82,7 @@ struct auth_domain {
struct hlist_node hash;
char *name;
struct auth_ops *flavour;
+ struct rcu_head rcu_head;
};
/*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2bd68177a442..2ec128060239 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -18,6 +18,7 @@
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
+struct bio_vec;
struct rpc_rqst;
/*
@@ -52,12 +53,14 @@ struct xdr_buf {
struct kvec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
+ struct bio_vec *bvec;
struct page ** pages; /* Array of pages */
unsigned int page_base, /* Start of page data */
page_len, /* Length of page data */
flags; /* Flags for data disposition */
#define XDRBUF_READ 0x01 /* target of file read */
#define XDRBUF_WRITE 0x02 /* source of file write */
+#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */
unsigned int buflen, /* Total length of storage buffer */
len; /* Length of XDR encoded message */
@@ -69,6 +72,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
+ buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
buf->len = 0;
@@ -115,6 +119,9 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
void xdr_terminate_string(struct xdr_buf *, const u32);
+size_t xdr_buf_pagecount(struct xdr_buf *buf);
+int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
+void xdr_free_bvec(struct xdr_buf *buf);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -177,10 +184,7 @@ struct xdr_skb_reader {
typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
-size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
-extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
- struct xdr_skb_reader *, xdr_skb_read_actor);
extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 336fd1a19cca..ad7e910b119d 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -82,7 +82,14 @@ struct rpc_rqst {
struct page **rq_enc_pages; /* scratch pages for use by
gss privacy code */
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
- struct list_head rq_list;
+
+ union {
+ struct list_head rq_list; /* Slot allocation list */
+ struct rb_node rq_recv; /* Receive queue */
+ };
+
+ struct list_head rq_xmit; /* Send queue */
+ struct list_head rq_xmit2; /* Send queue */
void *rq_buffer; /* Call XDR encode buffer */
size_t rq_callsize;
@@ -103,6 +110,7 @@ struct rpc_rqst {
/* A cookie used to track the
state of the transport
connection */
+ atomic_t rq_pin;
/*
* Partial send handling
@@ -133,7 +141,8 @@ struct rpc_xprt_ops {
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- int (*send_request)(struct rpc_task *task);
+ void (*prepare_request)(struct rpc_rqst *req);
+ int (*send_request)(struct rpc_rqst *req);
void (*set_retrans_timeout)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_request)(struct rpc_task *task);
@@ -148,7 +157,6 @@ struct rpc_xprt_ops {
void (*inject_disconnect)(struct rpc_xprt *xprt);
int (*bc_setup)(struct rpc_xprt *xprt,
unsigned int min_reqs);
- int (*bc_up)(struct svc_serv *serv, struct net *net);
size_t (*bc_maxpayload)(struct rpc_xprt *xprt);
void (*bc_free_rqst)(struct rpc_rqst *rqst);
void (*bc_destroy)(struct rpc_xprt *xprt,
@@ -234,9 +242,12 @@ struct rpc_xprt {
*/
spinlock_t transport_lock; /* lock transport info */
spinlock_t reserve_lock; /* lock slot table */
- spinlock_t recv_lock; /* lock receive list */
+ spinlock_t queue_lock; /* send/receive queue lock */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+
+ struct list_head xmit_queue; /* Send queue */
+
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */
@@ -248,7 +259,8 @@ struct rpc_xprt {
struct list_head bc_pa_list; /* List of preallocated
* backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
- struct list_head recv;
+
+ struct rb_root recv_queue; /* Receive queue */
struct {
unsigned long bind_count, /* total number of binds */
@@ -325,15 +337,18 @@ struct xprt_class {
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task);
void xprt_reserve(struct rpc_task *task);
-void xprt_request_init(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
+void xprt_request_enqueue_transmit(struct rpc_task *task);
+void xprt_request_enqueue_receive(struct rpc_task *task);
+void xprt_request_wait_receive(struct rpc_task *task);
+bool xprt_request_need_retransmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
@@ -373,8 +388,8 @@ int xprt_load_transport(const char *);
void xprt_set_retrans_timeout_def(struct rpc_task *task);
void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
-void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
-void xprt_write_space(struct rpc_xprt *xprt);
+void xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
+bool xprt_write_space(struct rpc_xprt *xprt);
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void xprt_update_rtt(struct rpc_task *task);
@@ -382,6 +397,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_pin_rqst(struct rpc_rqst *req);
void xprt_unpin_rqst(struct rpc_rqst *req);
void xprt_release_rqst_cong(struct rpc_task *task);
+bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
@@ -400,6 +416,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
#define XPRT_CONGESTED (9)
+#define XPRT_CWND_WAIT (10)
+#define XPRT_WRITE_SPACE (11)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index ae0f99b9b965..458bfe0137f5 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -30,15 +30,25 @@ struct sock_xprt {
/*
* State of TCP reply receive
*/
- __be32 tcp_fraghdr,
- tcp_xid,
- tcp_calldir;
+ struct {
+ struct {
+ __be32 fraghdr,
+ xid,
+ calldir;
+ } __attribute__((packed));
- u32 tcp_offset,
- tcp_reclen;
+ u32 offset,
+ len;
- unsigned long tcp_copied,
- tcp_flags;
+ unsigned long copied;
+ } recv;
+
+ /*
+ * State of TCP transmit queue
+ */
+ struct {
+ u32 offset;
+ } xmit;
/*
* Connection of transports
@@ -68,20 +78,8 @@ struct sock_xprt {
};
/*
- * TCP receive state flags
- */
-#define TCP_RCV_LAST_FRAG (1UL << 0)
-#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
-#define TCP_RCV_COPY_XID (1UL << 2)
-#define TCP_RCV_COPY_DATA (1UL << 3)
-#define TCP_RCV_READ_CALLDIR (1UL << 4)
-#define TCP_RCV_COPY_CALLDIR (1UL << 5)
-
-/*
* TCP RPC flags
*/
-#define TCP_RPC_REPLY (1UL << 6)
-
#define XPRT_SOCK_CONNECTING 1U
#define XPRT_SOCK_DATA_READY (2)
#define XPRT_SOCK_UPD_TIMEOUT (3)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a28ac9284f0..3f529ad9a9d2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
+extern bool pm_suspend_via_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1a8bd05a335e..622025ac1461 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
struct bio;
+struct pagevec;
+
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
@@ -167,13 +169,14 @@ enum {
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
SWP_BLKDEV = (1 << 6), /* its a block device */
- SWP_FILE = (1 << 7), /* set after swap_activate success */
- SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
- SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
- SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
- SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */
+ SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
+ SWP_FS = (1 << 8), /* swap file goes through fs */
+ SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
+ SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
+ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
+ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
/* add others here before... */
- SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
@@ -232,7 +235,6 @@ struct swap_info_struct {
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
- struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
@@ -273,6 +275,16 @@ struct swap_info_struct {
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
+ struct plist_node avail_lists[0]; /*
+ * entries in swap_avail_heads, one
+ * entry per node.
+ * Must be last, because the array
+ * size is nr_node_ids, which is not
+ * a fixed value, so it has to be
+ * allocated dynamically.
+ * It also has to be an array so
+ * that plist_for_each_* can work.
+ */
};
#ifdef CONFIG_64BIT
@@ -296,23 +308,17 @@ struct vma_swap_readahead {
/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
-bool workingset_refault(void *shadow);
+void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
-/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
-
-/* Returns workingset_update_node() if the mapping has shadow entries. */
-#define workingset_lookup_update(mapping) \
-({ \
- radix_tree_update_node_t __helper = workingset_update_node; \
- if (dax_mapping(mapping) || shmem_mapping(mapping)) \
- __helper = NULL; \
- __helper; \
-})
+/* Only track the nodes of mappings with shadow entries */
+void workingset_update_node(struct xa_node *node);
+#define mapping_set_update(xas, mapping) do { \
+ if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
+ xas_set_update(xas, workingset_update_node); \
+} while (0)
/* linux/mm/page_alloc.c */
-extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);
@@ -362,18 +368,12 @@ extern unsigned long vm_total_pages;
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
-static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
- unsigned int order)
-{
- return 0;
-}
#endif
extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
@@ -408,7 +408,7 @@ extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
-extern void __delete_from_swap_cache(struct page *);
+extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
@@ -447,7 +447,7 @@ extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
+extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
@@ -562,7 +562,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
return -1;
}
-static inline void __delete_from_swap_cache(struct page *page)
+static inline void __delete_from_swap_cache(struct page *page,
+ swp_entry_t entry)
{
}
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 1d3877c39a00..4d961668e5fc 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -4,6 +4,7 @@
#include <linux/radix-tree.h>
#include <linux/bug.h>
+#include <linux/mm_types.h>
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
@@ -17,9 +18,8 @@
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
-#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
- (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
-#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
+#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
+#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
@@ -28,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
swp_entry_t ret;
- ret.val = (type << SWP_TYPE_SHIFT(ret)) |
- (offset & SWP_OFFSET_MASK(ret));
+ ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
return ret;
}
@@ -39,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
*/
static inline unsigned swp_type(swp_entry_t entry)
{
- return (entry.val >> SWP_TYPE_SHIFT(entry));
+ return (entry.val >> SWP_TYPE_SHIFT);
}
/*
@@ -48,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry)
*/
static inline pgoff_t swp_offset(swp_entry_t entry)
{
- return entry.val & SWP_OFFSET_MASK(entry);
+ return entry.val & SWP_OFFSET_MASK;
}
#ifdef CONFIG_MMU
@@ -89,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg)
{
swp_entry_t entry;
- entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ entry.val = xa_to_value(arg);
return entry;
}
static inline void *swp_to_radix_entry(swp_entry_t entry)
{
- unsigned long value;
-
- value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
- return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
+ return xa_mk_value(entry.val);
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
@@ -134,7 +130,7 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
return pfn_to_page(swp_offset(entry));
}
-int device_private_entry_fault(struct vm_area_struct *vma,
+vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
unsigned long addr,
swp_entry_t entry,
unsigned int flags,
@@ -169,7 +165,7 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
return NULL;
}
-static inline int device_private_entry_fault(struct vm_area_struct *vma,
+static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
unsigned long addr,
swp_entry_t entry,
unsigned int flags,
@@ -340,11 +336,6 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
-static inline bool test_set_page_hwpoison(struct page *page)
-{
- return TestSetPageHWPoison(page);
-}
-
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
@@ -367,11 +358,6 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
return 0;
}
-static inline bool test_set_page_hwpoison(struct page *page)
-{
- return false;
-}
-
static inline void num_poisoned_pages_inc(void)
{
}
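
A short worked example of the simplified arch-independent encoding above (a sketch: the function name and the type/offset values are made up for illustration):

static void swp_entry_roundtrip_example(void)
{
	/* Arbitrary type/offset pair, encoded with the helpers above. */
	swp_entry_t e = swp_entry(3, 0x1234);

	WARN_ON(swp_type(e) != 3);		/* e.val >> SWP_TYPE_SHIFT */
	WARN_ON(swp_offset(e) != 0x1234);	/* e.val & SWP_OFFSET_MASK */
}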
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 965be92c33b5..7c007ed7505f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -16,8 +16,6 @@ enum swiotlb_force {
SWIOTLB_NO_FORCE, /* swiotlb=noforce */
};
-extern enum swiotlb_force swiotlb_force;
-
/*
* Maximum allowable number of contiguous slabs to map,
* must be a power of 2. What is the appropriate value ?
@@ -46,9 +44,6 @@ enum dma_sync_target {
SYNC_FOR_DEVICE = 1,
};
-/* define the last possible byte of physical address space as a mapping error */
-#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
-
extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t phys, size_t size,
@@ -65,65 +60,44 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
-/* Accessory functions. */
-
-void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs);
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs);
-
-extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
-extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_exit(void);
+extern enum swiotlb_force swiotlb_force;
+extern phys_addr_t io_tlb_start, io_tlb_end;
+
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_exit(void) { }
-static inline unsigned int swiotlb_max_segment(void) { return 0; }
-#endif
+#define swiotlb_force SWIOTLB_NO_FORCE
+static inline bool is_swiotlb_buffer(phys_addr_t paddr)
+{
+ return false;
+}
+static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
+ dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return false;
+}
+static inline void swiotlb_exit(void)
+{
+}
+static inline unsigned int swiotlb_max_segment(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
-extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
-extern const struct dma_map_ops swiotlb_dma_ops;
-
#endif /* __LINUX_SWIOTLB_H */
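
With swiotlb_force and is_swiotlb_buffer() now provided (or stubbed out) directly by this header, a DMA sync path can test for a bounce buffer inline. A minimal sketch of such a caller, assuming a hypothetical dev_sync_for_cpu() wrapper; only is_swiotlb_buffer() and swiotlb_tbl_sync_single() come from the header:

static void dev_sync_for_cpu(struct device *dev, phys_addr_t paddr,
			     size_t size, enum dma_data_direction dir)
{
	/* Bounce-buffered addresses need the swiotlb copy-back first. */
	if (is_swiotlb_buffer(paddr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

	/* architecture-specific cache maintenance would follow here */
}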
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ff814c92f7f..257cccba3062 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -60,7 +60,7 @@ struct tms;
struct utimbuf;
struct mq_attr;
struct compat_stat;
-struct compat_timeval;
+struct old_timeval32;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
@@ -296,12 +296,18 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout,
+ struct __kernel_timespec __user *timeout,
+ const struct __aio_sigset *sig);
+asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout,
const struct __aio_sigset *sig);
/* fs/xattr.c */
@@ -466,10 +472,16 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd,
/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
- fd_set __user *, struct timespec __user *,
+ fd_set __user *, struct __kernel_timespec __user *,
+ void __user *);
+asmlinkage long sys_pselect6_time32(int, fd_set __user *, fd_set __user *,
+ fd_set __user *, struct old_timespec32 __user *,
void __user *);
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
- struct timespec __user *, const sigset_t __user *,
+ struct __kernel_timespec __user *, const sigset_t __user *,
+ size_t);
+asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
+ struct old_timespec32 __user *, const sigset_t __user *,
size_t);
/* fs/signalfd.c */
@@ -513,7 +525,8 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
- struct timespec __user *utimes, int flags);
+ struct __kernel_timespec __user *utimes,
+ int flags);
/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
@@ -540,7 +553,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct timespec __user *utime, u32 __user *uaddr2,
+ struct __kernel_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
@@ -613,7 +626,7 @@ asmlinkage long sys_sched_yield(void);
asmlinkage long sys_sched_get_priority_max(int policy);
asmlinkage long sys_sched_get_priority_min(int policy);
asmlinkage long sys_sched_rr_get_interval(pid_t pid,
- struct timespec __user *interval);
+ struct __kernel_timespec __user *interval);
/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
@@ -634,7 +647,11 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set,
asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize);
asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
- const struct timespec __user *uts,
+ const struct __kernel_timespec __user *uts,
+ size_t sigsetsize);
+asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
+ siginfo_t __user *uinfo,
+ const struct old_timespec32 __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
@@ -829,7 +846,10 @@ asmlinkage long sys_perf_event_open(
asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
+asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags,
+ struct old_timespec32 __user *timeout);
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
@@ -878,7 +898,7 @@ asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
int newdfd, const char __user *newname,
unsigned int flags);
asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
- const char __user *uargs);
+ void __user *uargs);
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
@@ -954,8 +974,6 @@ asmlinkage long sys_access(const char __user *filename, int mode);
asmlinkage long sys_rename(const char __user *oldname,
const char __user *newname);
asmlinkage long sys_symlink(const char __user *old, const char __user *new);
-asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
asmlinkage long sys_stat64(const char __user *filename,
struct stat64 __user *statbuf);
@@ -985,14 +1003,18 @@ asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
asmlinkage long sys_time(time_t __user *tloc);
+#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
+asmlinkage long sys_utimes(char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+#endif
asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timeval __user *tvp);
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..786816cf4aa5 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr);
+ const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
}
static inline int sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
return 0;
}
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
}
static inline void sysfs_remove_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
}
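
The const-qualified attribute array parameter lets callers keep the pointer array itself in read-only data. A minimal sketch, assuming the kobject already exists; the attribute name and kobject are illustrative:

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}
static struct kobj_attribute foo_attr = __ATTR_RO(foo);

/* With the new prototypes this array can be fully const. */
static const struct attribute *const demo_attrs[] = {
	&foo_attr.attr,
	NULL,			/* array must be NULL-terminated */
};

/* in probe/init code, with demo_kobj created elsewhere: */
ret = sysfs_create_files(demo_kobj, demo_attrs);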
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa7e90c..3e2a80cc7b56 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
static inline u32 t10_pi_ref_tag(struct request *rq)
{
+ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
- return blk_rq_pos(rq) >>
- (rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
- return -1U;
+ if (rq->q->integrity.interval_exp)
+ shift = rq->q->integrity.interval_exp;
#endif
+ return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
extern const struct blk_integrity_profile t10_pi_type1_crc;
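
A worked example of the reference-tag calculation above (numbers chosen arbitrarily):

/*
 * Protection interval of 4096 bytes (integrity.interval_exp = 12),
 * request starting at 512-byte sector 80, SECTOR_SHIFT == 9:
 *
 *	t10_pi_ref_tag(rq) = 80 >> (12 - 9) = 80 >> 3 = 10
 *
 * With no integrity interval registered, the shift falls back to
 * ilog2(queue_logical_block_size(rq->q)); for 512-byte logical blocks
 * that is 9, so the reference tag is simply the starting sector,
 * masked to 32 bits.
 */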
diff --git a/include/linux/tc.h b/include/linux/tc.h
index f92511e57cdb..a60639f37963 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -84,6 +84,7 @@ struct tc_dev {
device. */
struct device dev; /* Generic device interface. */
struct resource resource; /* Address space of this device. */
+ u64 dma_mask; /* DMA addressable range. */
char vendor[9];
char name[9];
char firmware[9];
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 263e37271afd..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
+ u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */
@@ -248,6 +249,9 @@ struct tcp_sock {
syn_smc:1; /* SYN includes SMC */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+
/* RTT measurement */
u64 tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index a2b3dfcee0b5..6cfe05893a76 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -453,6 +453,79 @@ static inline int tee_shm_get_id(struct tee_shm *shm)
*/
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+/**
+ * tee_client_open_context() - Open a TEE context
+ * @start: if not NULL, continue search after this context
+ * @match: function to check TEE device
+ * @data: data for match function
+ * @vers: if not NULL, version data of TEE device of the context returned
+ *
+ * This function does an operation similar to open("/dev/teeX") in user space.
+ * A returned context must be released with tee_client_close_context().
+ *
+ * Returns a TEE context of the first TEE device matched by the match()
+ * callback or an ERR_PTR.
+ */
+struct tee_context *
+tee_client_open_context(struct tee_context *start,
+ int (*match)(struct tee_ioctl_version_data *,
+ const void *),
+ const void *data, struct tee_ioctl_version_data *vers);
+
+/**
+ * tee_client_close_context() - Close a TEE context
+ * @ctx: TEE context to close
+ *
+ * Note that all sessions previously opened with this context will be
+ * closed when this function is called.
+ */
+void tee_client_close_context(struct tee_context *ctx);
+
+/**
+ * tee_client_get_version() - Query version of TEE
+ * @ctx: TEE context of the TEE to query
+ * @vers: Pointer to version data
+ */
+void tee_client_get_version(struct tee_context *ctx,
+ struct tee_ioctl_version_data *vers);
+
+/**
+ * tee_client_open_session() - Open a session to a Trusted Application
+ * @ctx: TEE context
+ * @arg: Open session arguments, see description of
+ * struct tee_ioctl_open_session_arg
+ * @param: Parameters passed to the Trusted Application
+ *
+ * Returns < 0 on error else see @arg->ret for result. If @arg->ret
+ * is TEEC_SUCCESS the session identifier is available in @arg->session.
+ */
+int tee_client_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+
+/**
+ * tee_client_close_session() - Close a session to a Trusted Application
+ * @ctx: TEE Context
+ * @session: Session id
+ *
+ * Returns < 0 on error, else 0. Either way, the session is no longer
+ * valid after this function has returned.
+ */
+int tee_client_close_session(struct tee_context *ctx, u32 session);
+
+/**
+ * tee_client_invoke_func() - Invoke a function in a Trusted Application
+ * @ctx: TEE Context
+ * @arg: Invoke arguments, see description of
+ * struct tee_ioctl_invoke_arg
+ * @param: Parameters passed to the Trusted Application
+ *
+ * Returns < 0 on error else see @arg->ret for result.
+ */
+int tee_client_invoke_func(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+
static inline bool tee_param_is_memref(struct tee_param *param)
{
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
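
Taken together, the exported tee_client_*() helpers above follow the usual open-context / open-session / invoke / close pattern. A condensed sketch of a kernel-side client, with error handling trimmed; the match on OP-TEE, the TA UUID, and the command ID are illustrative assumptions:

static int demo_match(struct tee_ioctl_version_data *ver, const void *data)
{
	return ver->impl_id == TEE_IMPL_ID_OPTEE;	/* pick an OP-TEE device */
}

static int demo_tee_call(void)
{
	struct tee_ioctl_open_session_arg sess_arg = { };
	struct tee_ioctl_invoke_arg inv_arg = { };
	struct tee_param param[4] = { };
	u8 ta_uuid[TEE_IOCTL_UUID_LEN] = { /* hypothetical TA UUID */ };
	struct tee_context *ctx;
	int rc;

	ctx = tee_client_open_context(NULL, demo_match, NULL, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memcpy(sess_arg.uuid, ta_uuid, TEE_IOCTL_UUID_LEN);
	sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
	sess_arg.num_params = 0;
	rc = tee_client_open_session(ctx, &sess_arg, NULL);
	if (rc < 0 || sess_arg.ret)	/* TEE-level result lives in @arg->ret */
		goto out_ctx;

	inv_arg.func = 0x1;		/* hypothetical TA command ID */
	inv_arg.session = sess_arg.session;
	inv_arg.num_params = 4;
	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
	param[0].u.value.a = 42;
	rc = tee_client_invoke_func(ctx, &inv_arg, param);

	tee_client_close_session(ctx, sess_arg.session);
out_ctx:
	tee_client_close_context(ctx);
	return rc;
}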
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
deleted file mode 100644
index 9fb317970c01..000000000000
--- a/include/linux/thinkpad_acpi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __THINKPAD_ACPI_H__
-#define __THINKPAD_ACPI_H__
-
-/* These two functions return 0 if success, or negative error code
- (e g -ENODEV if no led present) */
-
-enum {
- TPACPI_LED_MUTE,
- TPACPI_LED_MICMUTE,
- TPACPI_LED_MAX,
-};
-
-int tpacpi_led_set(int whichled, bool on);
-
-#endif
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index a3ed26082bc1..bf6ec83e60ee 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt service API
*
@@ -5,10 +6,6 @@
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef THUNDERBOLT_H_
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 0b14f936100a..118b9977080c 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -13,6 +13,36 @@
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+struct old_itimerspec32 {
+ struct old_timespec32 it_interval;
+ struct old_timespec32 it_value;
+};
+
+struct old_utimbuf32 {
+ old_time32_t actime;
+ old_time32_t modtime;
+};
+
+extern int get_old_timespec32(struct timespec64 *, const void __user *);
+extern int put_old_timespec32(const struct timespec64 *, void __user *);
+extern int get_old_itimerspec32(struct itimerspec64 *its,
+ const struct old_itimerspec32 __user *uits);
+extern int put_old_itimerspec32(const struct itimerspec64 *its,
+ struct old_itimerspec32 __user *uits);
+
+
#if __BITS_PER_LONG == 64
/* timespec64 is defined as timespec here */
@@ -66,31 +96,6 @@ static inline int timespec_compare(const struct timespec *lhs, const struct time
return lhs->tv_nsec - rhs->tv_nsec;
}
-extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
-
-static inline struct timespec timespec_add(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
- lhs.tv_nsec + rhs.tv_nsec);
- return ts_delta;
-}
-
-/*
- * sub = lhs - rhs, in normalized form
- */
-static inline struct timespec timespec_sub(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
- lhs.tv_nsec - rhs.tv_nsec);
- return ts_delta;
-}
-
/*
* Returns true if the timespec is norm, false if denorm:
*/
@@ -105,16 +110,6 @@ static inline bool timespec_valid(const struct timespec *ts)
return true;
}
-static inline bool timespec_valid_strict(const struct timespec *ts)
-{
- if (!timespec_valid(ts))
- return false;
- /* Disallow values that could overflow ktime_t */
- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
- return false;
- return true;
-}
-
/**
* timespec_to_ns - Convert timespec to nanoseconds
* @ts: pointer to the timespec variable to be converted
@@ -149,19 +144,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_nsec = ns;
}
-/**
- * time_to_tm - converts the calendar time to local broken-down time
- *
- * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
- * Coordinated Universal Time (UTC).
- * @offset offset seconds adding to totalsecs.
- * @result pointer to struct tm variable to receive broken-down time
- */
-static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
-{
- time64_to_tm(totalsecs, offset, result);
-}
-
static inline unsigned long mktime(const unsigned int year,
const unsigned int mon, const unsigned int day,
const unsigned int hour, const unsigned int min,
@@ -183,8 +165,6 @@ static inline bool timeval_valid(const struct timeval *tv)
return true;
}
-extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
-
/**
* timeval_to_ns - Convert timeval to nanoseconds
* @ts: pointer to the timeval variable to be converted
@@ -207,4 +187,18 @@ static inline s64 timeval_to_ns(const struct timeval *tv)
extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
+/*
+ * Old names for the 32-bit time_t interfaces, these will be removed
+ * when everything uses the new names.
+ */
+#define compat_time_t old_time32_t
+#define compat_timeval old_timeval32
+#define compat_timespec old_timespec32
+#define compat_itimerspec old_itimerspec32
+#define ns_to_compat_timeval ns_to_old_timeval32
+#define get_compat_itimerspec64 get_old_itimerspec32
+#define put_compat_itimerspec64 put_old_itimerspec32
+#define compat_get_timespec64 get_old_timespec32
+#define compat_put_timespec64 put_old_timespec32
+
#endif
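
For callers migrating off the compat_* aliases listed above, the replacements are drop-in: a 32-bit timespec from user space is widened into a timespec64 before use. A minimal sketch; the wrapper name is made up:

/* Hypothetical handler taking a 32-bit timespec from user space. */
static long demo_handle_time32(struct old_timespec32 __user *uts)
{
	struct timespec64 ts;

	if (get_old_timespec32(&ts, uts))
		return -EFAULT;

	/* ... operate on the 64-bit representation ... */

	return put_old_timespec32(&ts, uts);
}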
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 5d738804e3d6..a8ab0f143ac4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -258,34 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
- struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
-/*
- * deprecated aliases, don't use in new code
- */
-#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
-#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts)
-#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts)
-#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts)
-
-static inline struct timespec64 current_kernel_time64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_real_ts64(&ts);
-
- return ts;
-}
-
-static inline struct timespec64 get_monotonic_coarse64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_ts64(&ts);
-
- return ts;
-}
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index 8762c2f45f8b..cc59cc9e0e84 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,27 +6,9 @@
* over time so we can remove the file here.
*/
-extern void do_gettimeofday(struct timeval *tv);
-unsigned long get_seconds(void);
-
-static inline struct timespec current_kernel_time(void)
+static inline unsigned long get_seconds(void)
{
- struct timespec64 ts64;
-
- ktime_get_coarse_real_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
-}
-
-/**
- * Deprecated. Use do_settimeofday64().
- */
-static inline int do_settimeofday(const struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ts64 = timespec_to_timespec64(*ts);
- return do_settimeofday64(&ts64);
+ return ktime_get_real_seconds();
}
static inline void getnstimeofday(struct timespec *ts)
@@ -45,14 +27,6 @@ static inline void ktime_get_ts(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline void ktime_get_real_ts(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
static inline void getrawmonotonic(struct timespec *ts)
{
struct timespec64 ts64;
@@ -61,15 +35,6 @@ static inline void getrawmonotonic(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline struct timespec get_monotonic_coarse(void)
-{
- struct timespec64 ts64;
-
- ktime_get_coarse_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
-}
-
static inline void getboottime(struct timespec *ts)
{
struct timespec64 ts64;
@@ -78,23 +43,4 @@ static inline void getboottime(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-/*
- * Timespec interfaces utilizing the ktime based ones
- */
-static inline void get_monotonic_boottime(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_boottime());
-}
-
-static inline void timekeeping_clocktai(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_clocktai());
-}
-
-/*
- * Persistent clock related interfaces
- */
-extern void read_persistent_clock(struct timespec *ts);
-extern int update_persistent_clock(struct timespec now);
-
#endif
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 61dfd93b6ee4..48fad21109fc 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title);
int torture_shutdown_init(int ssecs, void (*cleanup)(void));
/* Task stuttering, which forces load/no-load transitions. */
-void stutter_wait(const char *title);
+bool stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 4609b94142d4..b49a55cf775f 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -53,8 +53,8 @@ struct tpm_class_ops {
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
-extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash);
extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern int tpm_seal_trusted(struct tpm_chip *chip,
@@ -69,15 +69,18 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+
+static inline int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf)
{
return -ENODEV;
}
-static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
+
+static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
const u8 *hash)
{
return -ENODEV;
}
+
static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
return -ENODEV;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 78a010e19ed4..8a62731673f7 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
{
return -EOPNOTSUPP;
}
-static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
return NULL;
}
+static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
@@ -575,7 +579,8 @@ extern int bpf_get_kprobe_info(const struct perf_event *event,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
-extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
+extern int perf_uprobe_init(struct perf_event *event,
+ unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 05589a3e37f4..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just entered the kernel for a system call.
+ * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU has been set,
+ * when the current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.
@@ -123,15 +123,10 @@ static inline __must_check int tracehook_report_syscall_entry(
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
- if (step) {
- siginfo_t info;
- clear_siginfo(&info);
- user_single_step_siginfo(current, regs, &info);
- force_sig_info(SIGTRAP, &info, current);
- return;
- }
-
- ptrace_report_syscall(regs);
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(regs);
}
/**
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 22c5a46e9693..49ba9cde7e4b 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,6 +35,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef const int tracepoint_ptr_t;
+#else
+typedef struct tracepoint * const tracepoint_ptr_t;
+#endif
+
struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 19a690b559ca..9c3186578ce0 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -15,6 +15,7 @@
*/
#include <linux/smp.h>
+#include <linux/srcu.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/cpumask.h>
@@ -33,6 +34,8 @@ struct trace_eval_map {
#define TRACEPOINT_DEFAULT_PRIO 10
+extern struct srcu_struct tracepoint_srcu;
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -75,10 +78,16 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
* probe unregistration and the end of module exit to make sure there is no
* caller executing a probe when it is freed.
*/
+#ifdef CONFIG_TRACEPOINTS
static inline void tracepoint_synchronize_unregister(void)
{
- synchronize_sched();
+ synchronize_srcu(&tracepoint_srcu);
+ synchronize_rcu();
}
+#else
+static inline void tracepoint_synchronize_unregister(void)
+{ }
+#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
extern int syscall_regfunc(void);
@@ -90,6 +99,29 @@ extern void syscall_unregfunc(void);
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return offset_to_ptr(p);
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
+ " .balign 4 \n" \
+ " .long __tracepoint_" #name " - . \n" \
+ " .previous \n")
+#else
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return *p;
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ static tracepoint_ptr_t __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
+ &__tracepoint_##name
+#endif
+
#endif /* _LINUX_TRACEPOINT_H */
/*
@@ -129,18 +161,33 @@ extern void syscall_unregfunc(void);
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
+#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
+ int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
- if (rcucheck) \
+ \
+ /* srcu can't be used from NMI */ \
+ WARN_ON_ONCE(rcuidle && in_nmi()); \
+ \
+ /* keep srcu and sched-rcu usage consistent */ \
+ preempt_disable_notrace(); \
+ \
+ /* \
+ * For rcuidle callers, use srcu since sched-rcu \
+ * doesn't work from the idle path. \
+ */ \
+ if (rcuidle) { \
+ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \
- rcu_read_lock_sched_notrace(); \
- it_func_ptr = rcu_dereference_sched((tp)->funcs); \
+ } \
+ \
+ it_func_ptr = rcu_dereference_raw((tp)->funcs); \
+ \
if (it_func_ptr) { \
do { \
it_func = (it_func_ptr)->func; \
@@ -148,9 +195,13 @@ extern void syscall_unregfunc(void);
((void(*)(proto))(it_func))(args); \
} while ((++it_func_ptr)->func); \
} \
- rcu_read_unlock_sched_notrace(); \
- if (rcucheck) \
+ \
+ if (rcuidle) { \
rcu_irq_exit_irqson(); \
+ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
+ } \
+ \
+ preempt_enable_notrace(); \
} while (0)
#ifndef MODULE
@@ -234,11 +285,9 @@ extern void syscall_unregfunc(void);
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"))) = \
+ __attribute__((section("__tracepoints"), used)) = \
{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
- static struct tracepoint * const __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name;
+ __TRACEPOINT_ENTRY(name);
#define DEFINE_TRACE(name) \
DEFINE_TRACE_FN(name, NULL, NULL);
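With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, each __tracepoints_ptrs entry above is a 32-bit offset relative to its own address rather than an absolute pointer, so the section needs no runtime relocations; tracepoint_ptr_deref() hides that difference from consumers. A minimal sketch of a section walker built on the helper, assuming the conventional __start/__stop linker-provided section bounds (they are not introduced by this patch):

/* Sketch: visit every tracepoint pointer entry, PREL32 offset or plain pointer. */
extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

static void example_for_each_tracepoint(void (*fn)(struct tracepoint *tp,
                                                   void *priv),
                                        void *priv)
{
        tracepoint_ptr_t *p;

        for (p = __start___tracepoints_ptrs;
             p < __stop___tracepoints_ptrs; p++)
                fn(tracepoint_ptr_deref(p), priv);
}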
diff --git a/include/linux/tty.h b/include/linux/tty.h
index c56e3978b00f..bfa4e2ee94a9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -366,6 +366,7 @@ struct tty_file_private {
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
#define TTY_HUPPING 19 /* Hangup in progress */
+#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
@@ -383,6 +384,12 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val)
smp_mb();
}
+static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
+{
+ return file->f_flags & O_NONBLOCK ||
+ test_bit(TTY_LDISC_CHANGING, &tty->flags);
+}
+
static inline bool tty_io_error(struct tty_struct *tty)
{
return test_bit(TTY_IO_ERROR, &tty->flags);
@@ -556,6 +563,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
@@ -746,8 +754,6 @@ static inline int tty_audit_push(void)
/* tty_ioctl.c */
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
/* vt.c */
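tty_io_nonblock() treats a pending line-discipline change (the new TTY_LDISC_CHANGING bit) exactly like O_NONBLOCK, so I/O paths back off instead of sleeping across the switch. A hedged sketch of a caller; the function and the read loop it stands in for are illustrative, not n_tty's actual code:

/* Illustrative only: decide whether this reader may sleep for more input. */
static ssize_t example_wait_or_bail(struct tty_struct *tty, struct file *file)
{
        if (tty_io_nonblock(tty, file))
                return -EAGAIN; /* O_NONBLOCK set or ldisc change pending */

        /* ...otherwise sleep on the read wait queue as before... */
        return 0;
}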
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 71dbc891851a..358446247ccd 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -249,6 +249,7 @@
struct tty_struct;
struct tty_driver;
struct serial_icounter_struct;
+struct serial_struct;
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
@@ -287,6 +288,8 @@ struct tty_operations {
int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+ int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
+ int (*set_serial)(struct tty_struct *tty, struct serial_struct *p);
void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
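The new get_serial()/set_serial() hooks let the tty core implement TIOCGSERIAL/TIOCSSERIAL (including the compat path) once and hand drivers a kernel-space struct serial_struct instead of a __user pointer. A hedged sketch of converted driver hooks; struct example_port and the fields copied are purely illustrative:

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

struct example_port {                   /* hypothetical per-port state */
        unsigned int baud_base;
        unsigned int close_delay;
};

static int example_get_serial(struct tty_struct *tty, struct serial_struct *ss)
{
        struct example_port *port = tty->driver_data;

        ss->line = tty->index;
        ss->baud_base = port->baud_base;
        ss->close_delay = port->close_delay;
        return 0;
}

static int example_set_serial(struct tty_struct *tty, struct serial_struct *ss)
{
        struct example_port *port = tty->driver_data;

        port->close_delay = ss->close_delay;
        return 0;
}

static const struct tty_operations example_ops = {
        .get_serial = example_get_serial,
        .set_serial = example_set_serial,
        /* ...remaining callbacks... */
};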
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 840894ca3fc0..b1e6043e9917 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -54,11 +54,17 @@
* low-level driver can "grab" an ioctl request before the line
 * discipline has a chance to see it.
*
- * long (*compat_ioctl)(struct tty_struct * tty, struct file * file,
+ * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
* unsigned int cmd, unsigned long arg);
*
* Process ioctl calls from 32-bit process on 64-bit system
*
+ * NOTE: this is only for ioctls that are neither "pointer to
+ * compatible structure" nor tty-generic. Something private that
+ * takes an integer or a pointer to a wordsize-sensitive structure
+ * belongs here, but most ldiscs will happily leave it NULL.
+ *
* void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
*
 * This function notifies the line discipline that a change has
@@ -184,7 +190,7 @@ struct tty_ldisc_ops {
const unsigned char *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
- long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
+ int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
__poll_t (*poll)(struct tty_struct *, struct file *,
diff --git a/include/linux/types.h b/include/linux/types.h
index 9834e90aa010..c2615d6a019e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,8 +212,8 @@ struct ustat {
* weird ABI and we need to ask it explicitly.
*
* The alignment is required to guarantee that bit 0 of @next will be
- * clear under normal conditions -- as long as we use call_rcu(),
- * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ * clear under normal conditions -- as long as we use call_rcu() or
+ * call_srcu() to queue the callback.
*
* This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index efe79c1cdd47..37b226e8df13 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,9 +6,6 @@
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
#include <asm/uaccess.h>
@@ -111,7 +108,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(VERIFY_READ, from, n))) {
+ if (likely(access_ok(from, n))) {
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
@@ -129,7 +126,7 @@ static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
}
@@ -160,7 +157,7 @@ static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
- if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ if (access_ok(to, n) && access_ok(from, n))
n = raw_copy_in_user(to, from, n);
return n;
}
@@ -267,7 +264,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
probe_kernel_read(&retval, addr, sizeof(retval))
#ifndef user_access_begin
-#define user_access_begin() do { } while (0)
+#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
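access_ok() loses its VERIFY_READ/VERIFY_WRITE argument, and user_access_begin() now takes the pointer and length and performs that check itself, so callers must test its return value before using the unsafe accessors. A minimal sketch of the resulting pattern:

/* Sketch: check + begin, use the unsafe accessor, always end. */
static int example_read_u32(u32 __user *uptr, u32 *out)
{
        u32 val;

        if (!user_access_begin(uptr, sizeof(*uptr)))
                return -EFAULT;
        unsafe_get_user(val, uptr, efault);
        user_access_end();

        *out = val;
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}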
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 320d49d85484..2725c83395bf 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,13 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+ encap_enabled:1, /* This socket enabled encap
+ * processing; UDP tunnels and
+ * other encapsulation layers set
+ * this
+ */
+ gro_enabled:1; /* Can accept GRO packets */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -71,6 +77,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
@@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
+static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int gso_size;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_size = skb_shinfo(skb)->gso_size;
+ put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
+ }
+}
+
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+ return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
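udp_unexpected_gso() lets the receive path spot a GSO frame landing on a socket that never enabled UDP GRO, and udp_cmsg_recv() reports the segment size to userspace as a UDP_GRO cmsg. A hedged sketch of how the pair could be used on receive; the drop policy here is illustrative rather than the exact udp.c logic:

/* Illustrative receive-side use of the two new helpers. */
static int example_udp_recv_one(struct sock *sk, struct sk_buff *skb,
                                struct msghdr *msg)
{
        /* GSO frame but the socket never asked for GRO: refuse it. */
        if (udp_unexpected_gso(sk, skb))
                return -EINVAL;

        /* Otherwise let userspace know the segment size. */
        udp_cmsg_recv(msg, sk, skb);
        return 0;
}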
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 409c845d4cd3..ecf584f6b82d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
+#include <crypto/hash.h>
#include <uapi/linux/uio.h>
struct page;
@@ -21,15 +22,16 @@ struct kvec {
size_t iov_len;
};
-enum {
+enum iter_type {
ITER_IOVEC = 0,
ITER_KVEC = 2,
ITER_BVEC = 4,
ITER_PIPE = 8,
+ ITER_DISCARD = 16,
};
struct iov_iter {
- int type;
+ unsigned int type;
size_t iov_offset;
size_t count;
union {
@@ -47,6 +49,41 @@ struct iov_iter {
};
};
+static inline enum iter_type iov_iter_type(const struct iov_iter *i)
+{
+ return i->type & ~(READ | WRITE);
+}
+
+static inline bool iter_is_iovec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_IOVEC;
+}
+
+static inline bool iov_iter_is_kvec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_KVEC;
+}
+
+static inline bool iov_iter_is_bvec(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_BVEC;
+}
+
+static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_PIPE;
+}
+
+static inline bool iov_iter_is_discard(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_DISCARD;
+}
+
+static inline unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+ return i->type & (READ | WRITE);
+}
+
/*
* Total number of bytes covered by an iovec.
*
@@ -74,7 +111,8 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
}
#define iov_for_each(iov, iter, start) \
- if (!((start).type & (ITER_BVEC | ITER_PIPE))) \
+ if (iov_iter_type(start) == ITER_IOVEC || \
+ iov_iter_type(start) == ITER_KVEC) \
for (iter = (start); \
(iter).count && \
((iov = iov_iter_iovec(&(iter))), 1); \
@@ -172,7 +210,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
+ if (unlikely(!check_copy_size(addr, bytes, true)))
return 0;
else
return _copy_to_iter_mcsafe(addr, bytes, i);
@@ -181,14 +219,15 @@ size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
-void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
+void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
unsigned long nr_segs, size_t count);
-void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
unsigned long nr_segs, size_t count);
-void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
-void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
size_t count);
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -202,19 +241,6 @@ static inline size_t iov_iter_count(const struct iov_iter *i)
return i->count;
}
-static inline bool iter_is_iovec(const struct iov_iter *i)
-{
- return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
-}
-
-/*
- * Get one of READ or WRITE out of iter->type without any other flags OR'd in
- * with it.
- *
- * The ?: is just for type safety.
- */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
-
/*
* Cap the iov_iter by given limit; note that the second argument is
* *not* the new size - it's upper limit for such. Passing it a value
@@ -241,9 +267,11 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i);
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
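The iterator kind now lives in enum iter_type, with the READ/WRITE direction kept in the low bits of ->type, and open-coded mask tests give way to the predicates above. A minimal sketch of setting up a kvec iterator and querying it, assuming kernel context:

/* Sketch: single-segment kvec iterator plus the new type/direction queries. */
static size_t example_kvec_iter(void *buf, size_t len)
{
        struct kvec kv = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, READ, &kv, 1, len);

        if (iov_iter_is_kvec(&iter) && iov_iter_rw(&iter) == READ)
                return iov_iter_count(&iter);   /* bytes left to transfer */
        return 0;
}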
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6f8b68cd460f..a3cd7cb67a69 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info);
#define UIO_MEM_PHYS 1
#define UIO_MEM_LOGICAL 2
#define UIO_MEM_VIRTUAL 3
+#define UIO_MEM_IOVA 4
/* defines for uio_port->porttype */
#define UIO_PORT_NONE 0
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 5c812acbb80a..235f51b62c71 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -44,6 +44,7 @@ struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
struct umh_info {
+ const char *cmdline;
struct file *pipe_to_umh;
struct file *pipe_from_umh;
pid_t pid;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0a294e950df8..103a48a48872 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -121,8 +121,9 @@ extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
-extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
+extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
@@ -160,6 +161,10 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
return -ENOSYS;
}
+static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+{
+ return -ENOSYS;
+}
static inline int
uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
{
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4cdd515a4385..5e49e82c4368 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -407,11 +407,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
- type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
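__usb_get_extra_descriptor() now takes a minimum length, and the usb_get_extra_descriptor() wrapper passes sizeof(**(ptr)), so a descriptor shorter than the structure the caller is about to dereference is rejected instead of handed back. A hedged sketch of a caller; the descriptor layout and type value are illustrative only:

#include <linux/usb.h>

struct example_class_desc {             /* hypothetical class descriptor */
        __u8   bLength;
        __u8   bDescriptorType;
        __le16 wPayload;
} __attribute__((packed));

/* Returns 0 and sets *desc only if at least sizeof(**desc) bytes exist. */
static int example_get_class_desc(struct usb_host_interface *alt,
                                  struct example_class_desc **desc)
{
        return usb_get_extra_descriptor(alt, 0x41 /* hypothetical type */,
                                        desc);
}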
diff --git a/include/linux/usb/ccid.h b/include/linux/usb/ccid.h
new file mode 100644
index 000000000000..3431446d6864
--- /dev/null
+++ b/include/linux/usb/ccid.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018 Vincent Pelletier
+ */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __CCID_H
+#define __CCID_H
+
+#include <linux/types.h>
+
+#define USB_INTERFACE_CLASS_CCID 0x0b
+
+struct ccid_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 bcdCCID;
+ __u8 bMaxSlotIndex;
+ __u8 bVoltageSupport;
+ __le32 dwProtocols;
+ __le32 dwDefaultClock;
+ __le32 dwMaximumClock;
+ __u8 bNumClockSupported;
+ __le32 dwDataRate;
+ __le32 dwMaxDataRate;
+ __u8 bNumDataRatesSupported;
+ __le32 dwMaxIFSD;
+ __le32 dwSynchProtocols;
+ __le32 dwMechanical;
+ __le32 dwFeatures;
+ __le32 dwMaxCCIDMessageLength;
+ __u8 bClassGetResponse;
+ __u8 bClassEnvelope;
+ __le16 wLcdLayout;
+ __u8 bPINSupport;
+ __u8 bMaxCCIDBusySlots;
+} __attribute__ ((packed));
+
+#endif /* __CCID_H */
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 07f99362bc90..911e05af671e 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -60,9 +60,12 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
+#define CI_HDRC_IMX_IS_HSIC BIT(14)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
+#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
@@ -77,6 +80,12 @@ struct ci_hdrc_platform_data {
struct ci_hdrc_cable vbus_extcon;
struct ci_hdrc_cable id_extcon;
u32 phy_clkgate_delay_us;
+
+ /* pins */
+ struct pinctrl *pctl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_host;
+ struct pinctrl_state *pins_device;
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e5cd84a0f84a..7595056b96c1 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -61,6 +61,8 @@ struct usb_ep;
* invalidated by the error may first be dequeued.
* @context: For use by the completion callback
* @list: For use by the gadget driver.
+ * @frame_number: Reports the interval number in (micro)frame in which the
+ * isochronous transfer was transmitted or received.
* @status: Reports completion code, zero or a negative errno.
* Normally, faults block the transfer queue from advancing until
* the completion callback returns.
@@ -112,6 +114,8 @@ struct usb_request {
void *context;
struct list_head list;
+ unsigned frame_number; /* ISO ONLY */
+
int status;
unsigned actual;
};
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 97e2ddec18b1..7dc3a411bece 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -235,11 +235,6 @@ static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus)
return container_of(bus, struct usb_hcd, self);
}
-struct hcd_timeout { /* timeouts we allocate */
- struct list_head timeout_list;
- struct timer_list timer;
-};
-
/*-------------------------------------------------------------------------*/
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 106551a5616e..1c19f77ed541 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -285,6 +285,8 @@ struct usb_serial_driver {
int (*write_room)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty,
unsigned int cmd, unsigned long arg);
+ int (*get_serial)(struct tty_struct *tty, struct serial_struct *ss);
+ int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
void (*set_termios)(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
void (*break_ctl)(struct tty_struct *tty, int break_state);
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 7e7fbfb84e8e..50c74a77db55 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -89,6 +89,7 @@ struct tcpc_config {
enum typec_port_data data;
enum typec_role default_role;
bool try_role_hw; /* try.{src,snk} implemented in hardware */
+ bool self_powered; /* port belongs to a self powered device */
const struct typec_altmode_desc *alt_modes;
};
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e2ec3582e549..d8860f2d0976 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -28,7 +28,7 @@ struct usbnet {
/* housekeeping */
struct usb_device *udev;
struct usb_interface *intf;
- struct driver_info *driver_info;
+ const struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
wait_queue_head_t wait;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e091f0a11b11..37c9eba75c98 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -28,7 +28,7 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
-extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
+extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
@@ -77,7 +77,8 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
-static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
+static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
+ unsigned long reason)
{
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
+ * @gpu_bound: notify the audio client of the client id once the GPU is bound.
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
+ * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
+ void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
};
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9397628a1967..cb462f9ab7dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -5,6 +5,24 @@
#include <linux/if_vlan.h>
#include <uapi/linux/virtio_net.h>
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_UDP:
+ skb->protocol = cpu_to_be16(ETH_P_IP);
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ skb->protocol = cpu_to_be16(ETH_P_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
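virtio_net_hdr_set_proto() derives skb->protocol from the GSO type when the sender did not provide it, so later GSO processing is not left guessing. A hedged sketch of the intended call order; the surrounding receive function and its error policy are illustrative:

/* Illustrative: fix up skb->protocol before parsing the virtio-net header. */
static int example_virtio_rx(struct sk_buff *skb,
                             const struct virtio_net_hdr *hdr,
                             bool little_endian)
{
        if (!skb->protocol && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE &&
            virtio_net_hdr_set_proto(skb, hdr) < 0)
                return -EINVAL;

        return virtio_net_hdr_to_skb(skb, hdr, little_endian);
}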
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
#ifdef CONFIG_SWAP
SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index 3e9a963edd6a..6fce268a4588 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}
-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index f25cef84b41d..2db8d60981fe 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -239,11 +239,6 @@ extern unsigned long node_page_state(struct pglist_data *pgdat,
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
-#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
-#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
-#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
-#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
-
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 3fd07912909c..8dc77e40bc03 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
int vty_init(const struct file_operations *console_fops);
-static inline bool vt_force_oops_output(struct vc_data *vc)
-{
- if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
- return true;
- return false;
-}
-
extern char vt_dont_switch;
extern int default_utf8;
extern int global_cursor_default;
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 694101f744c7..3111585c371f 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -274,6 +274,8 @@ struct w1_family {
struct w1_family_ops *fops;
+ const struct of_device_id *of_match_table;
+
atomic_t refcnt;
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d9f131ecf708..ed7c122cb31f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1052,10 +1052,9 @@ do { \
__ret; \
})
-#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
- lock, timeout) \
+#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
+ state, 0, timeout, \
spin_unlock_irq(&lock); \
__ret = schedule_timeout(__ret); \
spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do { \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_interruptible_lock_irq_timeout( \
- wq_head, condition, lock, timeout); \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+})
+
+#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_UNINTERRUPTIBLE); \
__ret; \
})
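The refactor threads the task state through a common helper so the interruptible macro and the new uninterruptible wait_event_lock_irq_timeout() share one implementation. A minimal usage sketch; the device structure and condition are illustrative, and the caller must already hold the lock with interrupts disabled, as the macro expects:

struct example_dev {                    /* hypothetical driver state */
        spinlock_t lock;
        wait_queue_head_t wq;
        bool ready;
};

/* Wait up to one second for ->ready under ->lock, uninterruptibly. */
static bool example_wait_ready(struct example_dev *dev)
{
        long left;

        spin_lock_irq(&dev->lock);
        left = wait_event_lock_irq_timeout(dev->wq, dev->ready,
                                           dev->lock, HZ);
        spin_unlock_irq(&dev->lock);

        return left > 0;                /* 0 means it timed out */
}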
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 44985c4a1e86..417d9f37077a 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -90,9 +90,6 @@ struct watchdog_ops {
*
* The driver-data field may not be accessed directly. It must be accessed
* via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
- *
- * The lock field is for watchdog core internal use only and should not be
- * touched.
*/
struct watchdog_device {
int id;
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index d6ba7d39a62f..e497e621dbb7 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -40,6 +40,12 @@ struct wkup_m3_ipc {
struct mbox_chan *mbox;
struct wkup_m3_ipc_ops *ops;
+ int is_rtc_only;
+};
+
+struct wkup_m3_wakeup_src {
+ int irq_nr;
+ char src[10];
};
struct wkup_m3_ipc_ops {
@@ -48,8 +54,11 @@ struct wkup_m3_ipc_ops {
int (*prepare_low_power)(struct wkup_m3_ipc *m3_ipc, int state);
int (*finish_low_power)(struct wkup_m3_ipc *m3_ipc);
int (*request_pm_status)(struct wkup_m3_ipc *m3_ipc);
+ const char *(*request_wake_src)(struct wkup_m3_ipc *m3_ipc);
+ void (*set_rtc_only)(struct wkup_m3_ipc *m3_ipc);
};
struct wkup_m3_ipc *wkup_m3_ipc_get(void);
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc);
+void wkup_m3_set_rtc_only_mode(void);
#endif /* _LINUX_WKUP_M3_IPC_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context.
+ * writeback context. Must be called after the bio has been associated with
+ * a device.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+ bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 2dfc8006fe64..f492e21c4aa2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -4,10 +4,371 @@
/*
* eXtensible Arrays
* Copyright (c) 2017 Microsoft Corporation
- * Author: Matthew Wilcox <mawilcox@microsoft.com>
+ * Author: Matthew Wilcox <willy@infradead.org>
+ *
+ * See Documentation/core-api/xarray.rst for how to use the XArray.
*/
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/*
+ * The bottom two bits of the entry determine how the XArray interprets
+ * the contents:
+ *
+ * 00: Pointer entry
+ * 10: Internal entry
+ * x1: Value entry or tagged pointer
+ *
+ * Attempting to store internal entries in the XArray is a bug.
+ *
+ * Most internal entries are pointers to the next node in the tree.
+ * The following internal entries have a special meaning:
+ *
+ * 0-62: Sibling entries
+ * 256: Zero entry
+ * 257: Retry entry
+ *
+ * Errors are also represented as internal entries, but use the negative
+ * space (-4094 to -2). They're never stored in the slots array; only
+ * returned by the normal API.
+ */
+
+#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
+
+/**
+ * xa_mk_value() - Create an XArray entry from an integer.
+ * @v: Value to store in XArray.
+ *
+ * Context: Any context.
+ * Return: An entry suitable for storing in the XArray.
+ */
+static inline void *xa_mk_value(unsigned long v)
+{
+ WARN_ON((long)v < 0);
+ return (void *)((v << 1) | 1);
+}
+
+/**
+ * xa_to_value() - Get value stored in an XArray entry.
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: The value stored in the XArray entry.
+ */
+static inline unsigned long xa_to_value(const void *entry)
+{
+ return (unsigned long)entry >> 1;
+}
+
+/**
+ * xa_is_value() - Determine if an entry is a value.
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: True if the entry is a value, false if it is a pointer.
+ */
+static inline bool xa_is_value(const void *entry)
+{
+ return (unsigned long)entry & 1;
+}
+
+/**
+ * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
+ * @p: Plain pointer.
+ * @tag: Tag value (0, 1 or 3).
+ *
+ * If the user of the XArray prefers, they can tag their pointers instead
+ * of storing value entries. Three tags are available (0, 1 and 3).
+ * These are distinct from the xa_mark_t as they are not replicated up
+ * through the array and cannot be searched for.
+ *
+ * Context: Any context.
+ * Return: An XArray entry.
+ */
+static inline void *xa_tag_pointer(void *p, unsigned long tag)
+{
+ return (void *)((unsigned long)p | tag);
+}
+
+/**
+ * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
+ * @entry: XArray entry.
+ *
+ * If you have stored a tagged pointer in the XArray, call this function
+ * to get the untagged version of the pointer.
+ *
+ * Context: Any context.
+ * Return: A pointer.
+ */
+static inline void *xa_untag_pointer(void *entry)
+{
+ return (void *)((unsigned long)entry & ~3UL);
+}
+
+/**
+ * xa_pointer_tag() - Get the tag stored in an XArray entry.
+ * @entry: XArray entry.
+ *
+ * If you have stored a tagged pointer in the XArray, call this function
+ * to get the tag of that pointer.
+ *
+ * Context: Any context.
+ * Return: A tag.
+ */
+static inline unsigned int xa_pointer_tag(void *entry)
+{
+ return (unsigned long)entry & 3UL;
+}
+
+/*
+ * xa_mk_internal() - Create an internal entry.
+ * @v: Value to turn into an internal entry.
+ *
+ * Context: Any context.
+ * Return: An XArray internal entry corresponding to this value.
+ */
+static inline void *xa_mk_internal(unsigned long v)
+{
+ return (void *)((v << 2) | 2);
+}
+
+/*
+ * xa_to_internal() - Extract the value from an internal entry.
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: The value which was stored in the internal entry.
+ */
+static inline unsigned long xa_to_internal(const void *entry)
+{
+ return (unsigned long)entry >> 2;
+}
+
+/*
+ * xa_is_internal() - Is the entry an internal entry?
+ * @entry: XArray entry.
+ *
+ * Context: Any context.
+ * Return: %true if the entry is an internal entry.
+ */
+static inline bool xa_is_internal(const void *entry)
+{
+ return ((unsigned long)entry & 3) == 2;
+}
+
+/**
+ * xa_is_err() - Report whether an XArray operation returned an error
+ * @entry: Result from calling an XArray function
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special value indicating an error. This function tells you
+ * whether an error occurred; xa_err() tells you which error occurred.
+ *
+ * Context: Any context.
+ * Return: %true if the entry indicates an error.
+ */
+static inline bool xa_is_err(const void *entry)
+{
+ return unlikely(xa_is_internal(entry));
+}
+
+/**
+ * xa_err() - Turn an XArray result into an errno.
+ * @entry: Result from calling an XArray function.
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special pointer value which encodes an errno. This function extracts
+ * the errno from the pointer value, or returns 0 if the pointer does not
+ * represent an errno.
+ *
+ * Context: Any context.
+ * Return: A negative errno or 0.
+ */
+static inline int xa_err(void *entry)
+{
+ /* xa_to_internal() would not do sign extension. */
+ if (xa_is_err(entry))
+ return (long)entry >> 2;
+ return 0;
+}
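Failed operations come back as error entries rather than NULL, so the usual pattern is to test the returned entry with xa_is_err() and convert it with xa_err(). A minimal sketch, assuming a file-scope array and a context where GFP_KERNEL is allowed:

static DEFINE_XARRAY(example_xa);       /* illustrative array */

/* Store @p at @index, turning an error entry into a plain errno. */
static int example_store(unsigned long index, void *p)
{
        void *old = xa_store(&example_xa, index, p, GFP_KERNEL);

        return xa_is_err(old) ? xa_err(old) : 0;
}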
+
+typedef unsigned __bitwise xa_mark_t;
+#define XA_MARK_0 ((__force xa_mark_t)0U)
+#define XA_MARK_1 ((__force xa_mark_t)1U)
+#define XA_MARK_2 ((__force xa_mark_t)2U)
+#define XA_PRESENT ((__force xa_mark_t)8U)
+#define XA_MARK_MAX XA_MARK_2
+#define XA_FREE_MARK XA_MARK_0
+
+enum xa_lock_type {
+ XA_LOCK_IRQ = 1,
+ XA_LOCK_BH = 2,
+};
+
+/*
+ * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
+ * and we remain compatible with that.
+ */
+#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
+#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
+#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
+#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
+ (__force unsigned)(mark)))
+
+#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
+
+/**
+ * struct xarray - The anchor of the XArray.
+ * @xa_lock: Lock that protects the contents of the XArray.
+ *
+ * To use the xarray, define it statically or embed it in your data structure.
+ * It is a very small data structure, so it does not usually make sense to
+ * allocate it separately and keep a pointer to it in your data structure.
+ *
+ * You may use the xa_lock to protect your own data structures as well.
+ */
+/*
+ * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
+ * If the only non-NULL entry in the array is at index 0, @xa_head is that
+ * entry. If any other entry in the array is non-NULL, @xa_head points
+ * to an @xa_node.
+ */
+struct xarray {
+ spinlock_t xa_lock;
+/* private: The rest of the data structure is not to be used directly. */
+ gfp_t xa_flags;
+ void __rcu * xa_head;
+};
+
+#define XARRAY_INIT(name, flags) { \
+ .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
+ .xa_flags = flags, \
+ .xa_head = NULL, \
+}
+
+/**
+ * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
+ * @name: A string that names your XArray.
+ * @flags: XA_FLAG values.
+ *
+ * This is intended for file scope definitions of XArrays. It declares
+ * and initialises an empty XArray with the chosen name and flags. It is
+ * equivalent to calling xa_init_flags() on the array, but it does the
+ * initialisation at compile time instead of runtime.
+ */
+#define DEFINE_XARRAY_FLAGS(name, flags) \
+ struct xarray name = XARRAY_INIT(name, flags)
+
+/**
+ * DEFINE_XARRAY() - Define an XArray.
+ * @name: A string that names your XArray.
+ *
+ * This is intended for file scope definitions of XArrays. It declares
+ * and initialises an empty XArray with the chosen name. It is equivalent
+ * to calling xa_init() on the array, but it does the initialisation at
+ * compile time instead of runtime.
+ */
+#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
+
+/**
+ * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs.
+ * @name: A string that names your XArray.
+ *
+ * This is intended for file scope definitions of allocating XArrays.
+ * See also DEFINE_XARRAY().
+ */
+#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
+
+void xa_init_flags(struct xarray *, gfp_t flags);
+void *xa_load(struct xarray *, unsigned long index);
+void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
+void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
+ void *entry, gfp_t);
+bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
+void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
+void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
+void *xa_find(struct xarray *xa, unsigned long *index,
+ unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
+void *xa_find_after(struct xarray *xa, unsigned long *index,
+ unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
+unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
+ unsigned long max, unsigned int n, xa_mark_t);
+void xa_destroy(struct xarray *);
+
+/**
+ * xa_init() - Initialise an empty XArray.
+ * @xa: XArray.
+ *
+ * An empty XArray is full of NULL entries.
+ *
+ * Context: Any context.
+ */
+static inline void xa_init(struct xarray *xa)
+{
+ xa_init_flags(xa, 0);
+}
+
+/**
+ * xa_empty() - Determine if an array has any present entries.
+ * @xa: XArray.
+ *
+ * Context: Any context.
+ * Return: %true if the array contains only NULL pointers.
+ */
+static inline bool xa_empty(const struct xarray *xa)
+{
+ return xa->xa_head == NULL;
+}
+
+/**
+ * xa_marked() - Inquire whether any entry in this array has a mark set
+ * @xa: Array
+ * @mark: Mark value
+ *
+ * Context: Any context.
+ * Return: %true if any entry has this mark set.
+ */
+static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
+{
+ return xa->xa_flags & XA_FLAGS_MARK(mark);
+}
+
+/**
+ * xa_for_each() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @entry: Entry retrieved from array.
+ * @index: Index of @entry.
+ * @max: Maximum index to retrieve from array.
+ * @filter: Selection criterion.
+ *
+ * Initialise @index to the lowest index you want to retrieve from the
+ * array. During the iteration, @entry will have the value of the entry
+ * stored in @xa at @index. The iteration will skip all entries in the
+ * array which do not match @filter. You may modify @index during the
+ * iteration if you want to skip or reprocess indices. It is safe to modify
+ * the array during the iteration. At the end of the iteration, @entry will
+ * be set to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
+ * will spin if it hits a retry entry; if you intend to see retry entries,
+ * you should use the xas_for_each() iterator instead. The xas_for_each()
+ * iterator will expand into more inline code than xa_for_each().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each(xa, entry, index, max, filter) \
+ for (entry = xa_find(xa, &index, max, filter); entry; \
+ entry = xa_find_after(xa, &index, max, filter))
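A minimal iteration sketch using the @filter argument: %XA_PRESENT visits every non-NULL entry, while a specific mark such as %XA_MARK_0 would restrict the walk to marked entries (the counting loop itself is illustrative):

/* Count all present entries up to ULONG_MAX. */
static unsigned long example_count(struct xarray *xa)
{
        unsigned long index, n = 0;
        void *entry;

        xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT)
                n++;

        return n;
}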
#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa) spin_lock(&(xa)->xa_lock)
@@ -21,4 +382,1127 @@
#define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+/*
+ * Versions of the normal API which require the caller to hold the
+ * xa_lock. If the GFP flags allow it, they will drop the lock to
+ * allocate memory, then reacquire it afterwards. These functions
+ * may also re-enable interrupts if the XArray flags indicate the
+ * locking should be interrupt safe.
+ */
+void *__xa_erase(struct xarray *, unsigned long index);
+void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
+void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
+ void *entry, gfp_t);
+int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
+void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
+
+/**
+ * __xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use __xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int __xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_store_irq() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
+ * xa_erase_bh() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
+{
+ void *entry;
+
+ xa_lock_bh(xa);
+ entry = __xa_erase(xa, index);
+ xa_unlock_bh(xa);
+
+ return entry;
+}
+
+/**
+ * xa_erase_irq() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument. The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
+{
+ void *entry;
+
+ xa_lock_irq(xa);
+ entry = __xa_erase(xa, index);
+ xa_unlock_irq(xa);
+
+ return entry;
+}
+
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock(xa);
+
+ return curr;
+}
+
+/**
+ * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
+ * xa_alloc() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
+ gfp_t gfp)
+{
+ int err;
+
+ xa_lock(xa);
+ err = __xa_alloc(xa, id, max, entry, gfp);
+ xa_unlock(xa);
+
+ return err;
+}
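For an allocating XArray the caller seeds @id with the lowest acceptable ID and reads the chosen index back from it on success. A minimal sketch, assuming an array defined with DEFINE_XARRAY_ALLOC() and a 64-entry ID space chosen purely for illustration:

static DEFINE_XARRAY_ALLOC(example_ids);        /* tracks free IDs */

/* Give @obj an unused ID in [0, 63]; returns the ID or a negative errno. */
static int example_new_id(void *obj)
{
        u32 id = 0;
        int err = xa_alloc(&example_ids, &id, 63, obj, GFP_KERNEL);

        return err ? err : (int)id;
}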
+
+/**
+ * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
+ gfp_t gfp)
+{
+ int err;
+
+ xa_lock_bh(xa);
+ err = __xa_alloc(xa, id, max, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return err;
+}
+
+/**
+ * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @max: Maximum ID to allocate (inclusive).
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @id and @max.
+ * Updates the @id pointer with the index, then stores the entry at that
+ * index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
+ * there is no more space in the XArray.
+ */
+static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
+ gfp_t gfp)
+{
+ int err;
+
+ xa_lock_irq(xa);
+ err = __xa_alloc(xa, id, max, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return err;
+}
+
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_bh(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_bh(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_irq(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_irq(xa);
+
+ return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+ xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
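xa_reserve() exists for the pattern where memory has to be allocated before a step that must not fail: once the index is reserved, a later store into it needs no allocation, and xa_release() gives the slot back if the operation is abandoned. A hedged sketch:

/* Illustrative: reserve index 0 up front, then either publish or back out. */
static int example_publish(struct xarray *xa, void *obj)
{
        int err = xa_reserve(xa, 0, GFP_KERNEL);

        if (err)
                return err;

        if (!obj) {
                xa_release(xa, 0);      /* nothing to publish after all */
                return -ENOENT;
        }

        /* The slot already exists, so this store will not need memory. */
        xa_store(xa, 0, obj, GFP_KERNEL);
        return 0;
}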
+
+/* Everything below here is the Advanced API. Proceed with caution. */
+
+/*
+ * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
+ * the best chunk size requires some tradeoffs. A power of two recommends
+ * itself so that we can walk the tree based purely on shifts and masks.
+ * Generally, the larger the better; as the number of slots per level of the
+ * tree increases, the less tall the tree needs to be. But that needs to be
+ * balanced against the memory consumption of each node. On a 64-bit system,
+ * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
+ * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
+ */
+#ifndef XA_CHUNK_SHIFT
+#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#endif
+#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT)
+#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1)
+#define XA_MAX_MARKS 3
+#define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
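
For orientation, a short worked calculation of what these constants imply, assuming CONFIG_BASE_SMALL is not set and BITS_PER_LONG == 64:

/*
 * XA_CHUNK_SHIFT == 6, so each node carries XA_CHUNK_SIZE == 64 slots and
 * XA_CHUNK_MASK == 63.  A tree of height h can index 1UL << (6 * h) slots,
 * so spanning the full 64-bit index space needs at most
 * DIV_ROUND_UP(BITS_PER_LONG, XA_CHUNK_SHIFT) == 11 levels.
 */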
+
+/*
+ * @count is the count of every non-NULL element in the ->slots array
+ * whether that is a value entry, a retry entry, a user pointer,
+ * a sibling entry or a pointer to the next level of the tree.
+ * @nr_values is the count of every element in ->slots which is
+ * either a value entry or a sibling of a value entry.
+ */
+struct xa_node {
+ unsigned char shift; /* Bits remaining in each slot */
+ unsigned char offset; /* Slot offset in parent */
+ unsigned char count; /* Total entry count */
+ unsigned char nr_values; /* Value entry count */
+ struct xa_node __rcu *parent; /* NULL at top of tree */
+ struct xarray *array; /* The array we belong to */
+ union {
+ struct list_head private_list; /* For tree user */
+ struct rcu_head rcu_head; /* Used when freeing node */
+ };
+ void __rcu *slots[XA_CHUNK_SIZE];
+ union {
+ unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS];
+ unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS];
+ };
+};
+
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+
+#ifdef XA_DEBUG
+#define XA_BUG_ON(xa, x) do { \
+ if (x) { \
+ xa_dump(xa); \
+ BUG(); \
+ } \
+ } while (0)
+#define XA_NODE_BUG_ON(node, x) do { \
+ if (x) { \
+ if (node) xa_dump_node(node); \
+ BUG(); \
+ } \
+ } while (0)
+#else
+#define XA_BUG_ON(xa, x) do { } while (0)
+#define XA_NODE_BUG_ON(node, x) do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(const struct xarray *xa)
+{
+ return rcu_dereference_check(xa->xa_head,
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_head_locked(const struct xarray *xa)
+{
+ return rcu_dereference_protected(xa->xa_head,
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry(const struct xarray *xa,
+ const struct xa_node *node, unsigned int offset)
+{
+ XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+ return rcu_dereference_check(node->slots[offset],
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry_locked(const struct xarray *xa,
+ const struct xa_node *node, unsigned int offset)
+{
+ XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+ return rcu_dereference_protected(node->slots[offset],
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline struct xa_node *xa_parent(const struct xarray *xa,
+ const struct xa_node *node)
+{
+ return rcu_dereference_check(node->parent,
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
+ const struct xa_node *node)
+{
+ return rcu_dereference_protected(node->parent,
+ lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_mk_node(const struct xa_node *node)
+{
+ return (void *)((unsigned long)node | 2);
+}
+
+/* Private */
+static inline struct xa_node *xa_to_node(const void *entry)
+{
+ return (struct xa_node *)((unsigned long)entry - 2);
+}
+
+/* Private */
+static inline bool xa_is_node(const void *entry)
+{
+ return xa_is_internal(entry) && (unsigned long)entry > 4096;
+}
+
+/* Private */
+static inline void *xa_mk_sibling(unsigned int offset)
+{
+ return xa_mk_internal(offset);
+}
+
+/* Private */
+static inline unsigned long xa_to_sibling(const void *entry)
+{
+ return xa_to_internal(entry);
+}
+
+/**
+ * xa_is_sibling() - Is the entry a sibling entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a sibling entry.
+ */
+static inline bool xa_is_sibling(const void *entry)
+{
+ return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
+ (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
+}
+
+#define XA_ZERO_ENTRY xa_mk_internal(256)
+#define XA_RETRY_ENTRY xa_mk_internal(257)
+
+/**
+ * xa_is_zero() - Is the entry a zero entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a zero entry.
+ */
+static inline bool xa_is_zero(const void *entry)
+{
+ return unlikely(entry == XA_ZERO_ENTRY);
+}
+
+/**
+ * xa_is_retry() - Is the entry a retry entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a retry entry.
+ */
+static inline bool xa_is_retry(const void *entry)
+{
+ return unlikely(entry == XA_RETRY_ENTRY);
+}
+
+/**
+ * typedef xa_update_node_t - A callback function from the XArray.
+ * @node: The node which is being processed
+ *
+ * This function is called every time the XArray updates the count of
+ * present and value entries in a node. It allows advanced users to
+ * maintain the private_list in the node.
+ *
+ * Context: The xa_lock is held and interrupts may be disabled.
+ * Implementations should not drop the xa_lock, nor re-enable
+ * interrupts.
+ */
+typedef void (*xa_update_node_t)(struct xa_node *node);
+
+/*
+ * The xa_state is opaque to its users. It contains various different pieces
+ * of state involved in the current operation on the XArray. It should be
+ * declared on the stack and passed between the various internal routines.
+ * The various elements in it should not be accessed directly, but only
+ * through the provided accessor functions. The below documentation is for
+ * the benefit of those working on the code, not for users of the XArray.
+ *
+ * @xa_node usually points to the xa_node containing the slot we're operating
+ * on (and @xa_offset is the offset in the slots array). If there is a
+ * single entry in the array at index 0, there are no allocated xa_nodes to
+ * point to, and so we store %NULL in @xa_node. @xa_node is set to
+ * the value %XAS_RESTART if the xa_state is not walked to the correct
+ * position in the tree of nodes for this operation. If an error occurs
+ * during an operation, it is set to an %XAS_ERROR value. If we run off the
+ * end of the allocated nodes, it is set to %XAS_BOUNDS.
+ */
+struct xa_state {
+ struct xarray *xa;
+ unsigned long xa_index;
+ unsigned char xa_shift;
+ unsigned char xa_sibs;
+ unsigned char xa_offset;
+ unsigned char xa_pad; /* Helps gcc generate better code */
+ struct xa_node *xa_node;
+ struct xa_node *xa_alloc;
+ xa_update_node_t xa_update;
+};
+
+/*
+ * We encode errnos in the xas->xa_node. If an error has happened, we need to
+ * drop the lock to fix it, and once we've done so the xa_state is invalid.
+ */
+#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
+#define XAS_BOUNDS ((struct xa_node *)1UL)
+#define XAS_RESTART ((struct xa_node *)3UL)
+
+#define __XA_STATE(array, index, shift, sibs) { \
+ .xa = array, \
+ .xa_index = index, \
+ .xa_shift = shift, \
+ .xa_sibs = sibs, \
+ .xa_offset = 0, \
+ .xa_pad = 0, \
+ .xa_node = XAS_RESTART, \
+ .xa_alloc = NULL, \
+ .xa_update = NULL \
+}
+
+/**
+ * XA_STATE() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @array: Array to operate on.
+ * @index: Initial index of interest.
+ *
+ * Declare and initialise an xa_state on the stack.
+ */
+#define XA_STATE(name, array, index) \
+ struct xa_state name = __XA_STATE(array, index, 0, 0)
+
+/**
+ * XA_STATE_ORDER() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @array: Array to operate on.
+ * @index: Initial index of interest.
+ * @order: Order of entry.
+ *
+ * Declare and initialise an xa_state on the stack. This variant of
+ * XA_STATE() allows you to specify the 'order' of the element you
+ * want to operate on.
+ */
+#define XA_STATE_ORDER(name, array, index, order) \
+ struct xa_state name = __XA_STATE(array, \
+ (index >> order) << order, \
+ order - (order % XA_CHUNK_SHIFT), \
+ (1U << (order % XA_CHUNK_SHIFT)) - 1)
+
+#define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
+#define xas_trylock(xas) xa_trylock((xas)->xa)
+#define xas_lock(xas) xa_lock((xas)->xa)
+#define xas_unlock(xas) xa_unlock((xas)->xa)
+#define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+ xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+ xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted. A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+ return xa_err(xas->xa_node);
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * Only call this function with a negative @err; zero or positive errors
+ * will probably not behave the way you think they should. If you want
+ * to clear the error from an xa_state, use xas_reset().
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+ xas->xa_node = XA_ERROR(err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+ return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+ return !xas_invalid(xas);
+}
+
+/**
+ * xas_is_node() - Does the xas point to a node?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas currently references a node.
+ */
+static inline bool xas_is_node(const struct xa_state *xas)
+{
+ return xas_valid(xas) && xas->xa_node;
+}
+
+/* True if the pointer is something other than a node */
+static inline bool xas_not_node(struct xa_node *node)
+{
+ return ((unsigned long)node & 3) || !node;
+}
+
+/* True if the node represents RESTART or an error */
+static inline bool xas_frozen(struct xa_node *node)
+{
+ return (unsigned long)node & 2;
+}
+
+/* True if the node represents head-of-tree, RESTART or BOUNDS */
+static inline bool xas_top(struct xa_node *node)
+{
+ return node <= XAS_RESTART;
+}
+
+/**
+ * xas_reset() - Reset an XArray operation state.
+ * @xas: XArray operation state.
+ *
+ * Resets the error or walk state of the @xas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * xarray lock and want to reuse the xa_state.
+ *
+ * Context: Any context.
+ */
+static inline void xas_reset(struct xa_state *xas)
+{
+ xas->xa_node = XAS_RESTART;
+}
+
+/**
+ * xas_retry() - Retry the operation if appropriate.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * The advanced functions may sometimes return an internal entry, such as
+ * a retry entry or a zero entry. This function sets up the @xas to restart
+ * the walk from the head of the array if needed.
+ *
+ * Context: Any context.
+ * Return: true if the operation needs to be retried.
+ */
+static inline bool xas_retry(struct xa_state *xas, const void *entry)
+{
+ if (xa_is_zero(entry))
+ return true;
+ if (!xa_is_retry(entry))
+ return false;
+ xas_reset(xas);
+ return true;
+}
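
A sketch of the lockless lookup these helpers are built for (roughly the shape of the normal API's load path; the function name is hypothetical):

static void *example_lookup(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;	/* reserved slots read back as NULL */
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}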
+
+void *xas_load(struct xa_state *);
+void *xas_store(struct xa_state *, void *entry);
+void *xas_find(struct xa_state *, unsigned long max);
+void *xas_find_conflict(struct xa_state *);
+
+bool xas_get_mark(const struct xa_state *, xa_mark_t);
+void xas_set_mark(const struct xa_state *, xa_mark_t);
+void xas_clear_mark(const struct xa_state *, xa_mark_t);
+void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
+void xas_init_marks(const struct xa_state *);
+
+bool xas_nomem(struct xa_state *, gfp_t);
+void xas_pause(struct xa_state *);
+
+void xas_create_range(struct xa_state *);
+
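A sketch of the usual store loop built from these primitives (assuming GFP_KERNEL is acceptable; the mark is only set for illustration): xas_store() notes -ENOMEM in the xa_state if it cannot get a node under the lock, and xas_nomem() then allocates with the lock dropped and asks for a retry.

static int example_store(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_set_mark(&xas, XA_MARK_0);	/* optional: tag the new entry */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
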
+/**
+ * xas_reload() - Refetch an entry from the xarray.
+ * @xas: XArray operation state.
+ *
+ * Use this function to check that a previously loaded entry still has
+ * the same value. This is useful for the lockless pagecache lookup where
+ * we walk the array with only the RCU lock to protect us, lock the page,
+ * then check that the page hasn't moved since we looked it up.
+ *
+ * The caller guarantees that @xas is still valid. If it may be in an
+ * error or restart state, call xas_load() instead.
+ *
+ * Return: The entry at this location in the xarray.
+ */
+static inline void *xas_reload(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (node)
+ return xa_entry(xas->xa, node, xas->xa_offset);
+ return xa_head(xas->xa);
+}
+
+/**
+ * xas_set() - Set up XArray operation state for a different index.
+ * @xas: XArray operation state.
+ * @index: New index into the XArray.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see xas_next()
+ * to move to an adjacent index.
+ */
+static inline void xas_set(struct xa_state *xas, unsigned long index)
+{
+ xas->xa_index = index;
+ xas->xa_node = XAS_RESTART;
+}
+
+/**
+ * xas_set_order() - Set up XArray operation state for a multislot entry.
+ * @xas: XArray operation state.
+ * @index: Target of the operation.
+ * @order: Entry occupies 2^@order indices.
+ */
+static inline void xas_set_order(struct xa_state *xas, unsigned long index,
+ unsigned int order)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
+ xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
+ xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ xas->xa_node = XAS_RESTART;
+#else
+ BUG_ON(order > 0);
+ xas_set(xas, index);
+#endif
+}
+
+/**
+ * xas_set_update() - Set up XArray operation state for a callback.
+ * @xas: XArray operation state.
+ * @update: Function to call when updating a node.
+ *
+ * The XArray can notify a caller after it has updated an xa_node.
+ * This is advanced functionality and is only needed by the page cache.
+ */
+static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
+{
+ xas->xa_update = update;
+}
+
+/**
+ * xas_next_entry() - Advance iterator to next present entry.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * xas_next_entry() is an inline function to optimise xarray traversal for
+ * speed. It is equivalent to calling xas_find(), and will call xas_find()
+ * for all the hard cases.
+ *
+ * Return: The next present entry after the one currently referred to by @xas.
+ */
+static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
+{
+ struct xa_node *node = xas->xa_node;
+ void *entry;
+
+ if (unlikely(xas_not_node(node) || node->shift ||
+ xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
+ return xas_find(xas, max);
+
+ do {
+ if (unlikely(xas->xa_index >= max))
+ return xas_find(xas, max);
+ if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
+ return xas_find(xas, max);
+ entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
+ if (unlikely(xa_is_internal(entry)))
+ return xas_find(xas, max);
+ xas->xa_offset++;
+ xas->xa_index++;
+ } while (!entry);
+
+ return entry;
+}
+
+/* Private */
+static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
+ xa_mark_t mark)
+{
+ unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
+ unsigned int offset = xas->xa_offset;
+
+ if (advance)
+ offset++;
+ if (XA_CHUNK_SIZE == BITS_PER_LONG) {
+ if (offset < XA_CHUNK_SIZE) {
+ unsigned long data = *addr & (~0UL << offset);
+ if (data)
+ return __ffs(data);
+ }
+ return XA_CHUNK_SIZE;
+ }
+
+ return find_next_bit(addr, XA_CHUNK_SIZE, offset);
+}
+
+/**
+ * xas_next_marked() - Advance iterator to next marked entry.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark to search for.
+ *
+ * xas_next_marked() is an inline function to optimise xarray traversal for
+ * speed. It is equivalent to calling xas_find_marked(), and will call
+ * xas_find_marked() for all the hard cases.
+ *
+ * Return: The next marked entry after the one currently referred to by @xas.
+ */
+static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
+ xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset;
+
+ if (unlikely(xas_not_node(node) || node->shift))
+ return xas_find_marked(xas, max, mark);
+ offset = xas_find_chunk(xas, true, mark);
+ xas->xa_offset = offset;
+ xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
+ if (xas->xa_index > max)
+ return NULL;
+ if (offset == XA_CHUNK_SIZE)
+ return xas_find_marked(xas, max, mark);
+ return xa_entry(xas->xa, node, offset);
+}
+
+/*
+ * If iterating while holding a lock, drop the lock and reschedule
+ * every %XA_CHECK_SCHED loops.
+ */
+enum {
+ XA_CHECK_SCHED = 4096,
+};
+
+/**
+ * xas_for_each() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ * @max: Maximum index to retrieve from array.
+ *
+ * The loop body will be executed for each entry present in the xarray
+ * between the current xas position and @max. @entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each(xas, entry, max) \
+ for (entry = xas_find(xas, max); entry; \
+ entry = xas_next_entry(xas, max))
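
A sketch of an in-place pruning walk using this iterator (the staleness predicate is a placeholder supplied by the caller); erasing entries never allocates, so no xas_nomem() loop is needed:

static void example_prune(struct xarray *xa, unsigned long start,
			  bool (*stale)(void *entry))
{
	XA_STATE(xas, xa, start);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (stale(entry))
			xas_store(&xas, NULL);	/* erase while iterating */
	}
	xas_unlock(&xas);
}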
+
+/**
+ * xas_for_each_marked() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ * @max: Maximum index to retrieve from array.
+ * @mark: Mark to search for.
+ *
+ * The loop body will be executed for each marked entry in the xarray
+ * between the current xas position and @max. @entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each_marked(xas, entry, max, mark) \
+ for (entry = xas_find_marked(xas, max, mark); entry; \
+ entry = xas_next_marked(xas, max, mark))
+
+/**
+ * xas_for_each_conflict() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ *
+ * The loop body will be executed for each entry in the XArray that lies
+ * within the range specified by @xas. If the loop completes successfully,
+ * any entries that lie in this range will be replaced by @entry. The caller
+ * may break out of the loop; if they do so, the contents of the XArray will
+ * be unchanged. The operation may fail due to an out of memory condition.
+ * The caller may also call xas_set_err() to exit the loop while setting an
+ * error to record the reason.
+ */
+#define xas_for_each_conflict(xas, entry) \
+ while ((entry = xas_find_conflict(xas)))
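
A sketch of the insertion pattern this iterator supports, loosely modelled on page-cache style callers (all names are placeholders): anything other than @expected in the target range is treated as a conflict.

static int example_insert(struct xarray *xa, unsigned long index,
			  void *new, void *expected)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		xas_for_each_conflict(&xas, curr) {
			if (curr != expected)
				xas_set_err(&xas, -EEXIST);
		}
		if (!xas_error(&xas))
			xas_store(&xas, new);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}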
+
+void *__xas_next(struct xa_state *);
+void *__xas_prev(struct xa_state *);
+
+/**
+ * xas_prev() - Move iterator to previous index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * subtracted from the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index 0, this function wraps
+ * around to %ULONG_MAX.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_prev(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (unlikely(xas_not_node(node) || node->shift ||
+ xas->xa_offset == 0))
+ return __xas_prev(xas);
+
+ xas->xa_index--;
+ xas->xa_offset--;
+ return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
+/**
+ * xas_next() - Move state to next index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * added to the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index %ULONG_MAX, this function wraps
+ * around to 0.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_next(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (unlikely(xas_not_node(node) || node->shift ||
+ xas->xa_offset == XA_CHUNK_MASK))
+ return __xas_next(xas);
+
+ xas->xa_index++;
+ xas->xa_offset++;
+ return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
#endif /* _LINUX_XARRAY_H */
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
index 9e1f42cb57e9..52b073fea17f 100644
--- a/include/linux/xxhash.h
+++ b/include/linux/xxhash.h
@@ -107,6 +107,29 @@ uint32_t xxh32(const void *input, size_t length, uint32_t seed);
*/
uint64_t xxh64(const void *input, size_t length, uint64_t seed);
+/**
+ * xxhash() - calculate wordsize hash of the input with a given seed
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * If the hash does not need to be comparable between machines with
+ * different word sizes, this function will call whichever of xxh32()
+ * or xxh64() is faster.
+ *
+ * Return: wordsize hash of the data.
+ */
+static inline unsigned long xxhash(const void *input, size_t length,
+ uint64_t seed)
+{
+#if BITS_PER_LONG == 64
+ return xxh64(input, length, seed);
+#else
+ return xxh32(input, length, seed);
+#endif
+}
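
A small usage sketch (the helper is hypothetical); seed 0 is fine as long as the hash never needs to match another machine's:

#include <linux/xxhash.h>

static unsigned int example_bucket(const void *key, size_t len,
				   unsigned int nr_buckets)
{
	return xxhash(key, len, 0) % nr_buckets;
}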
+
/*-****************************
* Streaming Hash Functions
*****************************/
diff --git a/include/math-emu/op-2.h b/include/math-emu/op-2.h
index 4f26ecc1411b..13a374f51a22 100644
--- a/include/math-emu/op-2.h
+++ b/include/math-emu/op-2.h
@@ -31,61 +31,56 @@
#define _FP_FRAC_HIGH_2(X) (X##_f1)
#define _FP_FRAC_LOW_2(X) (X##_f0)
#define _FP_FRAC_WORD_2(X,w) (X##_f##w)
+#define _FP_FRAC_SLL_2(X, N) ( \
+ (void) (((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ if (__builtin_constant_p(N) && (N) == 1) { \
+ X##_f1 = X##_f1 + X##_f1 + \
+ (((_FP_WS_TYPE) (X##_f0)) < 0); \
+ X##_f0 += X##_f0; \
+ } else { \
+ X##_f1 = X##_f1 << (N) | X##_f0 >> \
+ (_FP_W_TYPE_SIZE - (N)); \
+ X##_f0 <<= (N); \
+ } \
+ 0; \
+ }) \
+ : ({ \
+ X##_f1 = X##_f0 << ((N) - _FP_W_TYPE_SIZE); \
+ X##_f0 = 0; \
+ })))
+
+
+#define _FP_FRAC_SRL_2(X, N) ( \
+ (void) (((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ X##_f0 = X##_f0 >> (N) | X##_f1 << (_FP_W_TYPE_SIZE - (N)); \
+ X##_f1 >>= (N); \
+ }) \
+ : ({ \
+ X##_f0 = X##_f1 >> ((N) - _FP_W_TYPE_SIZE); \
+ X##_f1 = 0; \
+ })))
-#define _FP_FRAC_SLL_2(X,N) \
- do { \
- if ((N) < _FP_W_TYPE_SIZE) \
- { \
- if (__builtin_constant_p(N) && (N) == 1) \
- { \
- X##_f1 = X##_f1 + X##_f1 + (((_FP_WS_TYPE)(X##_f0)) < 0); \
- X##_f0 += X##_f0; \
- } \
- else \
- { \
- X##_f1 = X##_f1 << (N) | X##_f0 >> (_FP_W_TYPE_SIZE - (N)); \
- X##_f0 <<= (N); \
- } \
- } \
- else \
- { \
- X##_f1 = X##_f0 << ((N) - _FP_W_TYPE_SIZE); \
- X##_f0 = 0; \
- } \
- } while (0)
-
-#define _FP_FRAC_SRL_2(X,N) \
- do { \
- if ((N) < _FP_W_TYPE_SIZE) \
- { \
- X##_f0 = X##_f0 >> (N) | X##_f1 << (_FP_W_TYPE_SIZE - (N)); \
- X##_f1 >>= (N); \
- } \
- else \
- { \
- X##_f0 = X##_f1 >> ((N) - _FP_W_TYPE_SIZE); \
- X##_f1 = 0; \
- } \
- } while (0)
/* Right shift with sticky-lsb. */
-#define _FP_FRAC_SRS_2(X,N,sz) \
- do { \
- if ((N) < _FP_W_TYPE_SIZE) \
- { \
- X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N) | \
- (__builtin_constant_p(N) && (N) == 1 \
- ? X##_f0 & 1 \
- : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0)); \
- X##_f1 >>= (N); \
- } \
- else \
- { \
- X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) | \
- (((X##_f1 << (2*_FP_W_TYPE_SIZE - (N))) | X##_f0) != 0)); \
- X##_f1 = 0; \
- } \
- } while (0)
+#define _FP_FRAC_SRS_2(X, N, sz) ( \
+ (void) (((N) < _FP_W_TYPE_SIZE) \
+ ? ({ \
+ X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N) \
+ | (__builtin_constant_p(N) && (N) == 1 \
+ ? X##_f0 & 1 \
+ : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0)); \
+ X##_f1 >>= (N); \
+ }) \
+ : ({ \
+ X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) \
+ | ((((N) == _FP_W_TYPE_SIZE \
+ ? 0 \
+ : (X##_f1 << (2*_FP_W_TYPE_SIZE - (N)))) \
+ | X##_f0) != 0)); \
+ X##_f1 = 0; \
+ })))
#define _FP_FRAC_ADDI_2(X,I) \
__FP_FRAC_ADDI_2(X##_f1, X##_f0, I)
diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h
index 3f284bc03180..5650c1628383 100644
--- a/include/math-emu/soft-fp.h
+++ b/include/math-emu/soft-fp.h
@@ -138,7 +138,7 @@ do { \
_FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \
} while (0)
-#define _FP_ROUND_ZERO(wc, X) 0
+#define _FP_ROUND_ZERO(wc, X) (void)0
#define _FP_ROUND_PINF(wc, X) \
do { \
diff --git a/include/media/cec.h b/include/media/cec.h
index ff9847f7f99d..707411ef8ba2 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -63,7 +63,6 @@ struct cec_data {
struct delayed_work work;
struct completion c;
u8 attempts;
- bool new_initiator;
bool blocking;
bool completed;
};
@@ -156,6 +155,7 @@ struct cec_adapter {
unsigned int transmit_queue_sz;
struct list_head wait_queue;
struct cec_data *transmitting;
+ bool transmit_in_progress;
struct task_struct *kthread_config;
struct completion config_completion;
@@ -174,6 +174,7 @@ struct cec_adapter {
bool is_configuring;
bool is_configured;
bool cec_pin_is_high;
+ u8 last_initiator;
u32 monitor_all_cnt;
u32 monitor_pin_cnt;
u32 follower_cnt;
@@ -198,9 +199,7 @@ struct cec_adapter {
u16 phys_addrs[15];
u32 sequence;
- char device_name[32];
char input_phys[32];
- char input_drv[32];
};
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
@@ -332,67 +331,6 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
unsigned int *offset);
-/**
- * cec_set_edid_phys_addr() - find and set the physical address
- *
- * @edid: pointer to the EDID data
- * @size: size in bytes of the EDID data
- * @phys_addr: the new physical address
- *
- * This function finds the location of the physical address in the EDID
- * and fills in the given physical address and updates the checksum
- * at the end of the EDID block. It does nothing if the EDID doesn't
- * contain a physical address.
- */
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
-
-/**
- * cec_phys_addr_for_input() - calculate the PA for an input
- *
- * @phys_addr: the physical address of the parent
- * @input: the number of the input port, must be between 1 and 15
- *
- * This function calculates a new physical address based on the input
- * port number. For example:
- *
- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
- *
- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
- *
- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
- *
- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
- *
- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
- */
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
-
-/**
- * cec_phys_addr_validate() - validate a physical address from an EDID
- *
- * @phys_addr: the physical address to validate
- * @parent: if not %NULL, then this is filled with the parents PA.
- * @port: if not %NULL, then this is filled with the input port.
- *
- * This validates a physical address as read from an EDID. If the
- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
- * then it will return -EINVAL.
- *
- * The parent PA is passed into %parent and the input port is passed into
- * %port. For example:
- *
- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
- *
- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
- *
- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
- *
- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
- *
- * Return: 0 if the PA is valid, -EINVAL if not.
- */
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-
#else
static inline int cec_register_adapter(struct cec_adapter *adap,
@@ -427,25 +365,6 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
return CEC_PHYS_ADDR_INVALID;
}
-static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
- u16 phys_addr)
-{
-}
-
-static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
- return CEC_PHYS_ADDR_INVALID;
-}
-
-static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
- if (parent)
- *parent = phys_addr;
- if (port)
- *port = 0;
- return 0;
-}
-
#endif
/**
@@ -461,4 +380,74 @@ static inline void cec_phys_addr_invalidate(struct cec_adapter *adap)
cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
}
+/**
+ * cec_get_edid_spa_location() - find location of the Source Physical Address
+ *
+ * @edid: the EDID
+ * @size: the size of the EDID
+ *
+ * The EDID is expected to be CEA-861 compliant, which means that there are
+ * at least two blocks and that one or more of the extension blocks are
+ * CEA-861 blocks.
+ *
+ * The returned location is guaranteed to be <= size-2.
+ *
+ * This is an inline function since it is used by both CEC and V4L2.
+ * Ideally this would go in a module shared by both, but it is overkill to do
+ * that for just a single function.
+ */
+static inline unsigned int cec_get_edid_spa_location(const u8 *edid,
+ unsigned int size)
+{
+ unsigned int blocks = size / 128;
+ unsigned int block;
+ u8 d;
+
+ /* Sanity check: at least 2 blocks and a multiple of the block size */
+ if (blocks < 2 || size % 128)
+ return 0;
+
+ /*
+	 * If the EDID declares fewer extension blocks than the buffer holds,
+	 * then clamp 'blocks' to the declared count. The EDID may declare more
+	 * blocks than the buffer holds, since some hardware can only read e.g.
+	 * 256 bytes of the EDID even though more blocks are present. The first
+	 * CEA-861 extension block should normally be in block 1 anyway.
+ */
+ if (edid[0x7e] + 1 < blocks)
+ blocks = edid[0x7e] + 1;
+
+ for (block = 1; block < blocks; block++) {
+ unsigned int offset = block * 128;
+
+ /* Skip any non-CEA-861 extension blocks */
+ if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
+ continue;
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[offset + 2] & 0x7f;
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ continue;
+ if (d > 4) {
+ unsigned int i = offset + 4;
+ unsigned int end = offset + d;
+
+ /* Note: 'end' is always < 'size' */
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ }
+ return 0;
+}
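
A sketch of a caller (the helper is hypothetical): the two bytes at the returned offset hold the physical address, most significant byte first, and a return of 0 means no SPA was found.

static u16 example_edid_phys_addr(const u8 *edid, unsigned int size)
{
	unsigned int loc = cec_get_edid_spa_location(edid, size);

	if (!loc)
		return CEC_PHYS_ADDR_INVALID;
	return (edid[loc] << 8) | edid[loc + 1];
}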
+
#endif /* _MEDIA_CEC_H */
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 79a566d7defd..5c31a7682492 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -100,10 +100,6 @@ struct vpbe_config {
struct vpbe_device;
struct vpbe_device_ops {
- /* crop cap for the display */
- int (*g_cropcap)(struct vpbe_device *vpbe_dev,
- struct v4l2_cropcap *cropcap);
-
/* Enumerate the outputs */
int (*enum_outputs)(struct vpbe_device *vpbe_dev,
struct v4l2_output *output);
diff --git a/include/media/media-device.h b/include/media/media-device.h
index bcc6ec434f1f..c8ddbfe8b74c 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -27,6 +27,7 @@
struct ida;
struct device;
+struct media_device;
/**
* struct media_entity_notify - Media Entity Notify
@@ -50,10 +51,32 @@ struct media_entity_notify {
* struct media_device_ops - Media device operations
* @link_notify: Link state change notification callback. This callback is
* called with the graph_mutex held.
+ * @req_alloc: Allocate a request. Set this if you need to allocate a struct
+ *	       larger than struct media_request. @req_alloc and @req_free must
+ * either both be set or both be NULL.
+ * @req_free: Free a request. Set this if @req_alloc was set as well, leave
+ * to NULL otherwise.
+ * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex
+ * lock is held when this op is called.
+ * @req_queue: Queue a validated request, cannot fail. If something goes
+ * wrong when queueing this request then it should be marked
+ * as such internally in the driver and any related buffers
+ * must eventually return to vb2 with state VB2_BUF_STATE_ERROR.
+ * The req_queue_mutex lock is held when this op is called.
+ * It is important that vb2 buffer objects are queued last after
+ * all other object types are queued: queueing a buffer kickstarts
+ * the request processing, so all other objects related to the
+ * request (and thus the buffer) must be available to the driver.
+ * And once a buffer is queued, then the driver can complete
+ * or delete objects from the request before req_queue exits.
*/
struct media_device_ops {
int (*link_notify)(struct media_link *link, u32 flags,
unsigned int notification);
+ struct media_request *(*req_alloc)(struct media_device *mdev);
+ void (*req_free)(struct media_request *req);
+ int (*req_validate)(struct media_request *req);
+ void (*req_queue)(struct media_request *req);
};
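
A minimal sketch of a driver wiring up the new hooks (the callback names are hypothetical); @req_alloc and @req_free are left unset, so the core allocates a plain struct media_request:

static int example_req_validate(struct media_request *req);
static void example_req_queue(struct media_request *req);

static const struct media_device_ops example_media_ops = {
	.req_validate	= example_req_validate,
	.req_queue	= example_req_queue,
};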
/**
@@ -88,6 +111,9 @@ struct media_device_ops {
* @disable_source: Disable Source Handler function pointer
*
* @ops: Operation handler callbacks
+ * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t.
+ * other operations that stop or start streaming.
+ * @request_id: Used to generate unique request IDs
*
* This structure represents an abstract high-level media device. It allows easy
* access to entities and provides basic media device-level support. The
@@ -158,6 +184,9 @@ struct media_device {
void (*disable_source)(struct media_entity *entity);
const struct media_device_ops *ops;
+
+ struct mutex req_queue_mutex;
+ atomic_t request_id;
};
/* We don't need to include pci.h or usb.h here */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 3aa3d58d1d58..e5f6960d92f6 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -156,11 +156,40 @@ struct media_link {
};
/**
+ * enum media_pad_signal_type - type of the signal inside a media pad
+ *
+ * @PAD_SIGNAL_DEFAULT:
+ * Default signal. Use this when all inputs or all outputs are
+ * uniquely identified by the pad number.
+ * @PAD_SIGNAL_ANALOG:
+ * The pad contains an analog signal. It can be Radio Frequency,
+ *	Intermediate Frequency, a baseband signal or sub-carriers.
+ * Tuner inputs, IF-PLL demodulators, composite and s-video signals
+ * should use it.
+ * @PAD_SIGNAL_DV:
+ *	Contains a digital video signal, which can be a bitstream of samples
+ *	taken from an analog TV video source. In that case, it usually
+ *	carries the VBI data as well.
+ * @PAD_SIGNAL_AUDIO:
+ * Contains an Intermediate Frequency analog signal from an audio
+ * sub-carrier or an audio bitstream. IF signals are provided by tuners
+ * and consumed by audio AM/FM decoders. Bitstream audio is provided by
+ * an audio decoder.
+ */
+enum media_pad_signal_type {
+ PAD_SIGNAL_DEFAULT = 0,
+ PAD_SIGNAL_ANALOG,
+ PAD_SIGNAL_DV,
+ PAD_SIGNAL_AUDIO,
+};
+
+/**
* struct media_pad - A media pad graph object.
*
* @graph_obj: Embedded structure containing the media object common data
* @entity: Entity this pad belongs to
* @index: Pad index in the entity pads array, numbered from 0 to n
+ * @sig_type: Type of the signal inside a media pad
* @flags: Pad flags, as defined in
* :ref:`include/uapi/linux/media.h <media_header>`
* (seek for ``MEDIA_PAD_FL_*``)
@@ -169,6 +198,7 @@ struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
struct media_entity *entity;
u16 index;
+ enum media_pad_signal_type sig_type;
unsigned long flags;
};
@@ -641,6 +671,24 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
#endif
/**
+ * media_get_pad_index() - retrieves a pad index from an entity
+ *
+ * @entity: entity where the pads belong
+ * @is_sink: true if the pad is a sink, false if it is a source
+ * @sig_type: type of signal of the pad to search for
+ *
+ * This helper function finds the first pad index inside an entity that
+ * satisfies both @is_sink and @sig_type conditions.
+ *
+ * Return:
+ *
+ * On success, return the pad number. If the pad was not found or the media
+ * entity is a NULL pointer, return -EINVAL.
+ */
+int media_get_pad_index(struct media_entity *entity, bool is_sink,
+ enum media_pad_signal_type sig_type);
+
+/**
* media_create_pad_link() - creates a link between two entities.
*
* @source: pointer to &media_entity of the source pad.
diff --git a/include/media/media-request.h b/include/media/media-request.h
new file mode 100644
index 000000000000..bd36d7431698
--- /dev/null
+++ b/include/media/media-request.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Media device request objects
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ */
+
+#ifndef MEDIA_REQUEST_H
+#define MEDIA_REQUEST_H
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/refcount.h>
+
+#include <media/media-device.h>
+
+/**
+ * enum media_request_state - media request state
+ *
+ * @MEDIA_REQUEST_STATE_IDLE: Idle
+ * @MEDIA_REQUEST_STATE_VALIDATING: Validating the request, no state changes
+ * allowed
+ * @MEDIA_REQUEST_STATE_QUEUED: Queued
+ * @MEDIA_REQUEST_STATE_COMPLETE: Completed, the request is done
+ * @MEDIA_REQUEST_STATE_CLEANING: Cleaning, the request is being re-inited
+ * @MEDIA_REQUEST_STATE_UPDATING: The request is being updated, i.e.
+ * request objects are being added,
+ * modified or removed
+ * @NR_OF_MEDIA_REQUEST_STATE: The number of media request states, used
+ * internally for sanity check purposes
+ */
+enum media_request_state {
+ MEDIA_REQUEST_STATE_IDLE,
+ MEDIA_REQUEST_STATE_VALIDATING,
+ MEDIA_REQUEST_STATE_QUEUED,
+ MEDIA_REQUEST_STATE_COMPLETE,
+ MEDIA_REQUEST_STATE_CLEANING,
+ MEDIA_REQUEST_STATE_UPDATING,
+ NR_OF_MEDIA_REQUEST_STATE,
+};
+
+struct media_request_object;
+
+/**
+ * struct media_request - Media device request
+ * @mdev: Media device this request belongs to
+ * @kref: Reference count
+ * @debug_str: Prefix for debug messages (process name:fd)
+ * @state: The state of the request
+ * @updating_count: count the number of request updates that are in progress
+ * @access_count: count the number of request accesses that are in progress
+ * @objects: List of @struct media_request_object request objects
+ * @num_incomplete_objects: The number of incomplete objects in the request
+ * @poll_wait: Wait queue for poll
+ * @lock: Serializes access to this struct
+ */
+struct media_request {
+ struct media_device *mdev;
+ struct kref kref;
+ char debug_str[TASK_COMM_LEN + 11];
+ enum media_request_state state;
+ unsigned int updating_count;
+ unsigned int access_count;
+ struct list_head objects;
+ unsigned int num_incomplete_objects;
+ wait_queue_head_t poll_wait;
+ spinlock_t lock;
+};
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+/**
+ * media_request_lock_for_access - Lock the request to access its objects
+ *
+ * @req: The media request
+ *
+ * Use before accessing a completed request. A reference to the request must
+ * be held during the access. This usually takes place automatically through
+ * a file handle. Use @media_request_unlock_for_access when done.
+ */
+static inline int __must_check
+media_request_lock_for_access(struct media_request *req)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
+ req->access_count++;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return ret;
+}
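
A sketch of the access-locking pattern (the helper is hypothetical); the lock only succeeds once the request has completed:

static int example_read_completed(struct media_request *req)
{
	int ret = media_request_lock_for_access(req);

	if (ret)
		return ret;	/* request not completed yet */

	/* ... read results out of the request's completed objects ... */

	media_request_unlock_for_access(req);
	return 0;
}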
+
+/**
+ * media_request_unlock_for_access - Unlock a request previously locked for
+ * access
+ *
+ * @req: The media request
+ *
+ * Unlock a request that has previously been locked using
+ * @media_request_lock_for_access.
+ */
+static inline void media_request_unlock_for_access(struct media_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (!WARN_ON(!req->access_count))
+ req->access_count--;
+ spin_unlock_irqrestore(&req->lock, flags);
+}
+
+/**
+ * media_request_lock_for_update - Lock the request for updating its objects
+ *
+ * @req: The media request
+ *
+ * Use before updating a request, i.e. adding, modifying or removing a request
+ * object in it. A reference to the request must be held during the update. This
+ * usually takes place automatically through a file handle. Use
+ * @media_request_unlock_for_update when done.
+ */
+static inline int __must_check
+media_request_lock_for_update(struct media_request *req)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_IDLE ||
+ req->state == MEDIA_REQUEST_STATE_UPDATING) {
+ req->state = MEDIA_REQUEST_STATE_UPDATING;
+ req->updating_count++;
+ } else {
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return ret;
+}
+
+/**
+ * media_request_unlock_for_update - Unlock a request previously locked for
+ * update
+ *
+ * @req: The media request
+ *
+ * Unlock a request that has previously been locked using
+ * @media_request_lock_for_update.
+ */
+static inline void media_request_unlock_for_update(struct media_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ WARN_ON(req->updating_count <= 0);
+ if (!--req->updating_count)
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ spin_unlock_irqrestore(&req->lock, flags);
+}
+
+/**
+ * media_request_get - Get the media request
+ *
+ * @req: The media request
+ *
+ * Get the media request.
+ */
+static inline void media_request_get(struct media_request *req)
+{
+ kref_get(&req->kref);
+}
+
+/**
+ * media_request_put - Put the media request
+ *
+ * @req: The media request
+ *
+ * Put the media request. The media request will be released
+ * when the refcount reaches 0.
+ */
+void media_request_put(struct media_request *req);
+
+/**
+ * media_request_get_by_fd - Get a media request by fd
+ *
+ * @mdev: Media device this request belongs to
+ * @request_fd: The file descriptor of the request
+ *
+ * Get the request represented by @request_fd that is owned
+ * by the media device.
+ *
+ * Return a -EACCES error pointer if requests are not supported
+ * by this driver. Return -EINVAL if the request was not found.
+ * Return the pointer to the request if found: the caller will
+ * have to call @media_request_put when it has finished using the
+ * request.
+ */
+struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd);
+
+/**
+ * media_request_alloc - Allocate the media request
+ *
+ * @mdev: Media device this request belongs to
+ * @alloc_fd: Store the request's file descriptor in this int
+ *
+ * Allocate the media request and put the fd in @alloc_fd.
+ */
+int media_request_alloc(struct media_device *mdev,
+ int *alloc_fd);
+
+#else
+
+static inline void media_request_get(struct media_request *req)
+{
+}
+
+static inline void media_request_put(struct media_request *req)
+{
+}
+
+static inline struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd)
+{
+ return ERR_PTR(-EACCES);
+}
+
+#endif
+
+/**
+ * struct media_request_object_ops - Media request object operations
+ * @prepare: Validate and prepare the request object, optional.
+ * @unprepare: Unprepare the request object, optional.
+ * @queue: Queue the request object, optional.
+ * @unbind: Unbind the request object, optional.
+ * @release: Release the request object, required.
+ */
+struct media_request_object_ops {
+ int (*prepare)(struct media_request_object *object);
+ void (*unprepare)(struct media_request_object *object);
+ void (*queue)(struct media_request_object *object);
+ void (*unbind)(struct media_request_object *object);
+ void (*release)(struct media_request_object *object);
+};
+
+/**
+ * struct media_request_object - An opaque object that belongs to a media
+ * request
+ *
+ * @ops: object's operations
+ * @priv: object's priv pointer
+ * @req: the request this object belongs to (can be NULL)
+ * @list: List entry of the object for @struct media_request
+ * @kref: Reference count of the object, acquire before releasing req->lock
+ * @completed: If true, then this object was completed.
+ *
+ * An object related to the request. This struct is always embedded in
+ * another struct that contains the actual data for this request object.
+ */
+struct media_request_object {
+ const struct media_request_object_ops *ops;
+ void *priv;
+ struct media_request *req;
+ struct list_head list;
+ struct kref kref;
+ bool completed;
+};
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+/**
+ * media_request_object_get - Get a media request object
+ *
+ * @obj: The object
+ *
+ * Get a media request object.
+ */
+static inline void media_request_object_get(struct media_request_object *obj)
+{
+ kref_get(&obj->kref);
+}
+
+/**
+ * media_request_object_put - Put a media request object
+ *
+ * @obj: The object
+ *
+ * Put a media request object. Once all references are gone, the
+ * object's memory is released.
+ */
+void media_request_object_put(struct media_request_object *obj);
+
+/**
+ * media_request_object_find - Find an object in a request
+ *
+ * @req: The media request
+ * @ops: Find an object with this ops value
+ * @priv: Find an object with this priv value
+ *
+ * Both @ops and @priv must be non-NULL.
+ *
+ * Returns the object pointer or NULL if not found. The caller must
+ * call media_request_object_put() once it has finished using the object.
+ *
+ * Since this function needs to walk the list of objects it takes
+ * the @req->lock spin lock to make this safe.
+ */
+struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv);
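
A sketch of the find/put pairing (the ops table and priv value stand in for whatever the driver used when binding the object):

static bool example_request_has_object(struct media_request *req,
				       const struct media_request_object_ops *ops,
				       void *priv)
{
	struct media_request_object *obj;

	obj = media_request_object_find(req, ops, priv);
	if (!obj)
		return false;

	/* ... the driver data is the structure that embeds @obj ... */

	media_request_object_put(obj);
	return true;
}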
+
+/**
+ * media_request_object_init - Initialise a media request object
+ *
+ * @obj: The object
+ *
+ * Initialise a media request object. The object will be released using the
+ * release callback of the ops once it has no references (this function
+ * initialises the reference count to one).
+ */
+void media_request_object_init(struct media_request_object *obj);
+
+/**
+ * media_request_object_bind - Bind a media request object to a request
+ *
+ * @req: The media request
+ * @ops: The object ops for this object
+ * @priv: A driver-specific priv pointer associated with this object
+ * @is_buffer: Set to true if the object is a buffer object.
+ * @obj: The object
+ *
+ * Bind this object to the request and set the ops and priv values of
+ * the object so it can be found later with media_request_object_find().
+ *
+ * Every bound object must be unbound or completed by the kernel at some
+ * point in time, otherwise the request will never complete. When the
+ * request is released all completed objects will be unbound by the
+ * request core code.
+ *
+ * Buffer objects will be added to the end of the request's object
+ * list, while non-buffer objects will be added to the front of the list.
+ * This ensures that all buffer objects are at the end of the list
+ * and that all non-buffer objects that they depend on are processed
+ * first.
+ */
+int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj);
+
+/**
+ * media_request_object_unbind - Unbind a media request object
+ *
+ * @obj: The object
+ *
+ * Unbind the media request object from the request.
+ */
+void media_request_object_unbind(struct media_request_object *obj);
+
+/**
+ * media_request_object_complete - Mark the media request object as complete
+ *
+ * @obj: The object
+ *
+ * Mark the media request object as complete. Only bound objects can
+ * be completed.
+ */
+void media_request_object_complete(struct media_request_object *obj);
+
+#else
+
+static inline int __must_check
+media_request_lock_for_access(struct media_request *req)
+{
+ return -EINVAL;
+}
+
+static inline void media_request_unlock_for_access(struct media_request *req)
+{
+}
+
+static inline int __must_check
+media_request_lock_for_update(struct media_request *req)
+{
+ return -EINVAL;
+}
+
+static inline void media_request_unlock_for_update(struct media_request *req)
+{
+}
+
+static inline void media_request_object_get(struct media_request_object *obj)
+{
+}
+
+static inline void media_request_object_put(struct media_request_object *obj)
+{
+}
+
+static inline struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv)
+{
+ return NULL;
+}
+
+static inline void media_request_object_init(struct media_request_object *obj)
+{
+ obj->ops = NULL;
+ obj->req = NULL;
+}
+
+static inline int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj)
+{
+ return 0;
+}
+
+static inline void media_request_object_unbind(struct media_request_object *obj)
+{
+}
+
+static inline void media_request_object_complete(struct media_request_object *obj)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644
index 000000000000..d21f40edc09e
--- /dev/null
+++ b/include/media/mpeg2-ctrls.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the MPEG2 state controls for use with stateless MPEG-2
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _MPEG2_CTRLS_H_
+#define _MPEG2_CTRLS_H_
+
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
+#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
+
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
+
+struct v4l2_mpeg2_sequence {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+ __u8 profile_and_level_indication;
+ __u8 progressive_sequence;
+ __u8 chroma_format;
+ __u8 pad;
+};
+
+struct v4l2_mpeg2_picture {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+ __u8 picture_coding_type;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+ __u8 f_code[2][2];
+ __u8 intra_dc_precision;
+ __u8 picture_structure;
+ __u8 top_field_first;
+ __u8 frame_pred_frame_dct;
+ __u8 concealment_motion_vectors;
+ __u8 q_scale_type;
+ __u8 intra_vlc_format;
+ __u8 alternate_scan;
+ __u8 repeat_first_field;
+ __u8 progressive_frame;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ struct v4l2_mpeg2_sequence sequence;
+ struct v4l2_mpeg2_picture picture;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+ __u8 quantiser_scale_code;
+
+ __u8 backward_ref_index;
+ __u8 forward_ref_index;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+ __u8 load_intra_quantiser_matrix;
+ __u8 load_non_intra_quantiser_matrix;
+ __u8 load_chroma_intra_quantiser_matrix;
+ __u8 load_chroma_non_intra_quantiser_matrix;
+
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
+
+#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 61571773a98d..c0cfbe16a854 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -317,13 +317,6 @@ struct ir_raw_event {
unsigned carrier_report:1;
};
-#define DEFINE_IR_RAW_EVENT(event) struct ir_raw_event event = {}
-
-static inline void init_ir_raw_event(struct ir_raw_event *ev)
-{
- memset(ev, 0, sizeof(*ev));
-}
-
#define IR_DEFAULT_TIMEOUT MS_TO_NS(125)
#define IR_MAX_DURATION 500000000 /* 500 ms */
#define US_TO_NS(usec) ((usec) * 1000)
@@ -344,9 +337,7 @@ int ir_raw_encode_carrier(enum rc_proto protocol);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
- struct ir_raw_event ev = { .reset = true };
-
- ir_raw_event_store(dev, &ev);
+ ir_raw_event_store(dev, &((struct ir_raw_event) { .reset = true }));
dev->idle = true;
ir_raw_event_handle(dev);
}
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index bfa3017cecba..d621acadfbf3 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -277,6 +277,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
#define RC_MAP_SU3000 "rc-su3000"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index b60a7b176c37..179240fb163b 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar-fcp.h -- R-Car Frame Compression Processor Driver
*
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MEDIA_RCAR_FCP_H__
#define __MEDIA_RCAR_FCP_H__
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 1592d323c577..1497bda66c3b 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -20,9 +20,6 @@ struct v4l2_device;
struct v4l2_subdev;
struct v4l2_async_notifier;
-/* A random max subdevice number, used to allocate an array on stack */
-#define V4L2_MAX_SUBDEVS 128U
-
/**
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
@@ -73,6 +70,8 @@ enum v4l2_async_match_type {
* @match.custom.priv:
* Driver-specific private struct with match parameters
* to be used if %V4L2_ASYNC_MATCH_CUSTOM.
+ * @asd_list: used to add struct v4l2_async_subdev objects to the
+ * master notifier @asd_list
* @list: used to link struct v4l2_async_subdev objects, waiting to be
* probed, to a notifier->waiting list
*
@@ -90,14 +89,15 @@ struct v4l2_async_subdev {
unsigned short address;
} i2c;
struct {
- bool (*match)(struct device *,
- struct v4l2_async_subdev *);
+ bool (*match)(struct device *dev,
+ struct v4l2_async_subdev *sd);
void *priv;
} custom;
} match;
/* v4l2-async core private: not to be used by drivers */
struct list_head list;
+ struct list_head asd_list;
};
/**
@@ -121,30 +121,108 @@ struct v4l2_async_notifier_operations {
* struct v4l2_async_notifier - v4l2_device notifier data
*
* @ops: notifier operations
- * @num_subdevs: number of subdevices used in the subdevs array
- * @max_subdevs: number of subdevices allocated in the subdevs array
- * @subdevs: array of pointers to subdevice descriptors
* @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
* @sd: sub-device that registered the notifier, NULL otherwise
* @parent: parent notifier
+ * @asd_list: master list of struct v4l2_async_subdev
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
* @done: list of struct v4l2_subdev, already probed
* @list: member in a global list of notifiers
*/
struct v4l2_async_notifier {
const struct v4l2_async_notifier_operations *ops;
- unsigned int num_subdevs;
- unsigned int max_subdevs;
- struct v4l2_async_subdev **subdevs;
struct v4l2_device *v4l2_dev;
struct v4l2_subdev *sd;
struct v4l2_async_notifier *parent;
+ struct list_head asd_list;
struct list_head waiting;
struct list_head done;
struct list_head list;
};
/**
+ * v4l2_async_notifier_init - Initialize a notifier.
+ *
+ * @notifier: pointer to &struct v4l2_async_notifier
+ *
+ * This function initializes the notifier @asd_list. It must be called
+ * before the first call to @v4l2_async_notifier_add_subdev.
+ */
+void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier);
+
+/**
+ * v4l2_async_notifier_add_subdev - Add an async subdev to the
+ * notifier's master asd list.
+ *
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @asd: pointer to &struct v4l2_async_subdev
+ *
+ * Call this function before registering a notifier to link the
+ * provided asd to the notifier's master @asd_list.
+ */
+int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd);
+
+/**
+ * v4l2_async_notifier_add_fwnode_subdev - Allocate and add a fwnode async
+ * subdev to the notifier's master asd_list.
+ *
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @fwnode: fwnode handle of the sub-device to be matched
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ *
+ * Allocate a fwnode-matched asd of size asd_struct_size, and add it
+ * to the notifier's @asd_list.
+ */
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size);
+
+/**
+ * v4l2_async_notifier_add_i2c_subdev - Allocate and add an i2c async
+ * subdev to the notifier's master asd_list.
+ *
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @adapter_id: I2C adapter ID to be matched
+ * @address: I2C address of sub-device to be matched
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ *
+ * Same as above but for I2C matched sub-devices.
+ */
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
+ int adapter_id, unsigned short address,
+ unsigned int asd_struct_size);
+
+/**
+ * v4l2_async_notifier_add_devname_subdev - Allocate and add a device-name
+ * async subdev to the notifier's master asd_list.
+ *
+ * @notifier: pointer to &struct v4l2_async_notifier
+ * @device_name: device name string to be matched
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ *
+ * Same as above but for device-name matched sub-devices.
+ */
+struct v4l2_async_subdev *
+v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
+ const char *device_name,
+ unsigned int asd_struct_size);
+
+/**
* v4l2_async_notifier_register - registers a subdevice asynchronous notifier
*
* @v4l2_dev: pointer to &struct v4l2_device
@@ -164,7 +242,8 @@ int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
struct v4l2_async_notifier *notifier);
/**
- * v4l2_async_notifier_unregister - unregisters a subdevice asynchronous notifier
+ * v4l2_async_notifier_unregister - unregisters a subdevice
+ * asynchronous notifier
*
* @notifier: pointer to &struct v4l2_async_notifier
*/
@@ -177,7 +256,9 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
* Release memory resources related to a notifier, including the async
* sub-devices allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
- * notifier after calling @v4l2_async_notifier_parse_fwnode_endpoints or
+ * notifier after calling
+ * @v4l2_async_notifier_add_subdev,
+ * @v4l2_async_notifier_parse_fwnode_endpoints or
* @v4l2_fwnode_reference_parse_sensor_common.
*
* There is no harm from calling v4l2_async_notifier_cleanup in other
@@ -213,8 +294,8 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd);
* An error is returned if the module is no longer loaded on any attempts
* to register it.
*/
-int __must_check v4l2_async_register_subdev_sensor_common(
- struct v4l2_subdev *sd);
+int __must_check
+v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd);
/**
* v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
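For illustration, a minimal sketch of how a driver might use the list-based notifier API added above, assuming a hypothetical mydrv_device with notifier/v4l2_dev members and a mydrv_notifier_ops table defined elsewhere; only the v4l2_async_* calls come from this header.

/* Hypothetical probe-time setup using the new asd_list-based API. */
static int mydrv_setup_notifier(struct mydrv_device *dev,
				struct fwnode_handle *ep_fwnode)
{
	struct v4l2_async_subdev *asd;
	int ret;

	v4l2_async_notifier_init(&dev->notifier);

	/* Allocates the asd and links it into the notifier's asd_list. */
	asd = v4l2_async_notifier_add_fwnode_subdev(&dev->notifier, ep_fwnode,
						    sizeof(*asd));
	if (IS_ERR(asd))
		return PTR_ERR(asd);

	dev->notifier.ops = &mydrv_notifier_ops; /* .bound/.complete/.unbind */

	ret = v4l2_async_notifier_register(&dev->v4l2_dev, &dev->notifier);
	if (ret)
		v4l2_async_notifier_cleanup(&dev->notifier);

	return ret;
}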
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index cdc87ec61e54..0c511ed8ffb0 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -155,6 +155,18 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
const unsigned short *probe_addrs);
/**
+ * v4l2_i2c_subdev_set_name - Set name for an I²C sub-device
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @client: pointer to struct i2c_client
+ * @devname: the name of the device; if NULL, the I²C device's name will be used
+ * @postfix: sub-device specific string to put right after the I²C device name;
+ * may be NULL
+ */
+void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd, struct i2c_client *client,
+ const char *devname, const char *postfix);
+
+/**
* v4l2_i2c_subdev_init - Initializes a &struct v4l2_subdev with data from
* an i2c_client struct.
*
@@ -283,7 +295,7 @@ struct v4l2_priv_tun_config {
* @height: pointer to height that will be adjusted if needed.
* @hmin: minimum height.
* @hmax: maximum height.
- * @halign: least significant bit on width.
+ * @halign: least significant bit on height.
* @salign: least significant bit for the image size (e. g.
* :math:`width * height`).
*
@@ -384,4 +396,9 @@ int v4l2_g_parm_cap(struct video_device *vdev,
int v4l2_s_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a);
+/* Compare two v4l2_fract structs */
+#define V4L2_FRACT_COMPARE(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP \
+ (u64)(b).numerator * (a).denominator)
+
#endif /* V4L2_COMMON_H_ */
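A short illustration of the V4L2_FRACT_COMPARE() macro added above: it cross-multiplies the two fractions in 64-bit arithmetic, so no rounding is involved. The sample values are arbitrary.

struct v4l2_fract a = { .numerator = 1, .denominator = 30 };
struct v4l2_fract b = { .numerator = 1, .denominator = 25 };

/* 1 * 25 < 1 * 30, i.e. 1/30 < 1/25: a is the shorter frame period. */
if (V4L2_FRACT_COMPARE(a, <, b))
	pr_debug("a describes the higher frame rate\n");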
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index f615ba1b29dd..d63cf227b0ab 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -20,6 +20,13 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
+#include <media/media-request.h>
+
+/*
+ * Include the mpeg2 stateless codec compound control definitions.
+ * This will move to the public headers once this API is fully stable.
+ */
+#include <media/mpeg2-ctrls.h>
/* forward references */
struct file;
@@ -34,13 +41,15 @@ struct poll_table_struct;
/**
* union v4l2_ctrl_ptr - A pointer to a control value.
- * @p_s32: Pointer to a 32-bit signed value.
- * @p_s64: Pointer to a 64-bit signed value.
- * @p_u8: Pointer to a 8-bit unsigned value.
- * @p_u16: Pointer to a 16-bit unsigned value.
- * @p_u32: Pointer to a 32-bit unsigned value.
- * @p_char: Pointer to a string.
- * @p: Pointer to a compound value.
+ * @p_s32: Pointer to a 32-bit signed value.
+ * @p_s64: Pointer to a 64-bit signed value.
+ * @p_u8: Pointer to a 8-bit unsigned value.
+ * @p_u16: Pointer to a 16-bit unsigned value.
+ * @p_u32: Pointer to a 32-bit unsigned value.
+ * @p_char: Pointer to a string.
+ * @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
+ * @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p: Pointer to a compound value.
*/
union v4l2_ctrl_ptr {
s32 *p_s32;
@@ -49,6 +58,8 @@ union v4l2_ctrl_ptr {
u16 *p_u16;
u32 *p_u32;
char *p_char;
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
+ struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
void *p;
};
@@ -247,6 +258,19 @@ struct v4l2_ctrl {
* @ctrl: The actual control information.
* @helper: Pointer to helper struct. Used internally in
* ``prepare_ext_ctrls`` function at ``v4l2-ctrl.c``.
+ * @from_other_dev: If true, then @ctrl was defined in a device other
+ * than the one that owns this &struct v4l2_ctrl_handler.
+ * @req_done: Internal flag: if the control handler containing this control
+ * reference is bound to a media request, then this is set when
+ * the control has been applied. This prevents applying controls
+ * from a cluster with multiple controls twice (when the first
+ * control of a cluster is applied, they all are).
+ * @req: If set, this refers to another request that sets this control.
+ * @p_req: If the control handler containing this control reference
+ * is bound to a media request, then this points to the
+ * value of the control that should be applied when the request
+ * is executed, or to the value of the control at the time
+ * that the request was completed.
*
* Each control handler has a list of these refs. The list_head is used to
* keep a sorted-by-control-ID list of all controls, while the next pointer
@@ -257,6 +281,10 @@ struct v4l2_ctrl_ref {
struct v4l2_ctrl_ref *next;
struct v4l2_ctrl *ctrl;
struct v4l2_ctrl_helper *helper;
+ bool from_other_dev;
+ bool req_done;
+ struct v4l2_ctrl_ref *req;
+ union v4l2_ctrl_ptr p_req;
};
/**
@@ -280,6 +308,17 @@ struct v4l2_ctrl_ref {
* @notify_priv: Passed as argument to the v4l2_ctrl notify callback.
* @nr_of_buckets: Total number of buckets in the array.
* @error: The error code of the first failed control addition.
+ * @request_is_queued: True if the request was queued.
+ * @requests: List to keep track of open control handler request objects.
+ * For the parent control handler (@req_obj.req == NULL) this
+ * is the list header. When the parent control handler is
+ * removed, it has to unbind and put all these requests since
+ * they refer to the parent.
+ * @requests_queued: List of the queued requests. This determines the order
+ * in which these controls are applied. Once the request is
+ * completed it is removed from this list.
+ * @req_obj: The &struct media_request_object, used to link into a
+ * &struct media_request. This request object has a refcount.
*/
struct v4l2_ctrl_handler {
struct mutex _lock;
@@ -292,6 +331,10 @@ struct v4l2_ctrl_handler {
void *notify_priv;
u16 nr_of_buckets;
int error;
+ bool request_is_queued;
+ struct list_head requests;
+ struct list_head requests_queued;
+ struct media_request_object req_obj;
};
/**
@@ -633,6 +676,8 @@ typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
* @add: The control handler whose controls you want to add to
* the @hdl control handler.
* @filter: This function will filter which controls should be added.
+ * @from_other_dev: If true, then the controls in @add were defined in a
+ * device other than the one that owns @hdl.
*
* Does nothing if either of the two handlers is a NULL pointer.
* If @filter is NULL, then all controls are added. Otherwise only those
@@ -642,7 +687,8 @@ typedef bool (*v4l2_ctrl_filter)(const struct v4l2_ctrl *ctrl);
*/
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
- v4l2_ctrl_filter filter);
+ v4l2_ctrl_filter filter,
+ bool from_other_dev);
/**
* v4l2_ctrl_radio_filter() - Standard filter for radio controls.
@@ -729,6 +775,22 @@ struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
/**
+ * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab.
+ *
+ * @ctrl: The control to (de)activate.
+ * @grabbed: True if the control should become grabbed.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ * This will usually be called when starting or stopping streaming in the
+ * driver.
+ *
+ * This function assumes that the control handler is locked by the caller.
+ */
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
+
+/**
* v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
*
* @ctrl: The control to (de)activate.
@@ -743,7 +805,15 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
* This function assumes that the control handler is not locked and will
* take the lock itself.
*/
-void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
+static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ __v4l2_ctrl_grab(ctrl, grabbed);
+ v4l2_ctrl_unlock(ctrl);
+}
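As a usage note, drivers typically grab controls that must not change while streaming from their start-streaming path and release them again on stop; a hedged sketch with hypothetical mydrv_ctx and control pointers follows.

static int mydrv_start_streaming(struct mydrv_ctx *ctx)
{
	/* Prevent userspace from changing these controls mid-stream. */
	v4l2_ctrl_grab(ctx->exposure, true);
	v4l2_ctrl_grab(ctx->gain, true);
	return 0;
}

static void mydrv_stop_streaming(struct mydrv_ctx *ctx)
{
	v4l2_ctrl_grab(ctx->exposure, false);
	v4l2_ctrl_grab(ctx->gain, false);
}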
/**
*__v4l2_ctrl_modify_range() - Unlocked variant of v4l2_ctrl_modify_range()
@@ -1046,6 +1116,84 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
*/
__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+/**
+ * v4l2_ctrl_request_setup - helper function to apply control values in a request
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This is a helper function to call the control handler's s_ctrl callback with
+ * the control values contained in the request. Do note that this approach of
+ * applying control values in a request is only applicable to memory-to-memory
+ * devices.
+ */
+void v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_complete - Complete a control handler request object
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This function is to be called on each control handler that may have had a
+ * request object associated with it, i.e. control handlers of a driver that
+ * supports requests.
+ *
+ * The function first obtains the values of any volatile controls in the control
+ * handler and attaches them to the request. Then, the function completes the
+ * request object.
+ */
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_find - Find the control handler in the request
+ *
+ * @req: The request
+ * @parent: The parent control handler ('priv' in media_request_object_find())
+ *
+ * This function finds the control handler in the request. It may return
+ * NULL if not found. When done, you must call v4l2_ctrl_request_put_hdl()
+ * with the returned handler pointer.
+ *
+ * If the request is not in state VALIDATING or QUEUED, then this function
+ * will always return NULL.
+ *
+ * Note that in state VALIDATING the req_queue_mutex is held, so
+ * no objects can be added to or deleted from the request.
+ *
+ * In state QUEUED it is the driver that will have to ensure this.
+ */
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent);
+
+/**
+ * v4l2_ctrl_request_hdl_put - Put the control handler
+ *
+ * @hdl: Put this control handler
+ *
+ * This function releases the control handler previously obtained from
+ * v4l2_ctrl_request_hdl_find().
+ */
+static inline void v4l2_ctrl_request_hdl_put(struct v4l2_ctrl_handler *hdl)
+{
+ if (hdl)
+ media_request_object_put(&hdl->req_obj);
+}
+
+/**
+ * v4l2_ctrl_request_ctrl_find() - Find a control with the given ID.
+ *
+ * @hdl: The control handler from the request.
+ * @id: The ID of the control to find.
+ *
+ * This function returns a pointer to the control if this control is
+ * part of the request or NULL otherwise.
+ */
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
+
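A sketch of how a stateless codec driver might use the request helpers above in its req_validate callback; the mydrv_* names and the context lookup are hypothetical, and the specific control ID is only an example of a control required in every request.

static int mydrv_request_validate(struct media_request *req)
{
	/* Context lookup is driver specific and only assumed here. */
	struct mydrv_ctx *ctx = mydrv_ctx_from_request(req);
	struct v4l2_ctrl_handler *hdl;
	int ret = 0;

	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_handler);
	if (!hdl)
		return -ENOENT;

	if (!v4l2_ctrl_request_hdl_ctrl_find(hdl,
			V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS))
		ret = -ENOENT;

	v4l2_ctrl_request_hdl_put(hdl);

	/* Most drivers would also call vb2_request_validate(req) here. */
	return ret;
}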
/* Helpers for ioctl_ops */
/**
@@ -1112,11 +1260,12 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
* :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
*
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
@@ -1124,11 +1273,13 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl,
* :ref:`VIDIOC_TRY_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
*
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
@@ -1137,11 +1288,13 @@ int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
*
* @fh: pointer to &struct v4l2_fh
* @hdl: pointer to &struct v4l2_ctrl_handler
+ * @mdev: pointer to &struct media_device
* @c: pointer to &struct v4l2_ext_controls
*
* If hdl == NULL then they will all return -EINVAL.
*/
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct media_device *mdev,
struct v4l2_ext_controls *c);
/**
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 456ac13eca1d..48531e57cc5a 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -74,10 +74,19 @@ struct v4l2_ctrl_handler;
* indicates that file->private_data points to &struct v4l2_fh.
* This flag is set by the core when v4l2_fh_init() is called.
* All new drivers should use it.
+ * @V4L2_FL_QUIRK_INVERTED_CROP:
+ * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and
+ * compose are swapped. If this flag is set, then the selection
+ * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c.
+ * This allows those drivers to correctly implement the selection API,
+ * but the old crop API will still work as expected in order to preserve
+ * backwards compatibility.
+ * Never set this flag for new drivers.
*/
enum v4l2_video_device_flags {
- V4L2_FL_REGISTERED = 0,
- V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_REGISTERED = 0,
+ V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_QUIRK_INVERTED_CROP = 2,
};
/* Priority helper functions */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index b330e4a08a6b..ac7677a183ff 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -211,6 +211,17 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
sd->v4l2_dev->notify(sd, notification, arg);
}
+/**
+ * v4l2_device_supports_requests - Test if requests are supported.
+ *
+ * @v4l2_dev: pointer to struct v4l2_device
+ */
+static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
+{
+ return v4l2_dev->mdev && v4l2_dev->mdev->ops &&
+ v4l2_dev->mdev->ops->req_queue;
+}
+
/* Helper macros to iterate over all subdevs. */
/**
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 17cb27df1b81..2cc0cabc124f 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -10,6 +10,17 @@
#include <linux/videodev2.h>
+/**
+ * v4l2_calc_timeperframe - helper function to calculate timeperframe based on
+ * v4l2_dv_timings fields.
+ * @t: Timings for the video mode.
+ *
+ * Calculates the expected timeperframe using the pixel clock value and
+ * horizontal/vertical measures. This means that the v4l2_dv_timings structure
+ * must be correctly and fully filled.
+ */
+struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t);
+
/*
* v4l2_dv_timings_presets: list of all dv_timings presets.
*/
@@ -234,4 +245,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
const struct hdmi_vendor_infoframe *hdmi,
unsigned int height);
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset);
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
#endif
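For illustration, deriving a frame interval from a fully filled timings structure with the helper declared above; where `timings` comes from (e.g. a query of the current DV timings) is assumed.

struct v4l2_fract tpf;

/* timings must be completely filled in, including the pixelclock. */
tpf = v4l2_calc_timeperframe(&timings);
pr_debug("frame interval: %u/%u s\n", tpf.numerator, tpf.denominator);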
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index ea73fef8bdc0..8586cfb49828 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
* @prio: priority of the file handler, as defined by &enum v4l2_priority
*
* @wait: event's wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantee that
+ * the add and del event callbacks are called in an orderly fashion
* @subscribed: list of subscribed events
* @available: list of events waiting to be dequeued
* @navailable: number of available events at @available list
* @sequence: event sequence number
+ *
* @m2m_ctx: pointer to &struct v4l2_m2m_ctx
*/
struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
/* Events */
wait_queue_head_t wait;
+ struct mutex subscribe_lock;
struct list_head subscribed;
struct list_head available;
unsigned int navailable;
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 9cccab618b98..6d9d9f1839ac 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
struct fwnode_handle;
struct v4l2_async_notifier;
@@ -70,8 +71,8 @@ struct v4l2_fwnode_bus_parallel {
* @clock_lane: the number of the clock lane
*/
struct v4l2_fwnode_bus_mipi_csi1 {
- bool clock_inv;
- bool strobe;
+ unsigned char clock_inv:1;
+ unsigned char strobe:1;
bool lane_polarity[2];
unsigned char data_lane;
unsigned char clock_lane;
@@ -130,19 +131,30 @@ struct v4l2_fwnode_link {
* @fwnode: pointer to the endpoint's fwnode handle
* @vep: pointer to the V4L2 fwnode data structure
*
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
+ * This function parses the V4L2 fwnode endpoint specific parameters from the
+ * firmware. The caller is responsible for assigning @vep.bus_type to a valid
+ * media bus type. The caller may also set the default configuration for the
+ * endpoint --- a configuration that shall be in line with the DT binding
+ * documentation. Should a device support multiple bus types, the caller may
+ * call this function once the correct type is found --- with a default
+ * configuration valid for that type.
+ *
+ * For compatibility, guessing the bus type is also supported by setting
+ * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
+ * configuration in this case, as the defaults are specific to a given bus type.
+ * This functionality is deprecated, should not be used in new drivers, and is
+ * only supported for CSI-2 D-PHY, parallel and Bt.656 busses.
+ *
+ * The function does not change the V4L2 fwnode endpoint state if it fails.
*
* NOTE: This function does not parse properties the size of which is variable
* without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
* new drivers instead.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: %0 on success or a negative error code on failure:
+ * %-ENOMEM on memory allocation failure
+ * %-EINVAL on parsing failure
+ * %-ENXIO on mismatching bus types
*/
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep);
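A sketch of the calling convention described above: the caller zero-initialises the endpoint and selects the expected bus type before parsing. The CSI-2 D-PHY choice, `ep_fwnode` and `dev` are assumptions made for the example.

struct v4l2_fwnode_endpoint vep = {
	/* The caller picks the media bus type up front. */
	.bus_type = V4L2_MBUS_CSI2_DPHY,
};
int ret;

ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
if (ret == -ENXIO)
	dev_err(dev, "endpoint is not a CSI-2 D-PHY bus\n");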
@@ -160,14 +172,23 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
/**
* v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
* @fwnode: pointer to the endpoint's fwnode handle
+ * @vep: pointer to the V4L2 fwnode data structure
*
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
+ * This function parses the V4L2 fwnode endpoint specific parameters from the
+ * firmware. The caller is responsible for assigning @vep.bus_type to a valid
+ * media bus type. The caller may also set the default configuration for the
+ * endpoint --- a configuration that shall be in line with the DT binding
+ * documentation. Should a device support multiple bus types, the caller may
+ * call this function once the correct type is found --- with a default
+ * configuration valid for that type.
+ *
+ * For compatibility, guessing the bus type is also supported by setting
+ * @vep.bus_type to V4L2_MBUS_UNKNOWN. The caller may not provide a default
+ * configuration in this case, as the defaults are specific to a given bus type.
+ * This functionality is deprecated, should not be used in new drivers, and is
+ * only supported for CSI-2 D-PHY, parallel and Bt.656 busses.
+ *
+ * The function does not change the V4L2 fwnode endpoint state if it fails.
*
* v4l2_fwnode_endpoint_alloc_parse() has two important differences to
* v4l2_fwnode_endpoint_parse():
@@ -177,11 +198,13 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
* 2. The memory it has allocated to store the variable size data must be freed
* using v4l2_fwnode_endpoint_free() when no longer needed.
*
- * Return: Pointer to v4l2_fwnode_endpoint if successful, on an error pointer
- * on error.
+ * Return: %0 on success or a negative error code on failure:
+ * %-ENOMEM on memory allocation failure
+ * %-EINVAL on parsing failure
+ * %-ENXIO on mismatching bus types
*/
-struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
- struct fwnode_handle *fwnode);
+int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep);
/**
* v4l2_fwnode_parse_link() - parse a link between two endpoints
@@ -213,7 +236,6 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
*/
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link);
-
/**
* typedef parse_endpoint_func - Driver's callback function to be called on
* each V4L2 fwnode endpoint.
@@ -232,7 +254,6 @@ typedef int (*parse_endpoint_func)(struct device *dev,
struct v4l2_fwnode_endpoint *vep,
struct v4l2_async_subdev *asd);
-
/**
* v4l2_async_notifier_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
* device node
@@ -247,7 +268,7 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* endpoint. Optional.
*
* Parse the fwnode endpoints of the @dev device and populate the async sub-
- * devices array of the notifier. The @parse_endpoint callback function is
+ * devices list in the notifier. The @parse_endpoint callback function is
* called for each endpoint with the corresponding async sub-device pointer to
* let the caller initialize the driver-specific part of the async sub-device
* structure.
@@ -258,11 +279,6 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* This function may not be called on a registered notifier and may be called on
* a notifier only once.
*
- * Do not change the notifier's subdevs array, take references to the subdevs
- * array itself or change the notifier's num_subdevs field. This is because this
- * function allocates and reallocates the subdevs array based on parsing
- * endpoints.
- *
* The &struct v4l2_fwnode_endpoint passed to the callback function
* @parse_endpoint is released once the function is finished. If there is a need
* to retain that configuration, the user needs to allocate memory for it.
@@ -276,10 +292,11 @@ typedef int (*parse_endpoint_func)(struct device *dev,
* %-EINVAL if graph or endpoint parsing failed
* Other error codes as returned by @parse_endpoint
*/
-int v4l2_async_notifier_parse_fwnode_endpoints(
- struct device *dev, struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
+int
+v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint);
/**
* v4l2_async_notifier_parse_fwnode_endpoints_by_port - Parse V4L2 fwnode
@@ -303,7 +320,7 @@ int v4l2_async_notifier_parse_fwnode_endpoints(
* devices). In this case the driver must know which ports to parse.
*
* Parse the fwnode endpoints of the @dev device on a given @port and populate
- * the async sub-devices array of the notifier. The @parse_endpoint callback
+ * the async sub-devices list of the notifier. The @parse_endpoint callback
* function is called for each endpoint with the corresponding async sub-device
* pointer to let the caller initialize the driver-specific part of the async
* sub-device structure.
@@ -314,11 +331,6 @@ int v4l2_async_notifier_parse_fwnode_endpoints(
* This function may not be called on a registered notifier and may be called on
* a notifier only once per port.
*
- * Do not change the notifier's subdevs array, take references to the subdevs
- * array itself or change the notifier's num_subdevs field. This is because this
- * function allocates and reallocates the subdevs array based on parsing
- * endpoints.
- *
* The &struct v4l2_fwnode_endpoint passed to the callback function
* @parse_endpoint is released once the function is finished. If there is a need
* to retain that configuration, the user needs to allocate memory for it.
@@ -332,10 +344,12 @@ int v4l2_async_notifier_parse_fwnode_endpoints(
* %-EINVAL if graph or endpoint parsing failed
* Other error codes as returned by @parse_endpoint
*/
-int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
- struct device *dev, struct v4l2_async_notifier *notifier,
- size_t asd_struct_size, unsigned int port,
- parse_endpoint_func parse_endpoint);
+int
+v4l2_async_notifier_parse_fwnode_endpoints_by_port(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ unsigned int port,
+ parse_endpoint_func parse_endpoint);
/**
* v4l2_fwnode_reference_parse_sensor_common - parse common references on
@@ -355,7 +369,44 @@ int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
* -ENOMEM if memory allocation failed
* -EINVAL if property parsing failed
*/
-int v4l2_async_notifier_parse_fwnode_sensor_common(
- struct device *dev, struct v4l2_async_notifier *notifier);
+int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev,
+ struct v4l2_async_notifier *notifier);
+
+/**
+ * v4l2_async_register_fwnode_subdev - registers a sub-device to the
+ * asynchronous sub-device framework
+ * and parses fwnode endpoints
+ *
+ * @sd: pointer to struct &v4l2_subdev
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ * @ports: array of port IDs to parse for fwnode endpoints. If NULL, will
+ * parse all ports owned by the sub-device.
+ * @num_ports: number of ports in @ports array. Ignored if @ports is NULL.
+ * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
+ * endpoint. Optional.
+ *
+ * This function is just like v4l2_async_register_subdev() with the
+ * exception that calling it will also allocate a notifier for the
+ * sub-device, parse the sub-device's firmware node endpoints using
+ * v4l2_async_notifier_parse_fwnode_endpoints() or
+ * v4l2_async_notifier_parse_fwnode_endpoints_by_port(), and
+ * register the sub-device notifier. The sub-device is similarly
+ * unregistered by calling v4l2_async_unregister_subdev().
+ *
+ * While registered, the subdev module is marked as in-use.
+ *
+ * An error is returned if the module is no longer loaded on any attempts
+ * to register it.
+ */
+int
+v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd,
+ size_t asd_struct_size,
+ unsigned int *ports,
+ unsigned int num_ports,
+ parse_endpoint_func parse_endpoint);
#endif /* _V4L2_FWNODE_H */
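A minimal sketch of a sub-device driver using the combined helper declared above at the end of its probe; passing a NULL ports array asks the helper to parse all ports, and the notifier it allocates is released again by v4l2_async_unregister_subdev(). The `mydrv_sensor` structure is hypothetical.

/* Hypothetical tail of a sensor probe() function. */
static int mydrv_register_async(struct mydrv_sensor *sensor)
{
	return v4l2_async_register_fwnode_subdev(&sensor->sd,
					sizeof(struct v4l2_async_subdev),
					NULL, 0, NULL);
}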
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 5848d92c30da..8533ece5026e 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -48,6 +48,9 @@ struct v4l2_fh;
* @vidioc_enum_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
* for metadata capture
+ * @vidioc_enum_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
+ * for metadata output
* @vidioc_g_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -80,6 +83,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_g_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_g_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_s_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -112,6 +117,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_s_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_s_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_try_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -146,6 +153,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_try_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_try_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_reqbufs: pointer to the function that implements
* :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl
* @vidioc_querybuf: pointer to the function that implements
@@ -220,12 +229,8 @@ struct v4l2_fh;
* :ref:`VIDIOC_G_MODULATOR <vidioc_g_modulator>` ioctl
* @vidioc_s_modulator: pointer to the function that implements
* :ref:`VIDIOC_S_MODULATOR <vidioc_g_modulator>` ioctl
- * @vidioc_cropcap: pointer to the function that implements
- * :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
- * @vidioc_g_crop: pointer to the function that implements
- * :ref:`VIDIOC_G_CROP <vidioc_g_crop>` ioctl
- * @vidioc_s_crop: pointer to the function that implements
- * :ref:`VIDIOC_S_CROP <vidioc_g_crop>` ioctl
+ * @vidioc_g_pixelaspect: pointer to the function that implements
+ * the pixelaspect part of the :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
* @vidioc_g_selection: pointer to the function that implements
* :ref:`VIDIOC_G_SELECTION <vidioc_g_selection>` ioctl
* @vidioc_s_selection: pointer to the function that implements
@@ -318,6 +323,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
@@ -346,6 +353,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
@@ -374,6 +383,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
@@ -402,6 +413,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs)(struct file *file, void *fh,
@@ -491,12 +504,8 @@ struct v4l2_ioctl_ops {
int (*vidioc_s_modulator)(struct file *file, void *fh,
const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_cropcap)(struct file *file, void *fh,
- struct v4l2_cropcap *a);
- int (*vidioc_g_crop)(struct file *file, void *fh,
- struct v4l2_crop *a);
- int (*vidioc_s_crop)(struct file *file, void *fh,
- const struct v4l2_crop *a);
+ int (*vidioc_g_pixelaspect)(struct file *file, void *fh,
+ int buf_type, struct v4l2_fract *aspect);
int (*vidioc_g_selection)(struct file *file, void *fh,
struct v4l2_selection *s);
int (*vidioc_s_selection)(struct file *file, void *fh,
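A sketch of the replacement callback: rather than implementing cropcap just to report the pixel aspect, a driver now fills a single v4l2_fract. The fixed 54/59 value used here is the conventional pixel aspect for 720x576 material and is purely illustrative.

static int mydrv_g_pixelaspect(struct file *file, void *fh,
			       int buf_type, struct v4l2_fract *f)
{
	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* Pixel aspect y/x for 720x576 (50 Hz) capture. */
	f->numerator = 54;
	f->denominator = 59;
	return 0;
}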
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 2634d9dc9916..bf5043c1ab6b 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -23,84 +23,6 @@
#include <media/v4l2-dev.h>
#include <linux/types.h>
-/**
- * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
- *
- * @TUNER_PAD_RF_INPUT: Radiofrequency (RF) sink pad, usually linked to a
- * RF connector entity.
- * @TUNER_PAD_OUTPUT: Tuner video output source pad. Contains the video
- * chrominance and luminance or the hole bandwidth
- * of the signal converted to an Intermediate Frequency
- * (IF) or to baseband (on zero-IF tuners).
- * @TUNER_PAD_AUD_OUT: Tuner audio output source pad. Tuners used to decode
- * analog TV signals have an extra pad for audio output.
- * Old tuners use an analog stage with a saw filter for
- * the audio IF frequency. The output of the pad is, in
- * this case, the audio IF, with should be decoded either
- * by the bridge chipset (that's the case of cx2388x
- * chipsets) or may require an external IF sound
- * processor, like msp34xx. On modern silicon tuners,
- * the audio IF decoder is usually incorporated at the
- * tuner. On such case, the output of this pad is an
- * audio sampled data.
- * @TUNER_NUM_PADS: Number of pads of the tuner.
- */
-enum tuner_pad_index {
- TUNER_PAD_RF_INPUT,
- TUNER_PAD_OUTPUT,
- TUNER_PAD_AUD_OUT,
- TUNER_NUM_PADS
-};
-
-/**
- * enum if_vid_dec_pad_index - video IF-PLL pad index for
- * MEDIA_ENT_F_IF_VID_DECODER
- *
- * @IF_VID_DEC_PAD_IF_INPUT: video Intermediate Frequency (IF) sink pad
- * @IF_VID_DEC_PAD_OUT: IF-PLL video output source pad. Contains the
- * video chrominance and luminance IF signals.
- * @IF_VID_DEC_PAD_NUM_PADS: Number of pads of the video IF-PLL.
- */
-enum if_vid_dec_pad_index {
- IF_VID_DEC_PAD_IF_INPUT,
- IF_VID_DEC_PAD_OUT,
- IF_VID_DEC_PAD_NUM_PADS
-};
-
-/**
- * enum if_aud_dec_pad_index - audio/sound IF-PLL pad index for
- * MEDIA_ENT_F_IF_AUD_DECODER
- *
- * @IF_AUD_DEC_PAD_IF_INPUT: audio Intermediate Frequency (IF) sink pad
- * @IF_AUD_DEC_PAD_OUT: IF-PLL audio output source pad. Contains the
- * audio sampled stream data, usually connected
- * to the bridge bus via an Inter-IC Sound (I2S)
- * bus.
- * @IF_AUD_DEC_PAD_NUM_PADS: Number of pads of the audio IF-PLL.
- */
-enum if_aud_dec_pad_index {
- IF_AUD_DEC_PAD_IF_INPUT,
- IF_AUD_DEC_PAD_OUT,
- IF_AUD_DEC_PAD_NUM_PADS
-};
-
-/**
- * enum demod_pad_index - analog TV pad index for MEDIA_ENT_F_ATV_DECODER
- *
- * @DEMOD_PAD_IF_INPUT: IF input sink pad.
- * @DEMOD_PAD_VID_OUT: Video output source pad.
- * @DEMOD_PAD_VBI_OUT: Vertical Blank Interface (VBI) output source pad.
- * @DEMOD_PAD_AUDIO_OUT: Audio output source pad.
- * @DEMOD_NUM_PADS: Maximum number of output pads.
- */
-enum demod_pad_index {
- DEMOD_PAD_IF_INPUT,
- DEMOD_PAD_VID_OUT,
- DEMOD_PAD_VBI_OUT,
- DEMOD_PAD_AUDIO_OUT,
- DEMOD_NUM_PADS
-};
-
/* We don't need to include pci.h or usb.h here */
struct pci_dev;
struct usb_device;
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 4bbb5f3d2b02..66cb746ceeb5 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -14,7 +14,6 @@
#include <linux/v4l2-mediabus.h>
#include <linux/bitops.h>
-
/* Parallel flags */
/*
* Can the client run in master or in slave mode. By "Master mode" an operation
@@ -63,26 +62,34 @@
#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8)
#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9)
-#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | V4L2_MBUS_CSI2_2_LANE | \
- V4L2_MBUS_CSI2_3_LANE | V4L2_MBUS_CSI2_4_LANE)
-#define V4L2_MBUS_CSI2_CHANNELS (V4L2_MBUS_CSI2_CHANNEL_0 | V4L2_MBUS_CSI2_CHANNEL_1 | \
- V4L2_MBUS_CSI2_CHANNEL_2 | V4L2_MBUS_CSI2_CHANNEL_3)
+#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | \
+ V4L2_MBUS_CSI2_2_LANE | \
+ V4L2_MBUS_CSI2_3_LANE | \
+ V4L2_MBUS_CSI2_4_LANE)
+#define V4L2_MBUS_CSI2_CHANNELS (V4L2_MBUS_CSI2_CHANNEL_0 | \
+ V4L2_MBUS_CSI2_CHANNEL_1 | \
+ V4L2_MBUS_CSI2_CHANNEL_2 | \
+ V4L2_MBUS_CSI2_CHANNEL_3)
/**
* enum v4l2_mbus_type - media bus type
+ * @V4L2_MBUS_UNKNOWN: unknown bus type, no V4L2 mediabus configuration
* @V4L2_MBUS_PARALLEL: parallel interface with hsync and vsync
* @V4L2_MBUS_BT656: parallel interface with embedded synchronisation, can
* also be used for BT.1120
* @V4L2_MBUS_CSI1: MIPI CSI-1 serial interface
* @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2)
- * @V4L2_MBUS_CSI2: MIPI CSI-2 serial interface
+ * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY
+ * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY
*/
enum v4l2_mbus_type {
+ V4L2_MBUS_UNKNOWN,
V4L2_MBUS_PARALLEL,
V4L2_MBUS_BT656,
V4L2_MBUS_CSI1,
V4L2_MBUS_CCP2,
- V4L2_MBUS_CSI2,
+ V4L2_MBUS_CSI2_DPHY,
+ V4L2_MBUS_CSI2_CPHY,
};
/**
@@ -102,8 +109,9 @@ struct v4l2_mbus_config {
* @pix_fmt: pointer to &struct v4l2_pix_format to be filled
* @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
*/
-static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
- const struct v4l2_mbus_framefmt *mbus_fmt)
+static inline void
+v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
+ const struct v4l2_mbus_framefmt *mbus_fmt)
{
pix_fmt->width = mbus_fmt->width;
pix_fmt->height = mbus_fmt->height;
@@ -124,7 +132,7 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt,
* @code: data format code (from &enum v4l2_mbus_pixelcode)
*/
static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
- const struct v4l2_pix_format *pix_fmt,
+ const struct v4l2_pix_format *pix_fmt,
u32 code)
{
mbus_fmt->width = pix_fmt->width;
@@ -144,9 +152,9 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt,
* @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled
* @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model
*/
-static inline void v4l2_fill_pix_format_mplane(
- struct v4l2_pix_format_mplane *pix_mp_fmt,
- const struct v4l2_mbus_framefmt *mbus_fmt)
+static inline void
+v4l2_fill_pix_format_mplane(struct v4l2_pix_format_mplane *pix_mp_fmt,
+ const struct v4l2_mbus_framefmt *mbus_fmt)
{
pix_mp_fmt->width = mbus_fmt->width;
pix_mp_fmt->height = mbus_fmt->height;
@@ -164,9 +172,9 @@ static inline void v4l2_fill_pix_format_mplane(
* @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled
* @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model
*/
-static inline void v4l2_fill_mbus_format_mplane(
- struct v4l2_mbus_framefmt *mbus_fmt,
- const struct v4l2_pix_format_mplane *pix_mp_fmt)
+static inline void
+v4l2_fill_mbus_format_mplane(struct v4l2_mbus_framefmt *mbus_fmt,
+ const struct v4l2_pix_format_mplane *pix_mp_fmt)
{
mbus_fmt->width = pix_mp_fmt->width;
mbus_fmt->height = pix_mp_fmt->height;
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index d655720e16a1..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -622,6 +622,10 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
+/* v4l2 request helper */
+
+void v4l2_m2m_request_queue(struct media_request *req);
+
/* v4l2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index 595c3ba05f23..c86474dc7b55 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -83,6 +83,32 @@ static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1,
}
/**
+ * v4l2_rect_same_position() - return true if r1 has the same position as r2
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Return true if both rectangles have the same position
+ */
+static inline bool v4l2_rect_same_position(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return r1->top == r2->top && r1->left == r2->left;
+}
+
+/**
+ * v4l2_rect_equal() - return true if r1 equals r2
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Return true if both rectangles have the same size and position.
+ */
+static inline bool v4l2_rect_equal(const struct v4l2_rect *r1,
+ const struct v4l2_rect *r2)
+{
+ return v4l2_rect_same_size(r1, r2) && v4l2_rect_same_position(r1, r2);
+}
+
+/**
* v4l2_rect_intersect() - calculate the intersection of two rects.
* @r: intersection of @r1 and @r2.
* @r1: rectangle.
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 9102d6ca566e..47af609dc8f1 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -776,7 +776,11 @@ struct v4l2_subdev_internal_ops {
#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
/* Set this flag if this subdev needs a device node. */
#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2)
-/* Set this flag if this subdev generates events. */
+/*
+ * Set this flag if this subdev generates events.
+ * Note controls can send events, thus drivers exposing controls
+ * should set this flag.
+ */
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
struct regulator_bulk_data;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index f6818f732f34..4a737b2c610b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/dma-buf.h>
#include <linux/bitops.h>
+#include <media/media-request.h>
#define VB2_MAX_FRAME (32)
#define VB2_MAX_PLANES (8)
@@ -203,8 +204,8 @@ enum vb2_io_modes {
/**
* enum vb2_buffer_state - current video buffer state.
* @VB2_BUF_STATE_DEQUEUED: buffer under userspace control.
+ * @VB2_BUF_STATE_IN_REQUEST: buffer is queued in a media request.
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
- * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver.
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
* @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
@@ -217,8 +218,8 @@ enum vb2_io_modes {
*/
enum vb2_buffer_state {
VB2_BUF_STATE_DEQUEUED,
+ VB2_BUF_STATE_IN_REQUEST,
VB2_BUF_STATE_PREPARING,
- VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
@@ -238,6 +239,9 @@ struct vb2_queue;
* @num_planes: number of planes in the buffer
* on an internal driver queue.
* @timestamp: frame timestamp in ns.
+ * @request: the request this buffer is associated with.
+ * @req_obj: used to bind this buffer to a request. This
+ * request object has a refcount.
*/
struct vb2_buffer {
struct vb2_queue *vb2_queue;
@@ -246,10 +250,18 @@ struct vb2_buffer {
unsigned int memory;
unsigned int num_planes;
u64 timestamp;
+ struct media_request *request;
+ struct media_request_object req_obj;
/* private: internal use only
*
* state: current buffer state; do not change
+ * synced: this buffer has been synced for DMA, i.e. the
+ * 'prepare' memop was called. It is cleared again
+ * after the 'finish' memop is called.
+ * prepared: this buffer has been prepared, i.e. the
+ * buf_prepare op was called. It is cleared again
+ * after the 'buf_finish' op is called.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -257,6 +269,8 @@ struct vb2_buffer {
* vb2_plane: per-plane information; do not change
*/
enum vb2_buffer_state state;
+ bool synced;
+ bool prepared;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -287,6 +301,7 @@ struct vb2_buffer {
u32 cnt_buf_finish;
u32 cnt_buf_cleanup;
u32 cnt_buf_queue;
+ u32 cnt_buf_request_complete;
/* This counts the number of calls to vb2_buffer_done() */
u32 cnt_buf_done;
@@ -380,6 +395,11 @@ struct vb2_buffer {
* ioctl; might be called before @start_streaming callback
* if user pre-queued buffers before calling
* VIDIOC_STREAMON().
+ * @buf_request_complete: called when a buffer that was never queued to the
+ * driver, but is associated with a queued request, is canceled.
+ * The driver will have to mark associated objects in the
+ * request as completed; required if requests are
+ * supported.
*/
struct vb2_ops {
int (*queue_setup)(struct vb2_queue *q,
@@ -398,6 +418,8 @@ struct vb2_ops {
void (*stop_streaming)(struct vb2_queue *q);
void (*buf_queue)(struct vb2_buffer *vb);
+
+ void (*buf_request_complete)(struct vb2_buffer *vb);
};
/**
@@ -406,6 +428,9 @@ struct vb2_ops {
* @verify_planes_array: Verify that a given user space structure contains
* enough planes for the buffer. This is called
* for each dequeued buffer.
+ * @init_buffer: given a &vb2_buffer initialize the extra data after
+ * struct vb2_buffer.
+ * For V4L2 this is a &struct vb2_v4l2_buffer.
* @fill_user_buffer: given a &vb2_buffer fill in the userspace structure.
* For V4L2 this is a &struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer.
@@ -416,9 +441,9 @@ struct vb2_ops {
*/
struct vb2_buf_ops {
int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
+ void (*init_buffer)(struct vb2_buffer *vb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
- int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
- struct vb2_plane *planes);
+ int (*fill_vb2_buffer)(struct vb2_buffer *vb, struct vb2_plane *planes);
void (*copy_timestamp)(struct vb2_buffer *vb, const void *pb);
};
@@ -449,6 +474,13 @@ struct vb2_buf_ops {
* @quirk_poll_must_check_waiting_for_buffers: Return %EPOLLERR at poll when QBUF
* has not been called. This is a vb1 idiom that has been adopted
* also by vb2.
+ * @supports_requests: this queue supports the Request API.
+ * @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first
+ * time this is called. Set to 0 when the queue is canceled.
+ * If this is 1, then you cannot queue buffers from a request.
+ * @uses_requests: requests are used for this queue. Set to 1 the first time
+ * a request is queued. Set to 0 when the queue is canceled.
+ * If this is 1, then you cannot queue buffers directly.
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -516,6 +548,9 @@ struct vb2_queue {
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
unsigned quirk_poll_must_check_waiting_for_buffers:1;
+ unsigned supports_requests:1;
+ unsigned uses_qbuf:1;
+ unsigned uses_requests:1;
struct mutex *lock;
void *owner;
@@ -752,12 +787,17 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
* @index: id number of the buffer
* @pb: buffer structure passed from userspace to
* v4l2_ioctl_ops->vidioc_qbuf handler in driver
+ * @req: pointer to &struct media_request, may be NULL.
*
* Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called
* internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``.
*
* This function:
*
+ * #) If @req is non-NULL, then the buffer will be bound to this
+ * media request and it returns. The buffer will be prepared and
+ * queued to the driver (i.e. the next two steps) when the request
+ * itself is queued.
* #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
* (if provided), in which driver-specific buffer initialization can
* be performed;
@@ -766,7 +806,8 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req);
/**
* vb2_core_dqbuf() - Dequeue a buffer to the userspace
@@ -1143,4 +1184,19 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
*/
int vb2_verify_memory_type(struct vb2_queue *q,
enum vb2_memory memory, unsigned int type);
+
+/**
+ * vb2_request_object_is_buffer() - return true if the object is a buffer
+ *
+ * @obj: the request object.
+ */
+bool vb2_request_object_is_buffer(struct media_request_object *obj);
+
+/**
+ * vb2_request_buffer_cnt() - return the number of buffers in the request
+ *
+ * @req: the request.
+ */
+unsigned int vb2_request_buffer_cnt(struct media_request *req);
+
#endif /* _MEDIA_VIDEOBUF2_CORE_H */
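A sketch of the driver-side counterpart to the additions above: a queue that accepts buffers from requests sets supports_requests and provides a buf_request_complete callback that completes its control handler request object. The mydrv_* context, ops table and handler names are hypothetical.

static void mydrv_buf_request_complete(struct vb2_buffer *vb)
{
	struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Mark our control handler object in this request as completed. */
	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
}

static const struct vb2_ops mydrv_qops = {
	/* ... queue_setup, buf_prepare, buf_queue, etc. ... */
	.buf_request_complete	= mydrv_buf_request_complete,
};

static int mydrv_queue_init(struct mydrv_ctx *ctx, struct vb2_queue *q)
{
	q->ops = &mydrv_qops;
	q->supports_requests = 1;
	/* ... remaining vb2_queue setup ... */
	return vb2_queue_init(q);
}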
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 3d5e2d739f05..727855463838 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -32,6 +32,8 @@
* &enum v4l2_field.
* @timecode: frame timecode.
* @sequence: sequence count of this frame.
+ * @request_fd: the request_fd associated with this buffer
+ * @planes: plane information (userptr/fd, length, bytesused, data_offset).
*
* Should contain enough information to be able to cover all the fields
* of &struct v4l2_buffer at ``videodev2.h``.
@@ -43,6 +45,8 @@ struct vb2_v4l2_buffer {
__u32 field;
struct v4l2_timecode timecode;
__u32 sequence;
+ __s32 request_fd;
+ struct vb2_plane planes[VB2_MAX_PLANES];
};
/*
@@ -77,6 +81,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
* vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @mdev: pointer to &struct media_device, may be NULL.
* @b: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver
*
@@ -88,15 +93,19 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
* #) verifies the passed buffer,
* #) calls &vb2_ops->buf_prepare callback in the driver (if provided),
* in which driver-specific buffer initialization can be performed.
+ * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set,
+ * then bind the prepared buffer to the request.
*
* The return values from this function are intended to be directly returned
* from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*/
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b);
/**
* vb2_qbuf() - Queue a buffer from userspace
* @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @mdev: pointer to &struct media_device, may be NULL.
* @b: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_qbuf handler in driver
*
@@ -105,6 +114,8 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
* This function:
*
* #) verifies the passed buffer;
+ * #) if @b->request_fd is non-zero and @mdev->ops->req_queue is set,
+ * then bind the buffer to the request.
* #) if necessary, calls &vb2_ops->buf_prepare callback in the driver
* (if provided), in which driver-specific buffer initialization can
* be performed;
@@ -114,7 +125,8 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
* The return values from this function are intended to be directly returned
* from &v4l2_ioctl_ops->vidioc_qbuf handler in driver.
*/
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
+int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b);
/**
* vb2_expbuf() - Export a buffer as a file descriptor
@@ -291,4 +303,8 @@ void vb2_ops_wait_prepare(struct vb2_queue *vq);
*/
void vb2_ops_wait_finish(struct vb2_queue *vq);
+struct media_request;
+int vb2_request_validate(struct media_request *req);
+void vb2_request_queue(struct media_request *req);
+
#endif /* _MEDIA_VIDEOBUF2_V4L2_H */
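To tie the request hooks together, a sketch of the media_device_ops wiring a vb2-based driver could use so that requests are validated and queued through vb2; an m2m driver would typically point req_queue at v4l2_m2m_request_queue instead. The mydrv_* names are hypothetical and the assignment is assumed to happen before media_device_register().

static const struct media_device_ops mydrv_media_ops = {
	.req_validate	= vb2_request_validate,
	.req_queue	= vb2_request_queue,
};

static void mydrv_setup_media_ops(struct media_device *mdev)
{
	/* Hook up the request callbacks before media_device_register(). */
	mdev->ops = &mydrv_media_ops;
}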
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 3093b9cb9067..1cf868360701 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* vsp1.h -- R-Car VSP1 API
*
* Copyright (C) 2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MEDIA_VSP1_H__
#define __MEDIA_VSP1_H__
@@ -46,7 +42,7 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
/**
* struct vsp1_du_atomic_config - VSP atomic configuration parameters
* @pixelformat: plane pixel format (V4L2 4CC)
- * @pitch: line pitch in bytes, for all planes
+ * @pitch: line pitch in bytes for the first plane
* @mem: DMA memory address for each plane of the frame buffer
* @src: source rectangle in the frame buffer (integer coordinates)
* @dst: destination rectangle on the display (integer coordinates)
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index b8eb51a661e5..beede1e1a919 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -336,6 +336,9 @@ enum p9_qid_t {
#define P9_NOFID (u32)(~0)
#define P9_MAXWELEM 16
+/* Minimal header size: size[4] type[1] tag[2] */
+#define P9_HDRSZ 7
+
/* ample room for Twrite/Rread header */
#define P9_IOHDRSZ 24
@@ -558,19 +561,12 @@ struct p9_fcall {
size_t offset;
size_t capacity;
+ struct kmem_cache *cache;
u8 *sdata;
};
-struct p9_idpool;
-
int p9_errstr2errno(char *errstr, int len);
-struct p9_idpool *p9_idpool_create(void);
-void p9_idpool_destroy(struct p9_idpool *);
-int p9_idpool_get(struct p9_idpool *p);
-void p9_idpool_put(int id, struct p9_idpool *p);
-int p9_idpool_check(int id, struct p9_idpool *p);
-
int p9_error_init(void);
int p9_trans_fd_init(void);
void p9_trans_fd_exit(void);
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 0fa0fbab33b0..947a570307a6 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -64,22 +64,15 @@ enum p9_trans_status {
/**
* enum p9_req_status_t - status of a request
- * @REQ_STATUS_IDLE: request slot unused
* @REQ_STATUS_ALLOC: request has been allocated but not sent
* @REQ_STATUS_UNSENT: request waiting to be sent
* @REQ_STATUS_SENT: request sent to server
* @REQ_STATUS_RCVD: response received from server
* @REQ_STATUS_FLSHD: request has been flushed
* @REQ_STATUS_ERROR: request encountered an error on the client side
- *
- * The @REQ_STATUS_IDLE state is used to mark a request slot as unused
- * but use is actually tracked by the idpool structure which handles tag
- * id allocation.
- *
*/
enum p9_req_status_t {
- REQ_STATUS_IDLE,
REQ_STATUS_ALLOC,
REQ_STATUS_UNSENT,
REQ_STATUS_SENT,
@@ -92,70 +85,46 @@ enum p9_req_status_t {
* struct p9_req_t - request slots
* @status: status of this request slot
* @t_err: transport error
- * @flush_tag: tag of request being flushed (for flush requests)
* @wq: wait_queue for the client to block on for this request
* @tc: the request fcall structure
* @rc: the response fcall structure
* @aux: transport specific data (provided for trans_fd migration)
* @req_list: link for higher level objects to chain requests
- *
- * Transport use an array to track outstanding requests
- * instead of a list. While this may incurr overhead during initial
- * allocation or expansion, it makes request lookup much easier as the
- * tag id is a index into an array. (We use tag+1 so that we can accommodate
- * the -1 tag for the T_VERSION request).
- * This also has the nice effect of only having to allocate wait_queues
- * once, instead of constantly allocating and freeing them. Its possible
- * other resources could benefit from this scheme as well.
- *
*/
-
struct p9_req_t {
int status;
int t_err;
+ struct kref refcount;
wait_queue_head_t wq;
- struct p9_fcall *tc;
- struct p9_fcall *rc;
+ struct p9_fcall tc;
+ struct p9_fcall rc;
void *aux;
-
struct list_head req_list;
};
/**
* struct p9_client - per client instance state
- * @lock: protect @fidlist
+ * @lock: protect @fids and @reqs
* @msize: maximum data size negotiated by protocol
- * @dotu: extension flags negotiated by protocol
* @proto_version: 9P protocol version to use
* @trans_mod: module API instantiated with this client
+ * @status: connection state
 * @trans: transport instance state and API
* @fids: All active FID handles
- * @tagpool - transaction id accounting for session
- * @reqs - 2D array of requests
- * @max_tag - current maximum tag id allocated
- * @name - node name used as client id
+ * @reqs: All active requests.
+ * @name: node name used as client id
*
* The client structure is used to keep track of various per-client
* state that has been instantiated.
- * In order to minimize per-transaction overhead we use a
- * simple array to lookup requests instead of a hash table
- * or linked list. In order to support larger number of
- * transactions, we make this a 2D array, allocating new rows
- * when we need to grow the total number of the transactions.
- *
- * Each row is 256 requests and we'll support up to 256 rows for
- * a total of 64k concurrent requests per session.
- *
- * Bugs: duplicated data and potentially unnecessary elements.
*/
-
struct p9_client {
- spinlock_t lock; /* protect client structure */
+ spinlock_t lock;
unsigned int msize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
void *trans;
+ struct kmem_cache *fcall_cache;
union {
struct {
@@ -170,10 +139,7 @@ struct p9_client {
} trans_opts;
struct idr fids;
-
- struct p9_idpool *tagpool;
- struct p9_req_t *reqs[P9_ROW_MAXTAG];
- int max_tag;
+ struct idr reqs;
char name[__NEW_UTS_LEN + 1];
};
@@ -266,7 +232,21 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, const char *name, int mode,
kgid_t gid, struct p9_qid *);
int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
+void p9_fcall_fini(struct p9_fcall *fc);
struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+
+static inline void p9_req_get(struct p9_req_t *r)
+{
+ kref_get(&r->refcount);
+}
+
+static inline int p9_req_try_get(struct p9_req_t *r)
+{
+ return kref_get_unless_zero(&r->refcount);
+}
+
+int p9_req_put(struct p9_req_t *r);
+
void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
@@ -279,4 +259,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *, const char *, u64 *);
int p9_client_xattrcreate(struct p9_fid *, const char *, u64, int);
int p9_client_readlink(struct p9_fid *fid, char **target);
+int p9_client_init(void);
+void p9_client_exit(void);
+
#endif /* NET_9P_CLIENT_H */
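A hedged sketch (not from this diff) of how the new request refcounting helpers are meant to pair when a transport hands a request to the completion path; surrounding transport code and error handling are omitted.

static void foo_deliver_reply(struct p9_client *clnt, struct p9_req_t *req)
{
	p9_req_get(req);			/* keep req alive across the callback */
	p9_client_cb(clnt, req, REQ_STATUS_RCVD);
	p9_req_put(req);			/* drop the extra reference */
}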
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1ad5b19e83a9..dbc795ec659e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -13,7 +13,7 @@
#include <net/netns/generic.h>
struct tcf_idrinfo {
- spinlock_t lock;
+ struct mutex lock;
struct idr action_idr;
};
@@ -23,20 +23,20 @@ struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
- struct list_head list;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
refcount_t tcfa_refcnt;
atomic_t tcfa_bindcnt;
- u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
+ struct gnet_stats_basic_packed tcfa_bstats_hw;
struct gnet_stats_queue tcfa_qstats;
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
@@ -44,7 +44,6 @@ struct tc_action {
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
-#define tcf_capab common.tcfa_capab
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
@@ -88,8 +87,7 @@ struct tc_action_ops {
 struct tcf_result *); /* called under RCU BH lock */
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
- int (*lookup)(struct net *net, struct tc_action **a, u32 index,
- struct netlink_ext_ack *extack);
+ int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held,
@@ -98,11 +96,10 @@ struct tc_action_ops {
struct netlink_callback *, int,
const struct tc_action_ops *,
struct netlink_ext_ack *);
- void (*stats_update)(struct tc_action *, u64, u32, u64);
+ void (*stats_update)(struct tc_action *, u64, u32, u64, bool);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
void (*put_dev)(struct net_device *dev);
- int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -120,7 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
if (!tn->idrinfo)
return -ENOMEM;
tn->ops = ops;
- spin_lock_init(&tn->idrinfo->lock);
+ mutex_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr);
return err;
}
@@ -148,8 +145,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
const struct tc_action_ops *ops,
struct netlink_ext_ack *extack);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
- int bind);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
int bind, bool cpustats);
@@ -158,7 +153,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -190,45 +184,15 @@ int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
- u64 packets, u64 lastuse)
+ u64 packets, u64 lastuse, bool hw)
{
#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
- a->ops->stats_update(a, bytes, packets, lastuse);
+ a->ops->stats_update(a, bytes, packets, lastuse, hw);
#endif
}
-#ifdef CONFIG_NET_CLS_ACT
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop);
-#else
-static inline
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- return 0;
-}
-
-static inline
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
-}
-
-static inline
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop)
-{
- return 0;
-}
-#endif
#endif
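A short illustrative sketch (not part of this diff) of the new bool argument: a driver dumping counters for a hardware-offloaded action passes hw == true.

static void foo_update_hw_stats(struct tc_action *act, u64 bytes, u64 packets)
{
	/* the added final argument marks these as hardware counters */
	tcf_action_stats_update(act, bytes, packets, jiffies, true);
}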
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 6def0351bcc3..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -265,6 +265,11 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly;
struct ipv6_bpf_stub {
int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
bool force_bind_address_no_port, bool with_lock);
+ struct sock *(*udp6_lib_lookup)(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, int sdif, struct udp_table *tbl,
+ struct sk_buff *skb);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
@@ -312,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index f53edb3754bc..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -13,6 +13,7 @@
#define _NET_RXRPC_H
#include <linux/rxrpc.h>
+#include <linux/ktime.h>
struct key;
struct sock;
@@ -76,6 +77,10 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
+ ktime_t *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a5ba41b3b867..ddbba838d048 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -13,7 +13,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_gc(void);
void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);
-struct sock *unix_peer_get(struct sock *);
+struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_SIZE 256
#define UNIX_HASH_BITS 8
@@ -40,7 +40,7 @@ struct unix_skb_parms {
u32 consumed;
} __randomize_layout;
-#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
+#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
@@ -52,7 +52,7 @@ struct unix_skb_parms {
struct unix_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
- struct unix_address *addr;
+ struct unix_address *addr;
struct path path;
struct mutex iolock, bindlock;
struct sock *peer;
@@ -63,7 +63,7 @@ struct unix_sock {
#define UNIX_GC_CANDIDATE 0
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
- wait_queue_entry_t peer_wake;
+ wait_queue_entry_t peer_wake;
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index cdd9f1fe7cfa..c36dc1e20556 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1517,6 +1517,20 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
+#define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027
+struct hci_cp_le_add_to_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 peer_irk[16];
+ __u8 local_irk[16];
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028
+struct hci_cp_le_del_from_resolv_list {
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 0db1b9b428b7..e5ea633ea368 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -103,6 +103,14 @@ struct bdaddr_list {
u8 bdaddr_type;
};
+struct bdaddr_list_with_irk {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 peer_irk[16];
+ u8 local_irk[16];
+};
+
struct bt_uuid {
struct list_head list;
u8 uuid[16];
@@ -259,6 +267,8 @@ struct hci_dev {
__u16 le_max_tx_time;
__u16 le_max_rx_len;
__u16 le_max_rx_time;
+ __u8 le_max_key_size;
+ __u8 le_min_key_size;
__u16 discov_interleaved_timeout;
__u16 conn_info_min_age;
__u16 conn_info_max_age;
@@ -1058,8 +1068,15 @@ int hci_inquiry(void __user *arg);
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
bdaddr_t *bdaddr, u8 type);
+struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
+ struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u8 *peer_irk, u8 *local_irk);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
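A hedged sketch (not from this diff) of the new IRK-aware list helpers; using hdev->le_resolv_list as the backing list is an assumption about the typical caller.

static int foo_add_resolv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 type, u8 peer_irk[16], u8 local_irk[16])
{
	if (hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, type))
		return -EEXIST;

	return hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, bdaddr,
					    type, peer_irk, local_irk);
}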
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 0697fd413087..093aedebdf0c 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -277,12 +277,19 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
#define L2CAP_CR_BAD_AMP 0x0005
-#define L2CAP_CR_AUTHENTICATION 0x0005
-#define L2CAP_CR_AUTHORIZATION 0x0006
-#define L2CAP_CR_BAD_KEY_SIZE 0x0007
-#define L2CAP_CR_ENCRYPTION 0x0008
-#define L2CAP_CR_INVALID_SCID 0x0009
-#define L2CAP_CR_SCID_IN_USE 0x000A
+#define L2CAP_CR_INVALID_SCID 0x0006
+#define L2CAP_CR_SCID_IN_USE 0x0007
+
+/* credit based connect results */
+#define L2CAP_CR_LE_SUCCESS 0x0000
+#define L2CAP_CR_LE_BAD_PSM 0x0002
+#define L2CAP_CR_LE_NO_MEM 0x0004
+#define L2CAP_CR_LE_AUTHENTICATION 0x0005
+#define L2CAP_CR_LE_AUTHORIZATION 0x0006
+#define L2CAP_CR_LE_BAD_KEY_SIZE 0x0007
+#define L2CAP_CR_LE_ENCRYPTION 0x0008
+#define L2CAP_CR_LE_INVALID_SCID 0x0009
+#define L2CAP_CR_LE_SCID_IN_USE	0x000A
/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
@@ -455,9 +462,6 @@ struct l2cap_conn_param_update_rsp {
#define L2CAP_CONN_PARAM_ACCEPTED 0x0000
#define L2CAP_CONN_PARAM_REJECTED 0x0001
-#define L2CAP_LE_MAX_CREDITS 10
-#define L2CAP_LE_DEFAULT_MPS 230
-
struct l2cap_le_conn_req {
__le16 psm;
__le16 scid;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a2d058170ea3..b46d68acf701 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9a850973e09a..e0c41eb1c860 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -149,7 +149,7 @@ enum ieee80211_channel_flags {
*/
struct ieee80211_channel {
enum nl80211_band band;
- u16 center_freq;
+ u32 center_freq;
u16 hw_value;
u32 flags;
int max_antenna_gain;
@@ -775,6 +775,14 @@ struct cfg80211_crypto_settings {
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @ftm_responder: enable FTM responder functionality; -1 for no change
+ * (which also implies no change in LCI/civic location data)
+ * @lci: Measurement Report element content, starting with Measurement Token
+ * (measurement type 8)
+ * @civicloc: Measurement Report element content, starting with Measurement
+ * Token (measurement type 11)
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic location data length
*/
struct cfg80211_beacon_data {
const u8 *head, *tail;
@@ -782,12 +790,17 @@ struct cfg80211_beacon_data {
const u8 *proberesp_ies;
const u8 *assocresp_ies;
const u8 *probe_resp;
+ const u8 *lci;
+ const u8 *civicloc;
+ s8 ftm_responder;
size_t head_len, tail_len;
size_t beacon_ies_len;
size_t proberesp_ies_len;
size_t assocresp_ies_len;
size_t probe_resp_len;
+ size_t lci_len;
+ size_t civicloc_len;
};
struct mac_address {
@@ -849,6 +862,7 @@ struct cfg80211_bitrate_mask {
* @beacon_rate: bitrate to be used for beacons
* @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
* @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
+ * @he_cap: HE capabilities (or %NULL if HE isn't enabled)
* @ht_required: stations must support HT
* @vht_required: stations must support VHT
*/
@@ -874,6 +888,7 @@ struct cfg80211_ap_settings {
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
+ const struct ieee80211_he_cap_elem *he_cap;
bool ht_required, vht_required;
};
@@ -1283,6 +1298,7 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
 * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
@@ -1290,6 +1306,10 @@ struct cfg80211_tid_stats {
* @ack_signal: signal strength (in dBm) of the last ACK frame.
* @avg_ack_signal: average rssi value of ack packet for the no of msdu's has
* been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
*/
struct station_info {
u64 filled;
@@ -1333,9 +1353,14 @@ struct station_info {
u64 rx_beacon;
u64 rx_duration;
u8 rx_beacon_signal_avg;
+ u8 connected_to_gate;
+
struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1539,6 +1564,10 @@ struct bss_parameters {
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
+ * connected to a mesh gate in mesh formation info. If false, the
+ * value in mesh formation is determined by the presence of root paths
+ * in the mesh path table
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
@@ -1558,6 +1587,7 @@ struct mesh_config {
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
+ bool dot11MeshConnectedToMeshGate;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
@@ -2795,6 +2825,224 @@ struct cfg80211_external_auth_params {
};
/**
+ * struct cfg80211_ftm_responder_stats - FTM responder statistics
+ *
+ * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
+ * indicate the relevant values in this struct for them
+ * @success_num: number of FTM sessions in which all frames were successfully
+ * answered
+ * @partial_num: number of FTM sessions in which part of frames were
+ * successfully answered
+ * @failed_num: number of failed FTM sessions
+ * @asap_num: number of ASAP FTM sessions
+ * @non_asap_num: number of non-ASAP FTM sessions
+ * @total_duration_ms: total sessions durations - gives an indication
+ * of how much time the responder was busy
+ * @unknown_triggers_num: number of unknown FTM triggers - triggers from
+ * initiators that didn't successfully finish the negotiation phase with
+ * the responder
+ * @reschedule_requests_num: number of FTM reschedule requests - initiator asks
+ * for a new scheduling although it already has a scheduled FTM slot
+ * @out_of_window_triggers_num: total FTM triggers out of scheduled window
+ */
+struct cfg80211_ftm_responder_stats {
+ u32 filled;
+ u32 success_num;
+ u32 partial_num;
+ u32 failed_num;
+ u32 asap_num;
+ u32 non_asap_num;
+ u64 total_duration_ms;
+ u32 unknown_triggers_num;
+ u32 reschedule_requests_num;
+ u32 out_of_window_triggers_num;
+};
+
+/**
+ * struct cfg80211_pmsr_ftm_result - FTM result
+ * @failure_reason: if this measurement failed (PMSR status is
+ * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise
+ * reason than just "failure"
+ * @burst_index: if reporting partial results, this is the index
+ * in [0 .. num_bursts-1] of the burst that's being reported
+ * @num_ftmr_attempts: number of FTM request frames transmitted
+ * @num_ftmr_successes: number of FTM request frames acked
+ * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ * fill this to indicate in how many seconds a retry is deemed possible
+ * by the responder
+ * @num_bursts_exp: actual number of bursts exponent negotiated
+ * @burst_duration: actual burst duration negotiated
+ * @ftms_per_burst: actual FTMs per burst negotiated
+ * @lci_len: length of LCI information (if present)
+ * @civicloc_len: length of civic location information (if present)
+ * @lci: LCI data (may be %NULL)
+ * @civicloc: civic location data (may be %NULL)
+ * @rssi_avg: average RSSI over FTM action frames reported
+ * @rssi_spread: spread of the RSSI over FTM action frames reported
+ * @tx_rate: bitrate for transmitted FTM action frame response
+ * @rx_rate: bitrate of received FTM action frame
+ * @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
+ * @rtt_variance: variance of RTTs measured (note that standard deviation is
+ * the square root of the variance)
+ * @rtt_spread: spread of the RTTs measured
+ * @dist_avg: average of distances (mm) measured
+ * (must have either this or @rtt_avg)
+ * @dist_variance: variance of distances measured (see also @rtt_variance)
+ * @dist_spread: spread of distances measured (see also @rtt_spread)
+ * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
+ * @num_ftmr_successes_valid: @num_ftmr_successes is valid
+ * @rssi_avg_valid: @rssi_avg is valid
+ * @rssi_spread_valid: @rssi_spread is valid
+ * @tx_rate_valid: @tx_rate is valid
+ * @rx_rate_valid: @rx_rate is valid
+ * @rtt_avg_valid: @rtt_avg is valid
+ * @rtt_variance_valid: @rtt_variance is valid
+ * @rtt_spread_valid: @rtt_spread is valid
+ * @dist_avg_valid: @dist_avg is valid
+ * @dist_variance_valid: @dist_variance is valid
+ * @dist_spread_valid: @dist_spread is valid
+ */
+struct cfg80211_pmsr_ftm_result {
+ const u8 *lci;
+ const u8 *civicloc;
+ unsigned int lci_len;
+ unsigned int civicloc_len;
+ enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
+ u32 num_ftmr_attempts, num_ftmr_successes;
+ s16 burst_index;
+ u8 busy_retry_time;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ s32 rssi_avg;
+ s32 rssi_spread;
+ struct rate_info tx_rate, rx_rate;
+ s64 rtt_avg;
+ s64 rtt_variance;
+ s64 rtt_spread;
+ s64 dist_avg;
+ s64 dist_variance;
+ s64 dist_spread;
+
+ u16 num_ftmr_attempts_valid:1,
+ num_ftmr_successes_valid:1,
+ rssi_avg_valid:1,
+ rssi_spread_valid:1,
+ tx_rate_valid:1,
+ rx_rate_valid:1,
+ rtt_avg_valid:1,
+ rtt_variance_valid:1,
+ rtt_spread_valid:1,
+ dist_avg_valid:1,
+ dist_variance_valid:1,
+ dist_spread_valid:1;
+};
+
+/**
+ * struct cfg80211_pmsr_result - peer measurement result
+ * @addr: address of the peer
+ * @host_time: host time (use ktime_get_boottime(), adjusted to the time when the
+ * measurement was made)
+ * @ap_tsf: AP's TSF at measurement time
+ * @status: status of the measurement
+ * @final: if reporting partial results, mark this as the last one; if not
+ * reporting partial results always set this flag
+ * @ap_tsf_valid: indicates the @ap_tsf value is valid
+ * @type: type of the measurement reported, note that we only support reporting
+ * one type at a time, but you can report multiple results separately and
+ * they're all aggregated for userspace.
+ */
+struct cfg80211_pmsr_result {
+ u64 host_time, ap_tsf;
+ enum nl80211_peer_measurement_status status;
+
+ u8 addr[ETH_ALEN];
+
+ u8 final:1,
+ ap_tsf_valid:1;
+
+ enum nl80211_peer_measurement_type type;
+
+ union {
+ struct cfg80211_pmsr_ftm_result ftm;
+ };
+};
+
+/**
+ * struct cfg80211_pmsr_ftm_request_peer - FTM request data
+ * @requested: indicates FTM is requested
+ * @preamble: frame preamble to use
+ * @burst_period: burst period to use
+ * @asap: indicates to use ASAP mode
+ * @num_bursts_exp: number of bursts exponent
+ * @burst_duration: burst duration
+ * @ftms_per_burst: number of FTMs per burst
+ * @ftmr_retries: number of retries for FTM request
+ * @request_lci: request LCI information
+ * @request_civicloc: request civic location information
+ *
+ * See also nl80211 for the respective attribute documentation.
+ */
+struct cfg80211_pmsr_ftm_request_peer {
+ enum nl80211_preamble preamble;
+ u16 burst_period;
+ u8 requested:1,
+ asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ u8 ftmr_retries;
+};
+
+/**
+ * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request
+ * @addr: MAC address
+ * @chandef: channel to use
+ * @report_ap_tsf: report the associated AP's TSF
+ * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer
+ */
+struct cfg80211_pmsr_request_peer {
+ u8 addr[ETH_ALEN];
+ struct cfg80211_chan_def chandef;
+ u8 report_ap_tsf:1;
+ struct cfg80211_pmsr_ftm_request_peer ftm;
+};
+
+/**
+ * struct cfg80211_pmsr_request - peer measurement request
+ * @cookie: cookie, set by cfg80211
+ * @nl_portid: netlink portid - used by cfg80211
+ * @drv_data: driver data for this request, if required for aborting,
+ * not otherwise used or freed by cfg80211
+ * @mac_addr: MAC address used for (randomised) request
+ * @mac_addr_mask: MAC address mask used for randomisation, bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @list: used by cfg80211 to hold on to the request
+ * @timeout: timeout (in milliseconds) for the whole operation, if
+ * zero it means there's no timeout
+ * @n_peers: number of peers to do measurements with
+ * @peers: per-peer measurement request data
+ */
+struct cfg80211_pmsr_request {
+ u64 cookie;
+ void *drv_data;
+ u32 n_peers;
+ u32 nl_portid;
+
+ u32 timeout;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
+ struct list_head list;
+
+ struct cfg80211_pmsr_request_peer peers[];
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3126,6 +3374,11 @@ struct cfg80211_external_auth_params {
*
* @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
* tells the driver that the frame should not be encrypted.
+ *
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative; currently no way to reset them is provided.
+ * @start_pmsr: start peer measurement (e.g. FTM)
+ * @abort_pmsr: abort peer measurement
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3431,6 +3684,15 @@ struct cfg80211_ops {
const u8 *buf, size_t len,
const u8 *dest, const __be16 proto,
const bool noencrypt);
+
+ int (*get_ftm_responder_stats)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
+
+ int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
};
/*
@@ -3803,6 +4065,42 @@ struct wiphy_iftype_ext_capab {
};
/**
+ * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
+ * @max_peers: maximum number of peers in a single measurement
+ * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
+ * @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm.supported: FTM measurement is supported
+ * @ftm.asap: ASAP-mode is supported
+ * @ftm.non_asap: non-ASAP-mode is supported
+ * @ftm.request_lci: can request LCI data
+ * @ftm.request_civicloc: can request civic location data
+ * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble)
+ * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width)
+ * @ftm.max_bursts_exponent: maximum burst exponent supported
+ * (set to -1 if not limited; note that setting this will necessarily
+ * forbid using the value 15 to let the responder pick)
+ * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
+ * not limited)
+ */
+struct cfg80211_pmsr_capabilities {
+ unsigned int max_peers;
+ u8 report_ap_tsf:1,
+ randomize_mac_addr:1;
+
+ struct {
+ u32 preambles;
+ u32 bandwidths;
+ s8 max_bursts_exponent;
+ u8 max_ftms_per_burst;
+ u8 supported:1,
+ asap:1,
+ non_asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ } ftm;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
@@ -3958,7 +4256,6 @@ struct wiphy_iftype_ext_capab {
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
*
- * @cookie_counter: unique generic cookie counter, used to identify objects.
* @nan_supported_bands: bands supported by the device in NAN mode, a
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
@@ -3967,6 +4264,8 @@ struct wiphy_iftype_ext_capab {
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @pmsr_capa: peer measurement capabilities
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -4097,14 +4396,14 @@ struct wiphy {
u32 bss_select_support;
- u64 cookie_counter;
-
u8 nan_supported_bands;
u32 txq_limit;
u32 txq_memory_limit;
u32 txq_quantum;
+ const struct cfg80211_pmsr_capabilities *pmsr_capa;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -4307,6 +4606,9 @@ struct cfg80211_cqm_config;
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
* @cqm_config: (private) nl80211 RSSI monitor state
+ * @pmsr_list: (private) peer measurement requests
+ * @pmsr_lock: (private) peer measurement requests/results lock
+ * @pmsr_free_wk: (private) peer measurement cleanup work
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -4378,6 +4680,10 @@ struct wireless_dev {
#endif
struct cfg80211_cqm_config *cqm_config;
+
+ struct list_head pmsr_list;
+ spinlock_t pmsr_lock;
+ struct work_struct pmsr_free_wk;
};
static inline u8 *wdev_address(struct wireless_dev *wdev)
@@ -4733,6 +5039,17 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len);
/**
+ * cfg80211_send_layer2_update - send layer 2 update frame
+ *
+ * @dev: network device
+ * @addr: STA MAC address
+ *
+ * Wireless drivers can use this function to update forwarding tables in bridge
+ * devices upon STA association.
+ */
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
+
+/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
@@ -4852,8 +5169,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
 * @freq: the frequency (in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- * irrelevant). This can be used later for deduplication.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
@@ -4865,8 +5180,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* Return: 0 on success. -ENODATA.
*/
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
- struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+ struct ieee80211_reg_rule *rule);
/*
* callbacks for asynchronous cfg80211 methods, notification
@@ -5261,7 +5576,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
* cfg80211 then sends a notification to userspace.
*/
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
- const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
+ const u8 *macaddr, const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp);
/**
* DOC: RFkill integration
@@ -6563,6 +6879,31 @@ int cfg80211_external_auth_request(struct net_device *netdev,
struct cfg80211_external_auth_params *params,
gfp_t gfp);
+/**
+ * cfg80211_pmsr_report - report peer measurement result data
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @result: the result data
+ * @gfp: allocation flags
+ */
+void cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result,
+ gfp_t gfp);
+
+/**
+ * cfg80211_pmsr_complete - report peer measurement completed
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @gfp: allocation flags
+ *
+ * Report that the entire measurement completed, after this
+ * the request pointer will no longer be valid.
+ */
+void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
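A minimal, hedged sketch (not part of this diff) of a driver reporting one FTM peer measurement result and then completing the request; the nl80211 enum values and the boottime conversion are assumptions about the matching UAPI.

static void foo_report_ftm_result(struct wireless_dev *wdev,
				  struct cfg80211_pmsr_request *req,
				  const u8 *peer_addr, s64 rtt_avg)
{
	struct cfg80211_pmsr_result res = {
		.type = NL80211_PMSR_TYPE_FTM,
		.status = NL80211_PMSR_STATUS_SUCCESS,
		.final = 1,
		.host_time = ktime_to_ns(ktime_get_boottime()),
		.ftm.rtt_avg = rtt_avg,
		.ftm.rtt_avg_valid = 1,
	};

	memcpy(res.addr, peer_addr, ETH_ALEN);
	cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);
	cfg80211_pmsr_complete(wdev, req, GFP_KERNEL);
}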
diff --git a/include/net/checksum.h b/include/net/checksum.h
index aef2b2bb6603..0f319e13be2c 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -30,7 +30,7 @@ static inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
- if (access_ok(VERIFY_READ, src, len))
+ if (access_ok(src, len))
return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
if (len)
@@ -46,7 +46,7 @@ static __inline__ __wsum csum_and_copy_to_user
{
sum = csum_partial(src, len, sum);
- if (access_ok(VERIFY_WRITE, dst, len)) {
+ if (access_ok(dst, len)) {
if (copy_to_user(dst, src, len) == 0)
return sum;
}
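A small sketch (not from this diff) of the tree-wide access_ok() change these hunks follow: the VERIFY_READ/VERIFY_WRITE argument is simply dropped.

static int foo_copy_in(void *kbuf, const void __user *ubuf, size_t len)
{
	/* access_ok() no longer takes a VERIFY_READ/VERIFY_WRITE argument */
	if (!access_ok(ubuf, len))
		return -EFAULT;

	return copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
}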
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9b89d6604d4..67f4293bc970 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -298,7 +298,7 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
- const char *vstr;
+ char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -362,6 +362,10 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -380,6 +384,18 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME "ignore_ari"
+#define DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME "msix_vec_per_pf_max"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
+#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy"
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -451,11 +467,14 @@ struct devlink_ops {
u32 *p_cur, u32 *p_max);
int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
- int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+ int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
- int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+ int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+ struct netlink_ext_ack *extack);
int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
- int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
+ int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+ struct netlink_ext_ack *extack);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -553,6 +572,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src);
struct devlink_region *devlink_region_create(struct devlink *devlink,
const char *region_name,
u32 region_max_snapshots,
@@ -789,6 +810,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
}
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+}
+
static inline struct devlink_region *
devlink_region_create(struct devlink *devlink,
const char *region_name,
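A hedged sketch (not part of this diff) of filling a string-typed parameter now that vstr is a fixed-size buffer; FOO_DEVLINK_PARAM_ID and the "default" value are hypothetical.

static int foo_init_string_param(struct devlink *devlink)
{
	union devlink_param_value val;

	/* copy into the fixed-size vstr buffer instead of storing a pointer */
	devlink_param_value_str_fill(&val, "default");
	return devlink_param_driverinit_value_set(devlink,
						  FOO_DEVLINK_PARAM_ID, val);
}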
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 461e8a7661b7..b3eefe8e18fd 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -35,7 +35,8 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_BRCM_PREPEND,
DSA_TAG_PROTO_DSA,
DSA_TAG_PROTO_EDSA,
- DSA_TAG_PROTO_KSZ,
+ DSA_TAG_PROTO_GSWIP,
+ DSA_TAG_PROTO_KSZ9477,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,
DSA_TAG_PROTO_QCA,
@@ -112,6 +113,7 @@ struct dsa_device_ops {
struct packet_type *pt);
int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
int *offset);
+ unsigned int overhead;
};
struct dsa_switch_tree {
diff --git a/include/net/dst.h b/include/net/dst.h
index 7f735e76ca73..6cf0870414c7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu);
}
+static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
+ struct dst_entry *encap_dst,
+ int headroom)
+{
+ u32 encap_mtu = dst_mtu(encap_dst);
+
+ if (skb->len > encap_mtu - headroom)
+ skb_dst_update_pmtu(skb, encap_mtu - headroom);
+}
+
#endif /* _NET_DST_H */
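A brief sketch (not from this diff) of the new helper in a tunnel transmit path; foo_tunnel_xmit and t_hlen are hypothetical.

static void foo_tunnel_xmit(struct sk_buff *skb, struct dst_entry *encap_dst,
			    int t_hlen)
{
	/* update the inner route's PMTU if the encapsulated packet won't fit */
	skb_tunnel_check_pmtu(skb, encap_dst, t_hlen);
	/* ... build outer headers and transmit ... */
}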
diff --git a/include/net/flow.h b/include/net/flow.h
index 8ce21793094e..93f2c9a0f098 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -38,8 +38,8 @@ struct flowi_common {
#define FLOWI_FLAG_KNOWN_NH 0x02
#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
- struct flowi_tunnel flowic_tun_key;
kuid_t flowic_uid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 6a4586dcdede..2b26979efb48 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -209,8 +209,8 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
- FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
- FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
@@ -221,7 +221,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
- FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 883bb9085f15..ca23860adbb9 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -10,7 +10,7 @@
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
struct net_rate_estimator;
@@ -44,6 +44,10 @@ void __gnet_stats_copy_basic(const seqcount_t *running,
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_basic_hw(const seqcount_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index decf6012a401..aa2e5888f18d 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -112,7 +112,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
static inline int genl_err_attr(struct genl_info *info, int err,
- struct nlattr *attr)
+ const struct nlattr *attr)
{
info->extack->bad_attr = attr;
diff --git a/include/net/geneve.h b/include/net/geneve.h
index a7600ed55ea3..fc6a7e0a874a 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -60,6 +60,12 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+
#ifdef CONFIG_INET
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
u8 name_assign_type, u16 dst_port);
diff --git a/include/net/gre.h b/include/net/gre.h
index 797142eee9cd..b60f212c16c6 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -37,8 +37,17 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs);
-bool is_gretap_dev(const struct net_device *dev);
-bool is_ip6gretap_dev(const struct net_device *dev);
+static inline bool netif_is_gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gretap");
+}
+
+static inline bool netif_is_ip6gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
+}
static inline int gre_calc_hlen(__be16 o_flags)
{
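A one-line sketch (not part of this diff) of switching from the removed is_gretap_dev()/is_ip6gretap_dev() to the new inline helpers.

static bool foo_is_any_gretap(const struct net_device *dev)
{
	return netif_is_gretap(dev) || netif_is_ip6gretap(dev);
}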
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743a8eec..6ac3a5bd0117 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -41,7 +41,7 @@ struct net;
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
int icmp_rcv(struct sk_buff *skb);
-void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index feef706e1158..8014153bdd49 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -75,6 +75,8 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_TIMESTAMP = 22,
IEEE80211_RADIOTAP_HE = 23,
IEEE80211_RADIOTAP_HE_MU = 24,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU = 26,
+ IEEE80211_RADIOTAP_LSIG = 27,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -325,6 +327,25 @@ enum ieee80211_radiotap_he_mu_bits {
IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
};
+enum ieee80211_radiotap_lsig_data1 {
+ IEEE80211_RADIOTAP_LSIG_DATA1_RATE_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN = 0x0002,
+};
+
+enum ieee80211_radiotap_lsig_data2 {
+ IEEE80211_RADIOTAP_LSIG_DATA2_RATE = 0x000f,
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH = 0xfff0,
+};
+
+struct ieee80211_radiotap_lsig {
+ __le16 data1, data2;
+};
+
+enum ieee80211_radiotap_zero_len_psdu_type {
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING = 0,
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR = 0xff,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
*/
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 6e91e38a31da..9db98af46985 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -115,9 +115,8 @@ int inet6_hash(struct sock *sk);
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 3ca969cbd161..975901a95c0f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -2,6 +2,8 @@
#ifndef _INET_COMMON_H
#define _INET_COMMON_H
+#include <linux/indirect_call_wrapper.h>
+
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -54,4 +56,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk)
sock_release(sk->sk_socket);
}
+#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_2(cb, f2, f1, head, skb); \
+})
+
#endif
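A hedged sketch (not from this diff) of the likely call-site shape for the new macro; choosing tcp4_gro_receive/udp4_gro_receive as the direct-call candidates is an assumption.

static struct sk_buff *foo_l4_gro_receive(const struct net_offload *ops,
					  struct list_head *head,
					  struct sk_buff *skb)
{
	/* call the common handlers directly, fall back to the indirect cb */
	return indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
					 ops->callbacks.gro_receive,
					 head, skb);
}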
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 482a1b705362..c8e2bebd8d93 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -183,8 +183,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
* 1 if something is broken and should be logged (!!! above)
* 2 if packet should be dropped
*/
-static inline int INET_ECN_decapsulate(struct sk_buff *skb,
- __u8 outer, __u8 inner)
+static inline int __INET_ECN_decapsulate(__u8 outer, __u8 inner, bool *set_ce)
{
if (INET_ECN_is_not_ect(inner)) {
switch (outer & INET_ECN_MASK) {
@@ -198,10 +197,21 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
}
}
- if (INET_ECN_is_ce(outer))
+ *set_ce = INET_ECN_is_ce(outer);
+ return 0;
+}
+
+static inline int INET_ECN_decapsulate(struct sk_buff *skb,
+ __u8 outer, __u8 inner)
+{
+ bool set_ce = false;
+ int rc;
+
+ rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
+ if (!rc && set_ce)
INET_ECN_set_ce(skb);
- return 0;
+ return rc;
}
static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 9141e95529e7..babb14136705 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -79,6 +79,7 @@ struct inet_ehash_bucket {
struct inet_bind_bucket {
possible_net_t ib_net;
+ int l3mdev;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -188,10 +189,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
hashinfo->ehash_locks = NULL;
}
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
- const unsigned short snum);
+ const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
@@ -225,6 +237,7 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
+int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
@@ -282,9 +295,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_addrpair == (__cookie)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
@@ -294,9 +306,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_daddr == (__saddr)) && \
((__sk)->sk_rcv_saddr == (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e03b93360f33..e8eef85006aa 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,10 +130,25 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
{
- return rcu_dereference_check(ireq->ireq_opt,
- refcount_read(&ireq->req.rsk_refcnt) > 0);
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct net *net = sock_net(sk);
+
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+ return l3mdev_master_ifindex_by_index(net,
+ sk->sk_bound_dev_if);
+#endif
+
+ return 0;
+}
+
+static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
+ int dif, int sdif)
+{
+ if (!bound_dev_if)
+ return !sdif || l3mdev_accept;
+ return bound_dev_if == dif || bound_dev_if == sdif;
}
struct inet_cork {
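A tiny sketch (not part of this diff) showing the intent of inet_bound_dev_eq(): an unbound socket now matches packets arriving through an L3 master device only when l3mdev_accept is enabled.

static bool foo_sk_matches_dif(const struct sock *sk, bool l3mdev_accept,
			       int dif, int sdif)
{
	return inet_bound_dev_eq(l3mdev_accept, sk->sk_bound_dev_if, dif, sdif);
}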
diff --git a/include/net/ip.h b/include/net/ip.h
index e44b1a44f67a..8866bfce6121 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -155,6 +155,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -420,8 +421,36 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
-int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
- u32 *metrics);
+struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+ int fc_mx_len,
+ struct netlink_ext_ack *extack);
+static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
+{
+ if (fib_metrics != &dst_default_metrics &&
+ refcount_dec_and_test(&fib_metrics->refcnt))
+ kfree(fib_metrics);
+}
+
+/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
+static inline
+void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
+{
+ dst_init_metrics(dst, fib_metrics->metrics, true);
+
+ if (fib_metrics != &dst_default_metrics) {
+ dst->_metrics |= DST_METRICS_REFCOUNTED;
+ refcount_inc(&fib_metrics->refcnt);
+ }
+}
+
+static inline
+void ip_dst_metrics_put(struct dst_entry *dst)
+{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
+
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+}
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
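A hedged sketch of the intended init/put pairing for the new metrics helpers; the surrounding function, error handling and lifecycle points are illustrative, and the ERR_PTR convention on failure is assumed:

#include <linux/err.h>
#include <net/ip.h>

/* Sketch: a FIB entry keeps one reference on the metrics blob, and every
 * dst built from it takes another via ip_dst_init_metrics(). fc_mx and
 * fc_mx_len would come from the RTA_METRICS attribute of the request.
 */
static int example_metrics_lifecycle(struct net *net, struct nlattr *fc_mx,
                                     int fc_mx_len, struct dst_entry *dst,
                                     struct netlink_ext_ack *extack)
{
        struct dst_metrics *m;

        m = ip_fib_metrics_init(net, fc_mx, fc_mx_len, extack);
        if (IS_ERR(m))
                return PTR_ERR(m);

        ip_dst_init_metrics(dst, m);    /* dst takes a ref unless m is the default */

        /* ... much later, at the respective teardown points ... */
        ip_dst_metrics_put(dst);        /* dst drops its reference */
        ip_fib_metrics_put(m);          /* FIB entry drops its reference */

        return 0;
}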
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3d4930528db0..84097010237c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -159,6 +159,10 @@ struct fib6_info {
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
+
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
@@ -182,7 +186,6 @@ struct rt6_info {
struct in6_addr rt6i_gateway;
struct inet6_dev *rt6i_idev;
u32 rt6i_flags;
- struct rt6key rt6i_prefsrc;
struct list_head rt6i_uncached;
struct uncached_list *rt6i_uncached_list;
@@ -408,11 +411,33 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg),
void *arg);
+void fib6_clean_all_skip_notify(struct net *net,
+ int (*func)(struct fib6_info *, void *arg),
+ void *arg);
int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack);
int fib6_del(struct fib6_info *rt, struct nl_info *info);
+static inline
+void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
+{
+ const struct fib6_info *from;
+
+ rcu_read_lock();
+
+ from = rcu_dereference(rt->from);
+ if (from) {
+ *addr = from->fib6_prefsrc.addr;
+ } else {
+ struct in6_addr in6_zero = {};
+
+ *addr = in6_zero;
+ }
+
+ rcu_read_unlock();
+}
+
static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i)
{
return f6i->fib6_nh.nh_dev;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 7b9c82de11cc..7ab119936e69 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -165,8 +165,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
kuid_t uid);
-void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
- u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
@@ -175,6 +174,7 @@ struct rt6_rtnl_dump_arg {
struct sk_buff *skb;
struct netlink_callback *cb;
struct net *net;
+ struct fib_dump_filter filter;
};
int rt6_dump_route(struct fib6_info *f6i, void *p_arg);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 236e40ba06bf..69b4bcf880c9 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -69,6 +69,8 @@ struct ip6_tnl_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi6 *fl6);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
};
#ifdef CONFIG_INET
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 69c91d1934c1..c5969762a8f4 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -222,6 +222,17 @@ struct fib_table {
unsigned long __data[0];
};
+struct fib_dump_filter {
+ u32 table_id;
+ /* filter_set is an optimization flag indicating at least one filter field is set */
+ bool filter_set;
+ bool dump_all_families;
+ unsigned char protocol;
+ unsigned char rt_type;
+ unsigned int flags;
+ struct net_device *dev;
+};
+
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
@@ -229,7 +240,7 @@ int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
- struct netlink_callback *cb);
+ struct netlink_callback *cb, struct fib_dump_filter *filter);
int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
@@ -373,6 +384,7 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
+bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
@@ -394,6 +406,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
@@ -451,4 +464,7 @@ static inline void fib_proc_exit(struct net *net)
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
+int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ struct fib_dump_filter *filter,
+ struct netlink_callback *cb);
#endif /* _NET_FIB_H */
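A hedged sketch (illustrative function, minimal error handling) of how an RTM_GETROUTE dump handler is expected to use the new filter and request validator:

#include <net/ip_fib.h>

static int example_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_dump_filter filter = {};
        int err;

        /* Strictly validate the request header and fill in the filter. */
        err = ip_valid_fib_dump_req(net, cb->nlh, &filter, cb);
        if (err < 0)
                return err;

        if (filter.table_id) {
                struct fib_table *tb = fib_get_table(net, filter.table_id);

                if (!tb)
                        return filter.dump_all_families ? skb->len : -ENOENT;
                return fib_table_dump(tb, skb, cb, &filter);
        }

        /* ... otherwise walk every table, letting the filter skip entries ... */
        return skb->len;
}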
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022ff6ea1..34f019650941 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -144,25 +144,6 @@ struct ip_tunnel {
bool ignore_df;
};
-#define TUNNEL_CSUM __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING __cpu_to_be16(0x02)
-#define TUNNEL_KEY __cpu_to_be16(0x04)
-#define TUNNEL_SEQ __cpu_to_be16(0x08)
-#define TUNNEL_STRICT __cpu_to_be16(0x10)
-#define TUNNEL_REC __cpu_to_be16(0x20)
-#define TUNNEL_VERSION __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
-#define TUNNEL_OAM __cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
- (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-
struct tnl_ptk_info {
__be16 flags;
__be16 proto;
@@ -311,6 +292,7 @@ struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
};
#define MAX_IPTUN_ENCAP_OPS 8
@@ -326,6 +308,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ int nhlen;
+
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ default:
+ nhlen = 0;
+ }
+
+ return pskb_network_may_pull(skb, nhlen);
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
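For illustration only, a sketch of the call site pskb_inet_may_pull() is meant for; example_tnl_xmit() and its header construction are hypothetical:

#include <linux/netdevice.h>
#include <net/ip_tunnels.h>

static netdev_tx_t example_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Make sure the inner network header is in the linear area
         * before the tunnel code dereferences it.
         */
        if (!pskb_inet_may_pull(skb))
                goto tx_err;

        if (skb->protocol == htons(ETH_P_IP)) {
                /* ... safe to parse ip_hdr(skb) and build the outer header ... */
        }

        return NETDEV_TX_OK;

tx_err:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}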
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ff33f498c137..daf80863d3a5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -975,6 +975,8 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+ bool have_final);
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -1089,8 +1091,6 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#endif
#ifdef CONFIG_SYSCTL
-extern struct ctl_table ipv6_route_table_template[];
-
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
int ipv6_sysctl_register(void);
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index f4c21b5a1242..14a490246be9 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -80,6 +80,11 @@ struct af_iucv_trans_hdr {
u8 pad; /* total 104 bytes */
} __packed;
+static inline struct af_iucv_trans_hdr *iucv_trans_hdr(struct sk_buff *skb)
+{
+ return (struct af_iucv_trans_hdr *)skb_network_header(skb);
+}
+
enum iucv_tx_notify {
/* transmission of skb is completed and was successful */
TX_NOTIFY_OK = 0,
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099289c5..78fa0ac4613c 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -101,6 +101,17 @@ struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex);
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ rcu_read_lock();
+ ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -208,6 +219,17 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+{
+ return 0;
+}
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline
struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
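A small illustrative helper (not from this patch) showing the intended use of the new upper-ifindex lookup, e.g. when a dump request filters by an L3 master device:

#include <net/l3mdev.h>

/* Resolve a user-supplied ifindex to its VRF/L3 master, or keep the index
 * itself when the device has no master. Purely illustrative.
 */
static int example_resolve_l3_master(struct net *net, int ifindex)
{
        int master = l3mdev_master_upper_ifindex_by_index(net, ifindex);

        return master ? master : ifindex;
}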
diff --git a/include/net/llc.h b/include/net/llc.h
index 890a87318014..df282d9b4017 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -66,6 +66,7 @@ struct llc_sap {
int sk_count;
struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
+ struct rcu_head rcu;
};
static inline
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5790f55c241d..88219cc137c3 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -101,8 +101,9 @@
* Drivers indicate that they use this model by implementing the .wake_tx_queue
* driver operation.
*
- * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with a
- * single per-vif queue for multicast data frames.
+ * Intermediate queues (struct ieee80211_txq) are kept per-sta per-tid, with
+ * another per-sta for non-data/non-mgmt and bufferable management frames, and
+ * a single per-vif queue for multicast data frames.
*
* The driver is expected to initialize its private per-queue data for stations
* and interfaces in the .add_interface and .sta_add ops.
@@ -308,6 +309,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
* @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
+ * functionality changed for this BSS (AP mode).
*
*/
enum ieee80211_bss_change {
@@ -337,6 +340,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_MU_GROUPS = 1<<23,
BSS_CHANGED_KEEP_ALIVE = 1<<24,
BSS_CHANGED_MCAST_RATE = 1<<25,
+ BSS_CHANGED_FTM_RESPONDER = 1<<26,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -463,6 +467,21 @@ struct ieee80211_mu_group_data {
};
/**
+ * struct ieee80211_ftm_responder_params - FTM responder parameters
+ *
+ * @lci: LCI subelement content
+ * @civicloc: CIVIC location subelement content
+ * @lci_len: LCI data length
+ * @civicloc_len: Civic data length
+ */
+struct ieee80211_ftm_responder_params {
+ const u8 *lci;
+ const u8 *civicloc;
+ size_t lci_len;
+ size_t civicloc_len;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -477,6 +496,8 @@ struct ieee80211_mu_group_data {
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
+ * @twt_requester: does this BSS support TWT requester (relevant for managed
+ * mode only, set if the AP advertises TWT responder role)
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -561,6 +582,9 @@ struct ieee80211_mu_group_data {
* @protected_keep_alive: if set, indicates that the station should send an RSN
* protected frame to the AP to reset the idle timer at the AP for the
* station.
+ * @ftm_responder: whether to enable or disable fine timing measurement (FTM)
+ * responder functionality.
+ * @ftmr_params: configurable LCI/civic parameters when enabling the FTM responder.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -572,6 +596,7 @@ struct ieee80211_bss_conf {
u8 uora_ocw_range;
u16 frame_time_rts_th;
bool he_support;
+ bool twt_requester;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -611,6 +636,8 @@ struct ieee80211_bss_conf {
bool allow_p2p_go_ps;
u16 max_idle_period;
bool protected_keep_alive;
+ bool ftm_responder;
+ struct ieee80211_ftm_responder_params *ftmr_params;
};
/**
@@ -1140,6 +1167,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* from the RX info data, so leave those zeroed when building this data)
* @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
* (&struct ieee80211_radiotap_he_mu)
+ * @RX_FLAG_RADIOTAP_LSIG: L-SIG radiotap data is present
+ * @RX_FLAG_NO_PSDU: use the frame only for radiotap reporting, with
+ * the "0-length PSDU" field included there. The value for it is
+ * in &struct ieee80211_rx_status. Note that if this value isn't
+ * known the frame shouldn't be reported.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1170,6 +1202,8 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
RX_FLAG_RADIOTAP_HE = BIT(26),
RX_FLAG_RADIOTAP_HE_MU = BIT(27),
+ RX_FLAG_RADIOTAP_LSIG = BIT(28),
+ RX_FLAG_NO_PSDU = BIT(29),
};
/**
@@ -1242,6 +1276,7 @@ enum mac80211_rx_encoding {
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
+ * @zero_length_psdu_type: radiotap type of the 0-length PSDU
*/
struct ieee80211_rx_status {
u64 mactime;
@@ -1262,6 +1297,7 @@ struct ieee80211_rx_status {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
+ u8 zero_length_psdu_type;
};
/**
@@ -1504,6 +1540,8 @@ enum ieee80211_vif_flags {
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
+ * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
+ * protected by fq->lock.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1528,6 +1566,8 @@ struct ieee80211_vif {
unsigned int probe_req_reg;
+ bool txqs_stopped[IEEE80211_NUM_ACS];
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -1839,7 +1879,9 @@ struct ieee80211_sta_rates {
* unlimited.
* @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
* @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
- * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
+ * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID
+ * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that
+ * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames
*/
struct ieee80211_sta {
u32 supp_rates[NUM_NL80211_BANDS];
@@ -1879,8 +1921,9 @@ struct ieee80211_sta {
u16 max_amsdu_len;
bool support_p2p_ps;
u16 max_rc_amsdu_len;
+ u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
- struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
+ struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
@@ -1914,7 +1957,8 @@ struct ieee80211_tx_control {
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @sta: station table entry, %NULL for per-vif queue
- * @tid: the TID for this queue (unused for per-vif queue)
+ * @tid: the TID for this queue (unused for per-vif queue),
+ * %IEEE80211_NUM_TIDS for non-data (if enabled)
* @ac: the AC for this queue
* @drv_priv: driver private area, sized by hw->txq_data_size
*
@@ -2127,6 +2171,19 @@ struct ieee80211_txq {
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
+ * @IEEE80211_HW_BUFF_MMPDU_TXQ: use the TXQ for bufferable MMPDUs; this of
+ * course requires the driver to use TXQs to start with.
+ *
+ * @IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW: (Hardware) rate control supports VHT
+ * extended NSS BW (dot11VHTExtendedNSSBWCapable). This flag will be set if
+ * the selected rate control algorithm sets %RATE_CTRL_CAPA_VHT_EXT_NSS_BW
+ * but if the rate control is built-in then it must be set by the driver.
+ * See also the documentation for that flag.
+ *
+ * @IEEE80211_HW_STA_MMPDU_TXQ: use the extra non-TID per-station TXQ for all
+ * MMPDUs on station interfaces. This of course requires the driver to use
+ * TXQs to start with.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2172,6 +2229,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
+ IEEE80211_HW_BUFF_MMPDU_TXQ,
+ IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
+ IEEE80211_HW_STA_MMPDU_TXQ,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2290,6 +2350,10 @@ enum ieee80211_hw_flags {
* supported by HW.
* @max_nan_de_entries: maximum number of NAN DE functions supported by the
* device.
+ *
+ * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
+ * them are encountered. The default should typically not be changed,
+ * unless the driver has good reasons for needing more buffers.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2325,6 +2389,7 @@ struct ieee80211_hw {
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
u8 max_nan_de_entries;
+ u8 tx_sk_pacing_shift;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
@@ -2506,6 +2571,19 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The set_default_unicast_key() call updates the default WEP key index
* configured to the hardware for WEP encryption type. This is required
* for devices that support offload of data packets (e.g. ARP responses).
+ *
+ * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
+ * when they are able to replace in-use PTK keys according to the following
+ * requirements:
+ * 1) They do not hand over frames decrypted with the old key to
+ *    mac80211 once the call to set_key() with command %DISABLE_KEY has been
+ *    completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 2) either drop or continue to use the old key for any outgoing frames queued
+ *    at the time of the key deletion (including re-transmits),
+ * 3) never send out a frame queued prior to the set_key() %SET_KEY command
+ *    encrypted with the new key and
+ * 4) never send out a frame unencrypted when it should be encrypted.
+ * Mac80211 will not queue any new frames for a deleted key to the driver.
*/
/**
@@ -3164,6 +3242,11 @@ enum ieee80211_reconfig_type {
* When the scan finishes, ieee80211_scan_completed() must be called;
* note that it also must be called when the scan cannot finish due to
* any error unless this callback returned a negative error code.
+ * This callback is also allowed to return the special return value 1;
+ * this indicates that hardware scan isn't desirable right now and a
+ * software scan should be done instead. A driver wishing to use this
+ * capability must ensure its (hardware) scan capabilities aren't
+ * advertised as more capable than mac80211's software scan is.
* The callback can sleep.
*
* @cancel_hw_scan: Ask the low-level driver to cancel the active hw scan.
@@ -3542,6 +3625,15 @@ enum ieee80211_reconfig_type {
* @del_nan_func: Remove a NAN function. The driver must call
* ieee80211_nan_func_terminated() with
* NL80211_NAN_FUNC_TERM_REASON_USER_REQUEST reason code upon removal.
+ * @can_aggregate_in_amsdu: Called in order to determine if HW supports
+ * aggregating two specific frames in the same A-MSDU. The relation
+ * between the skbs should be symmetric and transitive. Note that while
+ * skb is always a real frame, head may or may not be an A-MSDU.
+ * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
+ * Statistics should be cumulative; currently no way to reset is provided.
+ *
+ * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
+ * @abort_pmsr: abort peer measurement (this call can sleep)
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3824,6 +3916,16 @@ struct ieee80211_ops {
void (*del_nan_func)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 instance_id);
+ bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb);
+ int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_ftm_responder_stats *ftm_stats);
+ int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
};
/**
@@ -4293,6 +4395,21 @@ void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
u32 thr);
/**
+ * ieee80211_tx_rate_update - transmit rate update callback
+ *
+ * Drivers should call this function with a non-NULL pubsta.
+ * This function can be used by drivers that do not have provision
+ * for updating the tx rate in the data path.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @pubsta: the station to update the tx rate for.
+ * @info: tx status information
+ */
+void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
+ struct ieee80211_tx_info *info);
+
+/**
* ieee80211_tx_status - transmit status callback
*
* Call this function for all transmitted frames after they have been
@@ -5644,7 +5761,22 @@ struct ieee80211_tx_rate_control {
bool bss;
};
+/**
+ * enum rate_control_capabilities - rate control capabilities
+ */
+enum rate_control_capabilities {
+ /**
+ * @RATE_CTRL_CAPA_VHT_EXT_NSS_BW:
+ * Support for extended NSS BW (dot11VHTExtendedNSSBWCapable).
+ * Note that this is only looked at if the minimum number of chains
+ * that the AP uses is < the number of TX chains the hardware has,
+ * otherwise the NSS difference doesn't bother us.
+ */
+ RATE_CTRL_CAPA_VHT_EXT_NSS_BW = BIT(0),
+};
+
struct rate_control_ops {
+ unsigned long capa;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void (*free)(void *priv);
@@ -5974,6 +6106,14 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* @txq: pointer obtained from station or virtual interface
*
* Returns the skb if successful, %NULL if no frame was available.
+ *
+ * Note that this must be called in an rcu_read_lock() critical section,
+ * which can only be released after the SKB was handled. Some pointers in
* skb->cb, e.g. the key pointer, are protected by RCU and thus the
+ * critical section must persist not just for the duration of this call
+ * but for the duration of the frame handling.
+ * However, also note that while in the wake_tx_queue() method,
+ * rcu_read_lock() is already held.
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
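The RCU rule spelled out above is easiest to see from a driver's wake_tx_queue() op; the following is a hedged sketch, with example_hw_queue_frame() standing in for whatever the driver does with the frame:

#include <net/mac80211.h>

static void example_hw_queue_frame(struct ieee80211_hw *hw,
                                   struct ieee80211_txq *txq,
                                   struct sk_buff *skb)
{
        /* hypothetical: program the frame into the device TX ring */
}

/* Inside .wake_tx_queue, rcu_read_lock() is already held, so frames can be
 * dequeued and handed off directly. Dequeuing from any other context would
 * need an explicit rcu_read_lock() spanning both the dequeue and the frame
 * handling.
 */
static void example_wake_tx_queue(struct ieee80211_hw *hw,
                                  struct ieee80211_txq *txq)
{
        struct sk_buff *skb;

        while ((skb = ieee80211_tx_dequeue(hw, txq)))
                example_hw_queue_frame(hw, txq, skb);
}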
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6c1eecd56a4d..7c1ab9edba03 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -140,8 +140,8 @@ struct neighbour {
unsigned long updated;
rwlock_t lock;
refcount_t refcnt;
- struct sk_buff_head arp_queue;
unsigned int arp_queue_len_bytes;
+ struct sk_buff_head arp_queue;
struct timer_list timer;
unsigned long used;
atomic_t probes;
@@ -149,11 +149,13 @@ struct neighbour {
__u8 nud_state;
__u8 type;
__u8 dead;
+ u8 protocol;
seqlock_t ha_lock;
- unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
+ struct list_head gc_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,6 +174,7 @@ struct pneigh_entry {
possible_net_t net;
struct net_device *dev;
u8 flags;
+ u8 protocol;
u8 key[0];
};
@@ -214,6 +217,8 @@ struct neigh_table {
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
+ atomic_t gc_entries;
+ struct list_head gc_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,6 +255,7 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+extern const struct nla_policy nda_policy[];
static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
@@ -323,6 +329,7 @@ void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
@@ -453,6 +460,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+ unsigned int hh_alen = 0;
unsigned int seq;
unsigned int hh_len;
@@ -460,16 +468,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
if (likely(hh_len <= HH_DATA_MOD)) {
- /* this is inlined by gcc */
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
} else {
- unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+ hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
}
} while (read_seqretry(&hh->hh_lock, seq));
- skb_push(skb, hh_len);
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}
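For context, a simplified sketch of the caller pattern in this header; with the change above, an skb lacking headroom is now dropped with NET_XMIT_DROP instead of hitting skb_push() on insufficient room:

/* Simplified: use the cached hardware header when the neighbour is in a
 * connected state, otherwise take the slow path through n->output().
 */
static inline int example_neigh_output(struct neighbour *n, struct sk_buff *skb)
{
        const struct hh_cache *hh = &n->hh;

        if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
                return neigh_hh_output(hh, skb);

        return n->output(n, skb);
}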
@@ -527,20 +552,17 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
} while (read_seqretry(&n->ha_lock, seq));
}
-static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
- int *notify)
+static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
+ int *notify)
{
u8 ndm_flags = 0;
- if (!(flags & NEIGH_UPDATE_F_ADMIN))
- return;
-
- ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
- if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
- if (ndm_flags & NTF_EXT_LEARNED)
- neigh->flags |= NTF_EXT_LEARNED;
+ ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
+ if (ndm_flags & NTF_ROUTER)
+ neigh->flags |= NTF_ROUTER;
else
- neigh->flags &= ~NTF_EXT_LEARNED;
+ neigh->flags &= ~NTF_ROUTER;
*notify = 1;
}
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9b5fdc50519a..99d4148e0f90 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -43,6 +43,7 @@ struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
+struct bpf_prog;
#define NETDEV_HASHBITS 8
@@ -145,6 +146,8 @@ struct net {
#endif
struct net_generic __rcu *gen;
+ struct bpf_prog __rcu *flow_dissector_prog;
+
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 74af19c3a8f7..4cd56808ac4e 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -6,12 +6,12 @@
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
- skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+ struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
- if (likely(skb->nf_bridge))
- refcount_set(&(skb->nf_bridge->use), 1);
+ if (b)
+ memset(b, 0, sizeof(*b));
- return skb->nf_bridge;
+ return b;
}
void nf_bridge_update_protocol(struct sk_buff *skb);
@@ -22,12 +22,6 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
int (*okfn)(struct net *, struct sock *,
struct sk_buff *));
-static inline struct nf_bridge_info *
-nf_bridge_info_get(const struct sk_buff *skb)
-{
- return skb->nf_bridge;
-}
-
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index c84b51682f08..135ee702c7b0 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,20 +10,17 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite;
#endif
-int nf_conntrack_ipv4_compat_init(void);
-void nf_conntrack_ipv4_compat_fini(void);
-
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
void nf_nat_masquerade_ipv4_unregister_notifier(void);
#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index effa8dfba68c..7b3c873f8839 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -2,20 +2,7 @@
#ifndef _NF_CONNTRACK_IPV6_H
#define _NF_CONNTRACK_IPV6_H
-extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
-
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_SCTP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
-#endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
-#endif
#include <linux/sysctl.h>
extern struct ctl_table nf_ct_ipv6_sysctl_table[];
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
void nf_nat_masquerade_ipv6_unregister_notifier(void);
#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 7e012312cd61..249d0a5b12b8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -27,12 +27,17 @@
#include <net/netfilter/nf_conntrack_tuple.h>
+struct nf_ct_udp {
+ unsigned long stream_ts;
+};
+
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
struct nf_ct_gre gre;
unsigned int tmpl_padto;
};
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 79d8d16732b4..bc6745d3010e 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -46,9 +46,6 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
return acct;
};
-unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
- int dir);
-
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
{
@@ -61,8 +58,7 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
net->ct.sysctl_acct = enable;
}
-int nf_conntrack_acct_pernet_init(struct net *net);
-void nf_conntrack_acct_pernet_fini(struct net *net);
+void nf_conntrack_acct_pernet_init(struct net *net);
int nf_conntrack_acct_init(void);
void nf_conntrack_acct_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 2a3e0974a6af..afc9b3620473 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -20,8 +20,7 @@
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
-unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
- struct sk_buff *skb);
+unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state);
int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 4b2b2baf8ab4..f32fc8289473 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -5,17 +5,10 @@
struct nf_conncount_data;
-enum nf_conncount_list_add {
- NF_CONNCOUNT_ADDED, /* list add was ok */
- NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
- NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
-};
-
struct nf_conncount_list {
spinlock_t list_lock;
struct list_head head; /* connections with the same filtering key */
unsigned int count; /* length of list */
- bool dead;
};
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit);
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
void nf_conncount_list_init(struct nf_conncount_list *list);
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
-
bool nf_conncount_gc_list(struct net *net,
struct nf_conncount_list *list);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 3f1ce9a8776e..52b44192b43f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -142,7 +142,7 @@ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report);
-int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
int nf_conntrack_ecache_init(void);
@@ -182,10 +182,7 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
u32 portid,
int report) {}
-static inline int nf_conntrack_ecache_pernet_init(struct net *net)
-{
- return 0;
-}
+static inline void nf_conntrack_ecache_pernet_init(struct net *net) {}
static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 2492120b8097..ec52a8dc32fd 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -124,8 +124,7 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-int nf_conntrack_helper_pernet_init(struct net *net);
-void nf_conntrack_helper_pernet_fini(struct net *net);
+void nf_conntrack_helper_pernet_init(struct net *net);
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 8465263b297d..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -18,9 +18,6 @@
struct seq_file;
struct nf_conntrack_l4proto {
- /* L3 Protocol number. */
- u_int16_t l3proto;
-
/* L4 Protocol number. */
u_int8_t l4proto;
@@ -43,22 +40,14 @@ struct nf_conntrack_l4proto {
/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct nf_conn *ct,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo);
-
- /* Called when a new connection for this protocol found;
- * returns TRUE if it's OK. If so, packet() called next. */
- bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff);
+ enum ip_conntrack_info ctinfo,
+ const struct nf_hook_state *state);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
- int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
- unsigned int dataoff,
- u_int8_t pf, unsigned int hooknum);
-
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
@@ -92,7 +81,7 @@ struct nf_conntrack_l4proto {
#endif
unsigned int *net_id;
/* Init l4proto pernet data */
- int (*init_net)(struct net *net, u_int16_t proto);
+ int (*init_net)(struct net *net);
/* Return the per-net protocol part. */
struct nf_proto_net *(*get_net_proto)(struct net *net);
@@ -101,16 +90,23 @@ struct nf_conntrack_l4proto {
struct module *me;
};
+int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
+
+int nf_conntrack_icmpv6_error(struct nf_conn *tmpl,
+ struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state);
/* Existing built-in generic protocol */
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
-#define MAX_NF_CT_PROTO 256
+#define MAX_NF_CT_PROTO IPPROTO_UDPLITE
-const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
- u_int8_t l4proto);
+const struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u8 l4proto);
-const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
- u_int8_t l4proto);
+const struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u8 l4proto);
void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p);
/* Protocol pernet registration. */
@@ -157,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
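A hedged sketch of the new per-netns accessors in use; the timeouts[] layout of struct nf_tcp_net is assumed to match the existing TCP tracker:

#include <net/netfilter/nf_conntrack_l4proto.h>
#include <linux/netfilter/nf_conntrack_tcp.h>

/* Illustrative only: read the per-namespace established timeout directly,
 * without the old get_net_proto()/pernet-id indirection.
 */
static unsigned int example_tcp_established_timeout(struct net *net)
{
        struct nf_tcp_net *tn = nf_tcp_pernet(net);

        return tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
}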
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d5f62cc6c2ae..3394d75e1c80 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
};
static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
{
struct nf_ct_timeout *timeout;
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 3b661986be8f..0ed617bf0a3d 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -49,21 +49,12 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
}
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-int nf_conntrack_tstamp_pernet_init(struct net *net);
-void nf_conntrack_tstamp_pernet_fini(struct net *net);
+void nf_conntrack_tstamp_pernet_init(struct net *net);
int nf_conntrack_tstamp_init(void);
void nf_conntrack_tstamp_fini(void);
#else
-static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
-{
- return 0;
-}
-
-static inline void nf_conntrack_tstamp_pernet_fini(struct net *net)
-{
- return;
-}
+static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
static inline int nf_conntrack_tstamp_init(void)
{
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 0e355f4a3d76..7d5cda7ce32a 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -95,11 +95,7 @@ void flow_offload_free(struct flow_offload *flow);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
-int nf_flow_table_iterate(struct nf_flowtable *flow_table,
- void (*iter)(struct flow_offload *flow, void *data),
- void *data);
-
-void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
+void nf_flow_table_cleanup(struct net_device *dev);
int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index d300b8f03972..d774ca0c4c5e 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -2,18 +2,11 @@
#ifndef _NF_NAT_L3PROTO_H
#define _NF_NAT_L3PROTO_H
-struct nf_nat_l4proto;
struct nf_nat_l3proto {
u8 l3proto;
- bool (*in_range)(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range2 *range);
-
- u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
-
bool (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype);
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index b4d6b29bca62..95a4655bd1ad 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -5,78 +5,12 @@
#include <net/netfilter/nf_nat.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
-struct nf_nat_range;
struct nf_nat_l3proto;
-struct nf_nat_l4proto {
- /* Protocol number. */
- u8 l4proto;
-
- /* Translate a packet to the target according to manip type.
- * Return true if succeeded.
- */
- bool (*manip_pkt)(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype);
-
- /* Is the manipable part of the tuple between min and max incl? */
- bool (*in_range)(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
- /* Alter the per-proto part of the tuple (depending on
- * maniptype), to give a unique tuple in the given range if
- * possible. Per-protocol part of tuple is initialized to the
- * incoming packet.
- */
- void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct);
-
- int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-};
-
-/* Protocol registration. */
-int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-void nf_nat_l4proto_unregister(u8 l3proto,
- const struct nf_nat_l4proto *l4proto);
-
-const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
-extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
-#ifdef CONFIG_NF_NAT_PROTO_DCCP
-extern const struct nf_nat_l4proto nf_nat_l4proto_dccp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_SCTP
-extern const struct nf_nat_l4proto nf_nat_l4proto_sctp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
-extern const struct nf_nat_l4proto nf_nat_l4proto_udplite;
-#endif
-
-bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
-void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct, u16 *rover);
-
-int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-
+/* Translate a packet to the target according to manip type. Return true on success. */
+bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype);
#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0f39ac487012..841835a387e1 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -470,6 +470,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
+void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding);
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
/**
* enum nft_set_extensions - set extension type IDs
@@ -724,7 +727,9 @@ struct nft_expr_type {
* @eval: Expression evaluation function
* @size: full expression size, including private data size
* @init: initialization function
- * @destroy: destruction function
+ * @activate: activate expression in the next generation
+ * @deactivate: deactivate expression in the next generation
+ * @destroy: destruction function, called after synchronize_rcu
* @dump: function to dump parameters
* @type: expression type
* @validate: validate expression, called during loop detection
@@ -1293,12 +1298,14 @@ static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
*
* @list: used internally
* @msg_type: message type
+ * @put_net: ctx->net needs to be put
* @ctx: transaction context
* @data: internal information related to the transaction
*/
struct nft_trans {
struct list_head list;
int msg_type;
+ bool put_net;
struct nft_ctx ctx;
char data[0];
};
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 8da837d2aaf9..2046d104f323 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -16,6 +16,10 @@ extern struct nft_expr_type nft_meta_type;
extern struct nft_expr_type nft_rt_type;
extern struct nft_expr_type nft_exthdr_type;
+#ifdef CONFIG_NETWORK_SECMARK
+extern struct nft_object_type nft_secmark_obj_type;
+#endif
+
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/include/net/netfilter/nfnetlink_log.h
+++ /dev/null
@@ -1 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0c154f98e987..4c1e99303b5a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -153,7 +153,7 @@
* nla_find() find attribute in stream of attributes
* nla_find_nested() find attribute in nested attributes
* nla_parse() parse and validate stream of attrs
- * nla_parse_nested() parse nested attribuets
+ * nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
* nla_for_each_nested() loop over the nested attributes
*=========================================================================
@@ -172,7 +172,7 @@ enum {
NLA_FLAG,
NLA_MSECS,
NLA_NESTED,
- NLA_NESTED_COMPAT,
+ NLA_NESTED_ARRAY,
NLA_NUL_STRING,
NLA_BINARY,
NLA_S8,
@@ -180,14 +180,28 @@ enum {
NLA_S32,
NLA_S64,
NLA_BITFIELD32,
+ NLA_REJECT,
+ NLA_EXACT_LEN,
+ NLA_EXACT_LEN_WARN,
__NLA_TYPE_MAX,
};
#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+enum nla_policy_validation {
+ NLA_VALIDATE_NONE,
+ NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_MIN,
+ NLA_VALIDATE_MAX,
+ NLA_VALIDATE_FUNCTION,
+};
+
/**
* struct nla_policy - attribute validation policy
* @type: Type of attribute or NLA_UNSPEC
+ * @validation_type: type of attribute validation done in addition to
+ * type-specific validation (e.g. range, function call), see
+ * &enum nla_policy_validation
* @len: Type specific length of payload
*
* Policies are defined as arrays of this struct, the array must be
@@ -198,9 +212,11 @@ enum {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_NESTED Don't use `len' field -- length verification is
- * done by checking len of nested header (or empty)
- * NLA_NESTED_COMPAT Minimum length of structure payload
+ * NLA_NESTED,
+ * NLA_NESTED_ARRAY Length verification is done by checking len of
+ * nested header (or empty); len field is used if
+ * validation_data is also used, for the max attr
+ * number in the nested policy.
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
@@ -208,9 +224,59 @@ enum {
* NLA_MSECS Leaving the length field zero will verify the
* given type fits, using it verifies minimum length
* just like "All other"
- * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute
+ * NLA_BITFIELD32 Unused
+ * NLA_REJECT Unused
+ * NLA_EXACT_LEN Attribute must have exactly this length, otherwise
+ * it is rejected.
+ * NLA_EXACT_LEN_WARN Attribute should have exactly this length; a warning
+ * is logged if it is longer, and shorter is rejected.
* All other Minimum length of attribute payload
*
+ * Meaning of `validation_data' field:
+ * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and
+ * validation data must point to a u32 value of valid
+ * flags
+ * NLA_REJECT This attribute is always rejected and validation data
+ * may point to a string to report as the error instead
+ * of the generic one in extended ACK.
+ * NLA_NESTED Points to a nested policy to validate, must also set
+ * `len' to the max attribute number.
+ * Note that nla_parse() will validate, but of course not
+ * parse, the nested sub-policies.
+ * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set
+ * `len' to the max attribute number. The difference to
+ * NLA_NESTED is the structure - NLA_NESTED has the
+ * nested attributes directly inside, while an array has
+ * the nested attributes at another level down and the
+ * attributes directly in the nesting don't matter.
+ * All other Unused - but note that it's a union
+ *
+ * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX
+ * and NLA_POLICY_RANGE:
+ * NLA_U8,
+ * NLA_U16,
+ * NLA_U32,
+ * NLA_U64,
+ * NLA_S8,
+ * NLA_S16,
+ * NLA_S32,
+ * NLA_S64 These are used depending on the validation_type
+ * field; if that is min/max/range then the minimum,
+ * maximum, or both are used (respectively) to check
+ * the value of the integer attribute.
+ * Note that in the interest of code simplicity and
+ * struct size both limits are s16, so you cannot
+ * enforce a range that doesn't fall within the range
+ * of s16 - do that as usual in the code instead.
+ * All other Unused - but note that it's a union
+ *
+ * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ * NLA_BINARY Validation function called for the attribute,
+ * not compatible with use of the validation_data
+ * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and
+ * NLA_NESTED_ARRAY.
+ * All other Unused - but note that it's a union
+ *
* Example:
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
@@ -220,11 +286,69 @@ enum {
* };
*/
struct nla_policy {
- u16 type;
+ u8 type;
+ u8 validation_type;
u16 len;
- void *validation_data;
+ union {
+ const void *validation_data;
+ struct {
+ s16 min, max;
+ };
+ int (*validate)(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
+ };
};
+#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \
+ .len = _len }
+
+#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
+#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
+
+#define NLA_POLICY_NESTED(maxattr, policy) \
+ { .type = NLA_NESTED, .validation_data = policy, .len = maxattr }
+#define NLA_POLICY_NESTED_ARRAY(maxattr, policy) \
+ { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr }
+
+#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
+#define NLA_ENSURE_INT_TYPE(tp) \
+ (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
+ tp == NLA_S16 || tp == NLA_U16 || \
+ tp == NLA_S32 || tp == NLA_U32 || \
+ tp == NLA_S64 || tp == NLA_U64) + tp)
+#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
+ (__NLA_ENSURE(tp != NLA_BITFIELD32 && \
+ tp != NLA_REJECT && \
+ tp != NLA_NESTED && \
+ tp != NLA_NESTED_ARRAY) + tp)
+
+#define NLA_POLICY_RANGE(tp, _min, _max) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_RANGE, \
+ .min = _min, \
+ .max = _max \
+}
+
+#define NLA_POLICY_MIN(tp, _min) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MIN, \
+ .min = _min, \
+}
+
+#define NLA_POLICY_MAX(tp, _max) { \
+ .type = NLA_ENSURE_INT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MAX, \
+ .max = _max, \
+}
+
+#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
+ .type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
+ .validation_type = NLA_VALIDATE_FUNCTION, \
+ .validate = fn, \
+ .len = __VA_ARGS__ + 0, \
+}
+
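As a quick illustration of the new helpers, a policy might now be written as below. The EX_ATTR_* names, the bounds and the needed includes are purely illustrative, and the range limits deliberately stay within s16 as the note above requires:

static const struct nla_policy example_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_QUEUE]	= NLA_POLICY_RANGE(NLA_U32, 0, 255),
	[EX_ATTR_MODE]	= NLA_POLICY_MAX(NLA_U8, 3),
	[EX_ATTR_MAC]	= NLA_POLICY_ETH_ADDR,
	[EX_ATTR_OLD]	= { .type = NLA_REJECT,
			    .validation_data = "EX_ATTR_OLD is obsolete" },
};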
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -249,6 +373,9 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy,
struct netlink_ext_ack *extack);
+int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -392,13 +519,29 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+ NL_SET_ERR_MSG(extack, "Invalid header length");
return -EINVAL;
+ }
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), policy, extack);
}
+static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
+ NL_SET_ERR_MSG(extack, "Invalid header length");
+ return -EINVAL;
+ }
+
+ return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy, extack);
+}
+
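A minimal sketch of a request handler using the strict variant; the ifinfomsg header length and the example_policy/EX_ATTR_MAX names are assumptions carried over from the sketch above. The strict variant is expected to reject, rather than silently skip, attributes the policy does not cover, reporting the reason via extack:

static int example_doit(struct nlmsghdr *nlh, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err;

	err = nlmsg_parse_strict(nlh, sizeof(struct ifinfomsg), tb,
				 EX_ATTR_MAX, example_policy, extack);
	if (err < 0)
		return err;

	/* ... act on tb[] ... */
	return 0;
}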
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 9795d628a127..51cba0b8adf5 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -97,18 +97,14 @@ struct netns_ct {
struct delayed_work ecache_dwork;
bool ecache_dwork_pending;
#endif
+ bool auto_assign_helper_warned;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
- struct ctl_table_header *acct_sysctl_header;
- struct ctl_table_header *tstamp_sysctl_header;
- struct ctl_table_header *event_sysctl_header;
- struct ctl_table_header *helper_sysctl_header;
#endif
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
int sysctl_auto_assign_helper;
- bool auto_assign_helper_warned;
int sysctl_tstamp;
int sysctl_checksum;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index e47503b4e4d1..104a6669e344 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -103,6 +103,9 @@ struct netns_ipv4 {
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ int sysctl_raw_l3mdev_accept;
+#endif
int sysctl_tcp_early_demux;
int sysctl_udp_early_demux;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index f0e396ab9bec..ef1ed529f33c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -45,6 +45,7 @@ struct netns_sysctl_ipv6 {
int max_dst_opts_len;
int max_hbh_opts_len;
int seg6_flowlabel;
+ bool skip_notify_on_dev_down;
};
struct netns_ipv6 {
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 9991e5ef52cc..59f45b1e9dac 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/rhashtable-types.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
@@ -53,6 +54,7 @@ struct netns_xfrm {
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct xfrm_policy_hthresh policy_hthresh;
+ struct list_head inexact_bins;
struct sock *nlsk;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ef727f71336e..40965fbbcd31 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -65,11 +65,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return block->q;
}
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
- return tcf_block_q(block)->dev_queue->dev;
-}
-
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
@@ -86,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
@@ -122,11 +125,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return NULL;
}
-static inline struct net_device *tcf_block_dev(struct tcf_block *block)
-{
- return NULL;
-}
-
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
void *cb_priv)
@@ -193,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
{
}
+static inline
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
+static inline
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
@@ -298,19 +322,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#endif
}
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
- struct list_head *actions)
-{
#ifdef CONFIG_NET_CLS_ACT
- int i;
-
- for (i = 0; i < exts->nr_actions; i++) {
- struct tc_action *a = exts->actions[i];
-
- list_add_tail(&a->list, actions);
- }
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
-}
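A short sketch of a caller migrating from the removed list-building helper to the new iterator; the gact check is just one example of per-action inspection and assumes <net/tc_act/tc_gact.h>:

static bool example_exts_has_drop(struct tcf_exts *exts)
{
	struct tc_action *a;
	int i;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a))
			return true;
	}
	return false;
}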
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -324,7 +342,7 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- tcf_action_stats_update(a, bytes, packets, lastuse);
+ tcf_action_stats_update(a, bytes, packets, lastuse, true);
}
preempt_enable();
@@ -361,6 +379,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
#endif
}
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->actions[0];
+#else
+ return NULL;
+#endif
+}
+
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
@@ -592,8 +619,8 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
-int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
- enum tc_setup_type type, void *type_data, bool err_stop);
+int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
+ void *type_data, bool err_stop);
enum tc_block_command {
TC_BLOCK_BIND,
@@ -616,6 +643,7 @@ struct tc_cls_common_offload {
struct tc_cls_u32_knode {
struct tcf_exts *exts;
+ struct tcf_result *res;
struct tc_u32_sel *sel;
u32 handle;
u32 val;
@@ -794,12 +822,21 @@ enum tc_mq_command {
TC_MQ_CREATE,
TC_MQ_DESTROY,
TC_MQ_STATS,
+ TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ unsigned long queue;
+ u32 child_handle;
};
struct tc_mq_qopt_offload {
enum tc_mq_command command;
u32 handle;
- struct tc_qopt_offload_stats stats;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
};
enum tc_red_command {
@@ -807,13 +844,16 @@ enum tc_red_command {
TC_RED_DESTROY,
TC_RED_STATS,
TC_RED_XSTATS,
+ TC_RED_GRAFT,
};
struct tc_red_qopt_offload_params {
u32 min;
u32 max;
u32 probability;
+ u32 limit;
bool is_ecn;
+ bool is_harddrop;
struct gnet_stats_queue *qstats;
};
@@ -825,6 +865,51 @@ struct tc_red_qopt_offload {
struct tc_red_qopt_offload_params set;
struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
+ u32 child_handle;
+ };
+};
+
+enum tc_gred_command {
+ TC_GRED_REPLACE,
+ TC_GRED_DESTROY,
+ TC_GRED_STATS,
+};
+
+struct tc_gred_vq_qopt_offload_params {
+ bool present;
+ u32 limit;
+ u32 prio;
+ u32 min;
+ u32 max;
+ bool is_ecn;
+ bool is_harddrop;
+ u32 probability;
+ /* Only need backlog, see struct tc_prio_qopt_offload_params */
+ u32 *backlog;
+};
+
+struct tc_gred_qopt_offload_params {
+ bool grio_on;
+ bool wred_on;
+ unsigned int dp_cnt;
+ unsigned int dp_def;
+ struct gnet_stats_queue *qstats;
+ struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload_stats {
+ struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_queue qstats[MAX_DPs];
+ struct red_stats *xstats[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload {
+ enum tc_gred_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_gred_qopt_offload_params set;
+ struct tc_gred_qopt_offload_stats stats;
};
};
@@ -861,4 +946,14 @@ struct tc_prio_qopt_offload {
};
};
+enum tc_root_command {
+ TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7dc769e5452b..a16fbe9a2a67 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -102,6 +102,7 @@ int qdisc_set_default(const char *id);
void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle);
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct nlattr *tab,
struct netlink_ext_ack *extack);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 4fc75f7ae23b..92b3eaad6088 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -42,7 +42,10 @@ struct net_protocol {
int (*early_demux)(struct sk_buff *skb);
int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb, u32 info);
+
+ /* Returns an error if the ICMP error message could not be handled. */
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
unsigned int no_policy:1,
netns_ok:1,
/* does the protocol do more stringent
@@ -58,10 +61,12 @@ struct inet6_protocol {
void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb,
+ /* Returns an error if the ICMP error message could not be handled. */
+ int (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset,
__be32 info);
+
unsigned int flags; /* INET6_PROTO_xxx */
};
diff --git a/include/net/raw.h b/include/net/raw.h
index 9c9fa98a91a4..821ff4887f77 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -17,7 +17,7 @@
#ifndef _RAW_H
#define _RAW_H
-
+#include <net/inet_sock.h>
#include <net/protocol.h>
#include <linux/icmp.h>
@@ -61,6 +61,7 @@ void raw_seq_stop(struct seq_file *seq, void *v);
int raw_hash_sk(struct sock *sk);
void raw_unhash_sk(struct sock *sk);
+void raw_init(void);
struct raw_sock {
/* inet_sock has to be the first member */
@@ -74,4 +75,15 @@ static inline struct raw_sock *raw_sk(const struct sock *sk)
return (struct raw_sock *)sk;
}
+static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
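As a hedged fragment of a raw-socket lookup loop (the surrounding variable names are assumed), the helper folds the new sysctl into the usual bound-device check:

	/* skip sockets whose binding does not match the ingress device,
	 * honouring net.ipv4.raw_l3mdev_accept when L3 master devices
	 * are compiled in
	 */
	if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
		continue;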
#endif /* _RAW_H */
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 60f8cc86a447..3469750df0f4 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
- struct ieee80211_wmm_rule *wmm_rule;
+ struct ieee80211_wmm_rule wmm_rule;
u32 flags;
u32 dfs_cac_ms;
+ bool has_wmm;
};
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
- u32 n_wmm_rules;
char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
diff --git a/include/net/route.h b/include/net/route.h
index bb53cdba38dc..9883dc82f723 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -201,10 +201,9 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
- u32 mark, u8 protocol, int flow_flags);
+ u8 protocol);
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
- u8 protocol, int flow_flags);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u8 protocol);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
void ip_rt_send_redirect(struct sk_buff *skb);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 0bbaa5488423..e2091bb2b3a8 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -159,12 +159,14 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
- struct nlattr *tb[]);
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
struct netlink_ext_ack *exterr);
+struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a6d00093f35e..9481f2c142e2 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -19,10 +19,14 @@ struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
+struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -105,6 +109,7 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
+ struct rcu_head rcu;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -114,6 +119,19 @@ static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
refcount_inc(&qdisc->refcnt);
}
+/* Intended to be used by unlocked users, when concurrent qdisc release is
+ * possible.
+ */
+
+static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return qdisc;
+ if (refcount_inc_not_zero(&qdisc->refcnt))
+ return qdisc;
+ return NULL;
+}
+
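A rough sketch of the unlocked-lookup pattern this enables, combining it with the qdisc_lookup_rcu() and qdisc_put() additions elsewhere in this diff (error handling trimmed):

	struct Qdisc *q;

	rcu_read_lock();
	q = qdisc_lookup_rcu(dev, handle);
	if (q)
		q = qdisc_refcount_inc_nz(q);	/* NULL if already being released */
	rcu_read_unlock();
	if (!q)
		return -ENOENT;

	/* ... use q ... */
	qdisc_put(q);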
static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
@@ -307,9 +325,14 @@ struct tcf_proto {
};
struct qdisc_skb_cb {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
+ union {
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
+ };
+ struct bpf_flow_keys *flow_keys;
+ };
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
};
@@ -331,7 +354,7 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
u32 index; /* block index for shared blocks */
- unsigned int refcnt;
+ refcount_t refcnt;
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
@@ -343,6 +366,7 @@ struct tcf_block {
struct tcf_chain *chain;
struct list_head filter_chain_list;
} chain0;
+ struct rcu_head rcu;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -362,7 +386,7 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
}
static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
u32 *flags, bool add)
{
if (add) {
@@ -554,9 +578,34 @@ void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
-void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_put(struct Qdisc *qdisc);
+void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
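A rough guess at the intended use from a qdisc's dump path; the RED command and offload struct here are only an example, the real callers live in the scheduler patches rather than this header:

static int example_red_dump_offload(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw = {
		.command = TC_RED_STATS,
		.handle	 = sch->handle,
		.parent	 = sch->parent,
	};

	/* the !CONFIG_NET_SCHED stub above simply clears TCQ_F_OFFLOADED
	 * and returns 0; with CONFIG_NET_SCHED the helper is expected to
	 * query the device and update the flag
	 */
	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw);
}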
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
@@ -828,8 +877,8 @@ static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
qh->qlen = 0;
}
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
{
struct sk_buff *last = qh->tail;
@@ -842,14 +891,24 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
qh->head = skb;
}
qh->qlen++;
- qdisc_qstats_backlog_inc(sch, skb);
+}
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ __qdisc_enqueue_tail(skb, &sch->q);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
-static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
{
- return __qdisc_enqueue_tail(skb, sch, &sch->q);
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
}
static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
diff --git a/include/net/scm.h b/include/net/scm.h
index 903771c8d4e3..1ce365f4c256 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -8,6 +8,7 @@
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
+#include <linux/sched/signal.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 86f034b524d4..4588bdc2b8f0 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -71,7 +71,7 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
SCTP_NUM_AUTH_CHUNK_TYPES)
/* These are the different flavours of event. */
-enum sctp_event {
+enum sctp_event_type {
SCTP_EVENT_T_CHUNK = 1,
SCTP_EVENT_T_TIMEOUT,
SCTP_EVENT_T_OTHER,
@@ -148,11 +148,6 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive)
#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
a->chunk_hdr->type == SCTP_CID_I_DATA)
-/* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
- (unsigned long)(c->chunk_hdr) - \
- sctp_datachk_len(&c->asoc->stream)))
-
/* Internal error codes */
enum sctp_ierror {
SCTP_IERROR_NO_ERROR = 0,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..1d13ec3f2707 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -151,8 +151,8 @@ int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
* sctp/input.c
*/
int sctp_rcv(struct sk_buff *skb);
-void sctp_v4_err(struct sk_buff *skb, u32 info);
-void sctp_hash_endpoint(struct sctp_endpoint *);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
@@ -608,4 +608,21 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5ef1bad81ef5..24825a81829e 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -173,7 +173,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
- enum sctp_event event_type,
+ enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype);
int sctp_chunk_iif(const struct sctp_chunk *);
@@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
/* Prototypes for statetable processing. */
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
union sctp_subtype subtype, enum sctp_state state,
struct sctp_endpoint *ep, struct sctp_association *asoc,
void *event_arg, gfp_t gfp);
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sctp_datahdr_len(&chunk->asoc->stream);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
return size;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 28a7c8e44636..003020eb6e66 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -96,7 +96,9 @@ struct sctp_stream;
struct sctp_bind_bucket {
unsigned short port;
- unsigned short fastreuse;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
struct hlist_node node;
struct hlist_head owner;
struct net *net;
@@ -215,7 +217,7 @@ struct sctp_sock {
* These two structures must be grouped together for the usercopy
* whitelist region.
*/
- struct sctp_event_subscribe subscribe;
+ __u16 subscribe;
struct sctp_initmsg initmsg;
int user_frag;
@@ -876,6 +878,8 @@ struct sctp_transport {
unsigned long sackdelay;
__u32 sackfreq;
+ atomic_t mtu_info;
+
/* When was the last time that we heard from this transport? We use
* this to pick new active and retran paths.
*/
@@ -1188,6 +1192,8 @@ int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *, struct sctp_sock *);
int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2);
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
const union sctp_addr *addrs,
int addrcnt,
@@ -2071,8 +2077,12 @@ struct sctp_association {
int sent_cnt_removable;
+ __u16 subscribe;
+
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ struct rcu_head rcu;
};
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 51b4e0626c34..bd922a0fe914 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -164,30 +164,39 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+ __u16 sn_type, __u8 on)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return;
+
+ if (on)
+ *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE));
+ else
+ *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
/* Is this event type enabled? */
-static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
{
- int offset = sn_type - SCTP_SN_TYPE_BASE;
- char *amask = (char *) mask;
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return false;
- if (offset >= sizeof(struct sctp_event_subscribe))
- return 0;
- return amask[offset];
+ return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
}
/* Given an event subscription, is this event enabled? */
-static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ __u16 subscribe)
{
__u16 sn_type;
- int enabled = 1;
- if (sctp_ulpevent_is_notification(event)) {
- sn_type = sctp_ulpevent_get_notification_type(event);
- enabled = sctp_ulpevent_type_enabled(sn_type, mask);
- }
- return enabled;
+ if (!sctp_ulpevent_is_notification(event))
+ return true;
+
+ sn_type = sctp_ulpevent_get_notification_type(event);
+
+ return sctp_ulpevent_type_enabled(subscribe, sn_type);
}
#endif /* __sctp_ulpevent_h__ */
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 2567941a2f32..8b2dc6869fd1 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -16,7 +16,6 @@
#include <linux/net.h>
#include <linux/ipv6.h>
-#include <net/lwtunnel.h>
#include <linux/seg6.h>
#include <linux/rhashtable-types.h>
diff --git a/include/net/sock.h b/include/net/sock.h
index 433f45fc2d68..2b229f7be8eb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -298,6 +298,7 @@ struct sock_common {
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: seqlock for accessing sk_stamp on 32-bit architectures only
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
@@ -422,8 +423,8 @@ struct sock {
struct timer_list sk_timer;
__u32 sk_priority;
__u32 sk_mark;
- u32 sk_pacing_rate; /* bytes per second */
- u32 sk_max_pacing_rate;
+ unsigned long sk_pacing_rate; /* bytes per second */
+ unsigned long sk_max_pacing_rate;
struct page_frag sk_frag;
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
@@ -474,6 +475,9 @@ struct sock {
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
@@ -800,6 +804,7 @@ enum sock_flags {
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
SOCK_TXTIME,
+ SOCK_XDP, /* XDP is attached */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1109,7 +1114,7 @@ struct proto {
unsigned int inuse_idx;
#endif
- bool (*stream_memory_free)(const struct sock *sk);
+ bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
@@ -1191,19 +1196,29 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
-static inline bool sk_stream_memory_free(const struct sock *sk)
+static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (sk->sk_wmem_queued >= sk->sk_sndbuf)
return false;
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk) : true;
+ sk->sk_prot->stream_memory_free(sk, wake) : true;
}
-static inline bool sk_stream_is_writeable(const struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ return __sk_stream_memory_free(sk, 0);
+}
+
+static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
- sk_stream_memory_free(sk);
+ __sk_stream_memory_free(sk, wake);
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return __sk_stream_is_writeable(sk, 0);
}
static inline int sk_under_cgroup_hierarchy(struct sock *sk,
@@ -1491,6 +1506,7 @@ static inline void lock_sock(struct sock *sk)
lock_sock_nested(sk, 0);
}
+void __release_sock(struct sock *sk);
void release_sock(struct sock *sk);
/* BH context may only use the following locking interface. */
@@ -2057,14 +2073,20 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
+ * @sock: socket to wait on
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
+ *
+ * Do not derive sock from filp->private_data here. An SMC socket establishes
+ * an internal TCP socket that is used in the fallback case. All socket
+ * operations on the SMC socket are then forwarded to the TCP socket. In case of
+ * poll, the filp->private_data pointer references the SMC socket because the
+ * TCP socket has no file assigned.
*/
-static inline void sock_poll_wait(struct file *filp, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, struct socket *sock,
+ poll_table *p)
{
- struct socket *sock = filp->private_data;
-
if (!poll_does_not_wait(p)) {
poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
@@ -2212,10 +2234,6 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
- int sg_start, int *sg_curr, unsigned int *sg_size,
- int first_coalesce);
-
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
@@ -2283,6 +2301,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
atomic_add(segs, &sk->sk_drops);
}
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ sk->sk_stamp = kt;
+#endif
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2307,7 +2353,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
- sk->sk_stamp = kt;
+ sock_write_timestamp(sk, kt);
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
@@ -2328,30 +2374,47 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
- sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
- sk->sk_stamp = 0;
+ sock_write_timestamp(sk, 0);
}
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
/**
- * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
* @tsflags: timestamping flags to use
* @tx_flags: completed with instructions for time stamping
+ * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
- __u8 *tx_flags)
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags, __u32 *tskey)
{
- if (unlikely(tsflags))
+ if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+ *tskey = sk->sk_tskey++;
+ }
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
+{
+ _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+ _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ &skb_shinfo(skb)->tskey);
+}
+
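A hedged before/after from a datagram send path (sockc->tsflags is the usual cmsg-derived value; the other names are illustrative): the new helper also records the tskey, which a bare sock_tx_timestamp() call never did for raw/datagram sockets.

	/* previously open-coded as:
	 *	sock_tx_timestamp(sk, sockc->tsflags,
	 *			  &skb_shinfo(skb)->tx_flags);
	 */
	skb_setup_tx_timestamp(skb, sockc->tsflags);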
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d574ce63bf22..a7fdab5ee6c3 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -95,8 +95,8 @@ struct switchdev_obj_port_vlan {
u16 vid_end;
};
-#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
- container_of(obj, struct switchdev_obj_port_vlan, obj)
+#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_vlan, obj)
/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
@@ -105,8 +105,8 @@ struct switchdev_obj_port_mdb {
u16 vid;
};
-#define SWITCHDEV_OBJ_PORT_MDB(obj) \
- container_of(obj, struct switchdev_obj_port_mdb, obj)
+#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_mdb, obj)
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
void *data, void (*destructor)(void const *),
@@ -121,10 +121,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
* @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
*
* @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
- *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
- *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
*/
struct switchdev_ops {
int (*switchdev_port_attr_get)(struct net_device *dev,
@@ -132,11 +128,6 @@ struct switchdev_ops {
int (*switchdev_port_attr_set)(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans);
- int (*switchdev_port_obj_add)(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans);
- int (*switchdev_port_obj_del)(struct net_device *dev,
- const struct switchdev_obj *obj);
};
enum switchdev_notifier_type {
@@ -145,17 +136,35 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+
+ SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
+ SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+
+ SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
+ SWITCHDEV_VXLAN_FDB_OFFLOADED,
};
struct switchdev_notifier_info {
struct net_device *dev;
+ struct netlink_ext_ack *extack;
};
struct switchdev_notifier_fdb_info {
struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
- bool added_by_user;
+ u8 added_by_user:1,
+ offloaded:1;
+};
+
+struct switchdev_notifier_port_obj_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_obj *obj;
+ struct switchdev_trans *trans;
+ bool handled;
};
static inline struct net_device *
@@ -164,6 +173,12 @@ switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
return info->dev;
}
+static inline struct netlink_ext_ack *
+switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
+{
+ return info->extack;
+}
+
#ifdef CONFIG_NET_SWITCHDEV
void switchdev_deferred_process(void);
@@ -172,13 +187,22 @@ int switchdev_port_attr_get(struct net_device *dev,
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr);
int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj);
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
int switchdev_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj);
+
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info);
+
+int register_switchdev_blocking_notifier(struct notifier_block *nb);
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
+
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -186,6 +210,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b);
+int switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj));
+
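A sketch of how a driver's blocking notifier might dispatch to these helpers; example_port_dev_check(), example_port_obj_add() and example_port_obj_del() are hypothetical driver callbacks matching the prototypes above:

static int example_blocking_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    example_port_dev_check,
						    example_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    example_port_dev_check,
						    example_port_obj_del);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}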
#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops))
#else
@@ -206,7 +243,8 @@ static inline int switchdev_port_attr_set(struct net_device *dev,
}
static inline int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -234,12 +272,55 @@ static inline int call_switchdev_notifiers(unsigned long val,
return NOTIFY_DONE;
}
+static inline int
+register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+call_switchdev_blocking_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return NOTIFY_DONE;
+}
+
static inline bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
return false;
}
+static inline int
+switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
#define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0)
#endif
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 770917d0caa7..e0a65c067662 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -313,7 +313,7 @@ extern struct proto tcp_prot;
void tcp_tasklet_init(void);
-void tcp_v4_err(struct sk_buff *skb, u32);
+int tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
@@ -732,7 +732,7 @@ void tcp_send_window_probe(struct sock *sk);
static inline u64 tcp_clock_ns(void)
{
- return local_clock();
+ return ktime_get_ns();
}
static inline u64 tcp_clock_us(void)
@@ -752,17 +752,7 @@ static inline u32 tcp_time_stamp_raw(void)
return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}
-
-/* Refresh 1us clock of a TCP socket,
- * ensuring monotically increasing values.
- */
-static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
-{
- u64 val = tcp_clock_us();
-
- if (val > tp->tcp_mstamp)
- tp->tcp_mstamp = val;
-}
+void tcp_mstamp_refresh(struct tcp_sock *tp);
static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
@@ -771,7 +761,13 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* provide the departure time in usec units */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
@@ -817,7 +813,7 @@ struct tcp_skb_cb {
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
-#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */
+#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
TCPCB_REPAIRED)
@@ -862,6 +858,21 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
+static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
+}
+
+static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
+{
+ return TCP_SKB_CB(skb)->bpf.sk_redir;
+}
+
+static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
* as TCP moves IP6CB into a different location in skb->cb[]
@@ -1113,7 +1124,7 @@ void tcp_rate_check_app_limited(struct sock *sk);
*/
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok;
+ return likely(tp->rx_opt.sack_ok);
}
static inline bool tcp_is_reno(const struct tcp_sock *tp)
@@ -1234,8 +1245,31 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk)
return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}
+/* Return in jiffies the delay before one skb is sent.
+ * If @skb is NULL, we look at EDT for next packet being sent on the socket.
+ */
+static inline unsigned long tcp_pacing_delay(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
+
+ pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
+
+ return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0;
+}
+
+static inline void tcp_reset_xmit_timer(struct sock *sk,
+ const int what,
+ unsigned long when,
+ const unsigned long max_when,
+ const struct sk_buff *skb)
+{
+ inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
+ max_when);
+}
+
/* Something is really bad, we could not queue an additional packet,
- * because qdisc is full or receiver sent a 0 window.
+ * because qdisc is full or receiver sent a 0 window, or we are paced.
* We do not want to add fuel to the fire, or abort too early,
* so make sure the timer we arm now is at least 200ms in the future,
* regardless of current icsk_rto value (as it could be ~2ms)
@@ -1257,8 +1291,9 @@ static inline unsigned long tcp_probe0_when(const struct sock *sk,
static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ tcp_probe0_base(sk), TCP_RTO_MAX,
+ NULL);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1280,33 +1315,16 @@ static inline __sum16 tcp_v4_check(int len, __be32 saddr,
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
-static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
-{
- return __skb_checksum_complete(skb);
-}
-
static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
- __tcp_checksum_complete(skb);
+ __skb_checksum_complete(skb);
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
-
-#undef STATE_TRACE
-
-#ifdef STATE_TRACE
-static const char *statename[]={
- "Unused","Established","Syn Sent","Syn Recv",
- "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
- "Close Wait","Last ACK","Listen","Closing"
-};
-#endif
void tcp_set_state(struct sock *sk, int state);
-
void tcp_done(struct sock *sk);
-
int tcp_abort(struct sock *sk, int err);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
@@ -1350,7 +1368,7 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf -
+ return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
atomic_read(&sk->sk_rmem_alloc));
}
@@ -1537,9 +1555,21 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family);
+#include <linux/jump_label.h>
+extern struct static_key tcp_md5_needed;
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
+{
+ if (!static_key_false(&tcp_md5_needed))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, addr, family);
+}
+
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
@@ -1840,12 +1870,16 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}
-static inline bool tcp_stream_memory_free(const struct sock *sk)
+/* @wake is one when sk_stream_write_space() calls us.
+ * This sends EPOLLOUT only once notsent_bytes drops below half the limit.
+ * This mimics the strategy used in sock_def_write_space().
+ */
+static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
- return notsent_bytes < tcp_notsent_lowat(tp);
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
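A quick worked example of the shift, with a hypothetical notsent_lowat of 131072 bytes:

/*
 *   wake == 0: writable while notsent_bytes < 131072 (the full limit)
 *   wake == 1: EPOLLOUT only once notsent_bytes < 65536 (half the limit),
 *              since (notsent_bytes << 1) must stay below 131072
 */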
#ifdef CONFIG_PROC_FS
@@ -1940,7 +1974,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}
@@ -2040,11 +2074,6 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
-enum {
- TCP_ULP_TLS,
- TCP_ULP_BPF,
-};
-
struct tcp_ulp_ops {
struct list_head list;
@@ -2053,15 +2082,12 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
- int uid;
char name[TCP_ULP_NAME_MAX];
- bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
-int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
@@ -2069,6 +2095,18 @@ void tcp_cleanup_ulp(struct sock *sk);
__MODULE_INFO(alias, alias_userspace, name); \
__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+struct sk_msg;
+struct sk_psock;
+
+int tcp_bpf_init(struct sock *sk);
+void tcp_bpf_reinit(struct sock *sk);
+int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
+ int flags);
+int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int nonblock, int flags, int *addr_len);
+int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
+ struct msghdr *msg, int len, int flags);
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
diff --git a/include/net/tls.h b/include/net/tls.h
index d5c683e8bb22..2a6ac8d642af 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -39,9 +39,11 @@
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
+#include <linux/skmsg.h>
+
#include <net/tcp.h>
#include <net/strparser.h>
-
+#include <crypto/aead.h>
#include <uapi/linux/tls.h>
@@ -74,6 +76,10 @@
*
* void (*unhash)(struct tls_device *device, struct sock *sk);
* This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ * Release the registered device and allocated resources
+ * @kref: Reference counter of the tls_device
*/
struct tls_device {
char name[TLS_DEVICE_NAME_MAX];
@@ -81,6 +87,8 @@ struct tls_device {
int (*feature)(struct tls_device *device);
int (*hash)(struct tls_device *device, struct sock *sk);
void (*unhash)(struct tls_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
};
enum {
@@ -93,24 +101,45 @@ enum {
TLS_NUM_CONFIG,
};
-struct tls_sw_context_tx {
- struct crypto_aead *aead_send;
- struct crypto_wait async_wait;
-
- char aad_space[TLS_AAD_SPACE_SIZE];
-
- unsigned int sg_plaintext_size;
- int sg_plaintext_num_elem;
- struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
+/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
+ * allocated or mapped for each TLS record. After encryption, the records are
+ * stored in a linked list.
+ */
+struct tls_rec {
+ struct list_head list;
+ int tx_ready;
+ int tx_flags;
+ int inplace_crypto;
- unsigned int sg_encrypted_size;
- int sg_encrypted_num_elem;
- struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
+ struct sk_msg msg_plaintext;
+ struct sk_msg msg_encrypted;
- /* AAD | sg_plaintext_data | sg_tag */
+ /* AAD | msg_plaintext.sg.data | sg_tag */
struct scatterlist sg_aead_in[2];
- /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
+ /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
struct scatterlist sg_aead_out[2];
+
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[];
+};
+
+struct tx_work {
+ struct delayed_work work;
+ struct sock *sk;
+};
+
+struct tls_sw_context_tx {
+ struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
+ struct tx_work tx_work;
+ struct tls_rec *open_rec;
+ struct list_head tx_list;
+ atomic_t encrypt_pending;
+ int async_notify;
+
+#define BIT_TX_SCHEDULED 0
+ unsigned long tx_bitmask;
};
struct tls_sw_context_rx {
@@ -119,11 +148,12 @@ struct tls_sw_context_rx {
struct strparser strp;
void (*saved_data_ready)(struct sock *sk);
- unsigned int (*sk_poll)(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
+ atomic_t decrypt_pending;
+ bool async_notify;
};
struct tls_record_info {
@@ -171,15 +201,14 @@ struct cipher_context {
char *rec_seq;
};
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+};
+
struct tls_context {
- union {
- struct tls_crypto_info crypto_send;
- struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
- };
- union {
- struct tls_crypto_info crypto_recv;
- struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
- };
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
struct list_head list;
struct net_device *netdev;
@@ -196,10 +225,11 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
+
unsigned long flags;
bool in_tcp_sendpages;
+ bool pending_open_record_frags;
- u16 pending_open_record_frags;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@ -247,8 +277,7 @@ void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-unsigned int tls_sw_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
+bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@ -260,6 +289,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
+int tls_tx_records(struct sock *sk, int flags);
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
@@ -278,6 +308,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
struct scatterlist *sg, u16 first_offset,
int flags);
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags);
+
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
int flags, long *timeo);
@@ -311,6 +344,17 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
+{
+ struct tls_rec *rec;
+
+ rec = list_first_entry(&ctx->tx_list, struct tls_rec, list);
+ if (!rec)
+ return false;
+
+ return READ_ONCE(rec->tx_ready);
+}
+
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
@@ -367,8 +411,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
* size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
*/
buf[0] = record_type;
- buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
- buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+ buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
+ buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
/* we can use IV for nonce explicit according to spec */
buf[3] = pkt_len >> 8;
buf[4] = pkt_len & 0xFF;
@@ -416,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
+static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_tx(ctx);
+}
+
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
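
With crypto_send and crypto_recv now typed as the named union tls_crypto_context, callers reach protocol fields through the embedded info member, as the tls_fill_prepend hunk above already shows. A minimal illustrative sketch of that access pattern; example_tls_tx_version() is a made-up helper, not part of these hunks:

/* Illustrative only: read the negotiated TX TLS version through the new
 * union tls_crypto_context layout (ctx->crypto_send.info.version).
 */
static inline u16 example_tls_tx_version(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	return ctx->crypto_send.info.version;
}
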
diff --git a/include/net/udp.h b/include/net/udp.h
index 8482a990b0bb..fd6d948755c8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -252,6 +252,17 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
+static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
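
udp_sk_bound_dev_eq() wraps the sysctl_udp_l3mdev_accept check so socket-lookup code no longer open-codes it. A hedged sketch of a match helper built on it; example_udp_dev_match() is illustrative only:

/* Illustrative only: score a candidate socket's device binding against
 * the receiving dif/sdif pair using the new helper.
 */
static bool example_udp_dev_match(struct net *net, const struct sock *sk,
				  int dif, int sdif)
{
	return udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
}
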
@@ -272,7 +283,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
-void udp_err(struct sk_buff *, u32);
+int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
@@ -406,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field) \
-do { \
- if ((sk)->sk_family == AF_INET) \
- __UDP_INC_STATS(sock_net(sk), field, 0); \
- else \
- __UDP6_INC_STATS(sock_net(sk), field, 0); \
-} while (0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics) : \
+ (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
+ sock_net(sk)->mib.udp_stats_in6); \
+})
#else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics; \
+})
#endif
+#define __UDPX_INC_STATS(sk, field) \
+ __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
sa_family_t family;
@@ -443,9 +461,33 @@ int udpv4_offload_init(void);
void udp_init(void);
+DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
+DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ struct sk_buff *skb, bool ipv4)
+{
+ struct sk_buff *segs;
+
+ /* the GSO CB lies after the UDP one, no need to save and restore any
+ * CB fragment
+ */
+ segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segs))) {
+ int segs_nr = skb_shinfo(skb)->gso_segs;
+
+ atomic_add(segs_nr, &sk->sk_drops);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ consume_skb(skb);
+ return segs;
+}
+
#endif /* _UDP_H */
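
udp_rcv_segment() added above either hands back a list of software-segmented skbs or frees the input and does the drop accounting itself. A sketch of a caller consuming that list; handle_one_segment() is a placeholder and the per-segment header pull is omitted:

/* Illustrative only: walk the segment list produced by udp_rcv_segment().
 * handle_one_segment() stands in for the real per-segment receive routine.
 */
int handle_one_segment(struct sock *sk, struct sk_buff *skb);	/* placeholder */

static int example_udp_gso_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *next;

	segs = udp_rcv_segment(sk, skb, true);
	if (!segs)
		return 0;	/* already dropped and counted in the MIB */

	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		handle_one_segment(sk, skb);
	}
	return 0;
}
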
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index fe680ab6b15a..b8137953fea3 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -30,6 +30,7 @@ struct udp_port_cfg {
__be16 local_udp_port;
__be16 peer_udp_port;
+ int bind_ifindex;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
use_udp6_rx_checksums:1,
@@ -64,6 +65,8 @@ static inline int udp_sock_create(struct net *net,
}
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -76,6 +79,7 @@ struct udp_tunnel_sock_cfg {
/* Used for setting up udp_sock fields, see udp.h for details */
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
@@ -165,6 +169,12 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
+ struct udp_sock *up = udp_sk(sock->sk);
+
+ if (up->encap_enabled)
+ return;
+
+ up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
if (sock->sk->sk_family == PF_INET6)
ipv6_stub->udpv6_encap_enable();
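
The new encap_err_lookup callback lets a tunnel socket claim ICMP errors for its own flows; it is installed alongside encap_rcv at socket setup. A sketch assuming placeholder my_encap_rcv()/my_encap_err_lookup() handlers and the existing setup_udp_tunnel_sock() helper:

/* Illustrative only: configure a tunnel socket with the new error-lookup
 * callback. my_encap_rcv()/my_encap_err_lookup() are placeholders.
 */
int my_encap_rcv(struct sock *sk, struct sk_buff *skb);		/* placeholder */
int my_encap_err_lookup(struct sock *sk, struct sk_buff *skb);	/* placeholder */

static void example_setup_tunnel_sock(struct net *net, struct socket *sock)
{
	struct udp_tunnel_sock_cfg cfg = {
		.encap_type		= 1,
		.encap_rcv		= my_encap_rcv,
		.encap_err_lookup	= my_encap_err_lookup,
	};

	setup_udp_tunnel_sock(net, sock, &cfg);
}
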
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b99a02ae3934..236403eb5ba6 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -5,7 +5,8 @@
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
-#include <net/udp_tunnel.h>
+#include <net/rtnetlink.h>
+#include <net/switchdev.h>
/* VXLAN protocol (RFC 7348) header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -191,6 +192,7 @@ union vxlan_addr {
struct vxlan_rdst {
union vxlan_addr remote_ip;
__be16 remote_port;
+ u8 offloaded:1;
__be32 remote_vni;
u32 remote_ifindex;
struct list_head list;
@@ -214,6 +216,7 @@ struct vxlan_config {
unsigned long age_interval;
unsigned int addrmax;
bool no_share;
+ enum ifla_vxlan_df df;
};
struct vxlan_dev_node {
@@ -371,4 +374,81 @@ static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
return vs->sock->sk->sk_family;
}
+#if IS_ENABLED(CONFIG_IPV6)
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+#else /* !IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+static inline bool netif_is_vxlan(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "vxlan");
+}
+
+struct switchdev_notifier_vxlan_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
+ union vxlan_addr remote_ip;
+ __be16 remote_port;
+ __be32 remote_vni;
+ u32 remote_ifindex;
+ u8 eth_addr[ETH_ALEN];
+ __be32 vni;
+ bool offloaded;
+ bool added_by_user;
+};
+
+#if IS_ENABLED(CONFIG_VXLAN)
+int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info);
+int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb);
+void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
+
+#else
+static inline int
+vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ return -ENOENT;
+}
+
+static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
+{
+}
+#endif
+
#endif
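
The netif_is_vxlan()/vxlan_fdb_find_uc() additions are aimed at switchdev drivers that mirror VXLAN FDB entries into hardware. A trimmed, illustrative sketch of such a caller:

/* Illustrative only: check for a VXLAN upper and fetch an FDB entry to
 * offload. Error handling and the actual hardware programming are trimmed.
 */
static int example_offload_vxlan_fdb(struct net_device *dev,
				     const u8 *mac, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info fdb_info;
	int err;

	if (!netif_is_vxlan(dev))
		return -EINVAL;

	err = vxlan_fdb_find_uc(dev, mac, vni, &fdb_info);
	if (err)
		return err;	/* -ENOENT when CONFIG_VXLAN is off */

	/* ... program fdb_info.remote_ip/remote_port/remote_vni here ... */
	return 0;
}
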
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 76b95256c266..0f25b3675c5c 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -91,6 +91,8 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
frame->dev_rx = NULL;
}
+struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+
/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -99,9 +101,8 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
int metasize;
int headroom;
- /* TODO: implement clone, copy, use "native" MEM_TYPE */
if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
- return NULL;
+ return xdp_convert_zc_to_xdp_frame(xdp);
/* Assure headroom is available for storing info */
headroom = xdp->data - xdp->data_hard_start;
@@ -135,6 +136,7 @@ void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 7161856bcf9c..13acb9803a6d 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -16,21 +16,23 @@
struct net_device;
struct xsk_queue;
-struct xdp_umem_props {
- u64 chunk_mask;
- u64 size;
-};
-
struct xdp_umem_page {
void *addr;
dma_addr_t dma;
};
+struct xdp_umem_fq_reuse {
+ u32 nentries;
+ u32 length;
+ u64 handles[];
+};
+
struct xdp_umem {
struct xsk_queue *fq;
struct xsk_queue *cq;
struct xdp_umem_page *pages;
- struct xdp_umem_props props;
+ u64 chunk_mask;
+ u64 size;
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
@@ -41,6 +43,7 @@ struct xdp_umem {
struct page **pgs;
u32 npgs;
struct net_device *dev;
+ struct xdp_umem_fq_reuse *fq_reuse;
u16 queue_id;
bool zc;
spinlock_t xsk_list_lock;
@@ -79,6 +82,50 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+ struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
+struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+ return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+ return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
+}
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ if (!rq->length)
+ return xsk_umem_peek_addr(umem, addr);
+
+ *addr = rq->handles[rq->length - 1];
+ return addr;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ if (!rq->length)
+ xsk_umem_discard_addr(umem);
+ else
+ rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ rq->handles[rq->length++] = addr;
+}
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
@@ -98,6 +145,74 @@ static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
}
+
+static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
+{
+ return NULL;
+}
+
+static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+{
+}
+
+static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
+ u32 *len)
+{
+ return false;
+}
+
+static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+{
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+ return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+ struct xdp_umem *umem,
+ struct xdp_umem_fq_reuse *newq)
+{
+ return NULL;
+}
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
+static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
+ u16 queue_id)
+{
+ return NULL;
+}
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+ return NULL;
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+ return 0;
+}
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+ return NULL;
+}
+
+static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
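
The reuse-queue aware FILL helpers let a zero-copy driver return addresses it consumed but could not use instead of leaking them. An illustrative RX-allocation sketch; map_hw_buffer() is a placeholder and chunk masking/headroom handling are omitted:

/* Illustrative only: consume one fill-queue address via the reuse-aware
 * helpers and recycle it if the hardware slot cannot be programmed.
 */
bool map_hw_buffer(dma_addr_t dma, void *data);	/* placeholder */

static bool example_zc_alloc(struct xdp_umem *umem)
{
	u64 handle;

	if (!xsk_umem_peek_addr_rq(umem, &handle))
		return false;			/* fill and reuse queues empty */
	xsk_umem_discard_addr_rq(umem);

	if (!map_hw_buffer(xdp_umem_get_dma(umem, handle),
			   xdp_umem_get_data(umem, handle))) {
		xsk_umem_fq_reuse(umem, handle);	/* keep it for a retry */
		return false;
	}
	return true;
}
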
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0eb390c205af..7298a53b9702 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -577,6 +577,7 @@ struct xfrm_policy {
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
+ u32 pos;
struct timer_list timer;
atomic_t genid;
@@ -589,6 +590,7 @@ struct xfrm_policy {
struct xfrm_lifetime_cur curlft;
struct xfrm_policy_walk_entry walk;
struct xfrm_policy_queue polq;
+ bool bydst_reinsert;
u8 type;
u8 action;
u8 flags;
@@ -596,6 +598,7 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+ struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
};
@@ -1093,7 +1096,6 @@ struct xfrm_offload {
};
struct sec_path {
- refcount_t refcnt;
int len;
int olen;
@@ -1101,41 +1103,13 @@ struct sec_path {
struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
};
-static inline int secpath_exists(struct sk_buff *skb)
-{
-#ifdef CONFIG_XFRM
- return skb->sp != NULL;
-#else
- return 0;
-#endif
-}
-
-static inline struct sec_path *
-secpath_get(struct sec_path *sp)
-{
- if (sp)
- refcount_inc(&sp->refcnt);
- return sp;
-}
-
-void __secpath_destroy(struct sec_path *sp);
-
-static inline void
-secpath_put(struct sec_path *sp)
-{
- if (sp && refcount_dec_and_test(&sp->refcnt))
- __secpath_destroy(sp);
-}
-
-struct sec_path *secpath_dup(struct sec_path *src);
-int secpath_set(struct sk_buff *skb);
+struct sec_path *secpath_set(struct sk_buff *skb);
static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
- skb->sp = NULL;
+ skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}
@@ -1191,7 +1165,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !skb->sp) ||
+ return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
(skb_dst(skb)->flags & DST_NOPOLICY) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1552,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
const struct flowi *fl,
@@ -1902,14 +1877,16 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
- return skb->sp->xvec[skb->sp->len - 1];
+ struct sec_path *sp = skb_sec_path(skb);
+
+ return sp->xvec[sp->len - 1];
}
#endif
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- struct sec_path *sp = skb->sp;
+ struct sec_path *sp = skb_sec_path(skb);
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
@@ -1967,7 +1944,7 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
struct xfrm_state_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
+ struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_state_free)
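
With sec_path carried as an skb extension, secpath_set() now returns the attached sec_path and per-skb access goes through skb_sec_path() rather than skb->sp. A minimal input-side sketch; xfrm_state refcounting and error unwinding are omitted:

/* Illustrative only: attach/extend the sec_path of an skb under the new
 * extension-based API.
 */
static int example_record_state(struct sk_buff *skb, struct xfrm_state *x)
{
	struct sec_path *sp;

	sp = secpath_set(skb);
	if (!sp)
		return -ENOMEM;

	if (sp->len == XFRM_MAX_DEPTH)
		return -ENOSPC;

	sp->xvec[sp->len++] = x;
	return 0;
}
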
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 77c7908b7d73..2734c895c1bf 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -46,7 +46,6 @@
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
/**
@@ -95,20 +94,18 @@ int rdma_translate_ip(const struct sockaddr *addr,
* @timeout_ms: Amount of time to wait for the address resolution to complete.
* @callback: Call invoked once address resolution has completed, timed out,
* or been canceled. A status of 0 indicates success.
+ * @resolve_by_gid_attr: Resolve the ip based on the GID attribute from
+ * rdma_dev_addr.
* @context: User-specified context associated with the call.
*/
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context);
+ bool resolve_by_gid_attr, void *context);
void rdma_addr_cancel(struct rdma_dev_addr *addr);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr);
-
int rdma_addr_size(const struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
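
rdma_resolve_ip() now takes an unsigned long timeout and an explicit resolve_by_gid_attr flag. A sketch of a caller with a placeholder completion callback:

/* Illustrative only: issue an async resolve with the updated signature.
 * example_resolved() is a placeholder callback.
 */
void example_resolved(int status, struct sockaddr *src_addr,
		      struct rdma_dev_addr *addr, void *context);	/* placeholder */

static int example_resolve(struct sockaddr *src, const struct sockaddr *dst,
			   struct rdma_dev_addr *dev_addr, void *ctx)
{
	return rdma_resolve_ip(src, dst, dev_addr, 2000 /* ms */,
			       example_resolved, false, ctx);
}
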
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 3e11e7cc60b7..62e990b620aa 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -133,28 +133,4 @@ const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device,
void rdma_put_gid_attr(const struct ib_gid_attr *attr);
void rdma_hold_gid_attr(const struct ib_gid_attr *attr);
-/*
- * This is to be removed. It only exists to make merging rdma and smc simpler.
- */
-static inline __deprecated int ib_query_gid(struct ib_device *device,
- u8 port_num, int index,
- union ib_gid *gid,
- struct ib_gid_attr *attr_out)
-{
- const struct ib_gid_attr *attr;
-
- memset(attr_out, 0, sizeof(*attr_out));
- attr = rdma_get_gid_attr(device, port_num, index);
- if (IS_ERR(attr))
- return PTR_ERR(attr);
-
- if (attr->ndev)
- dev_hold(attr->ndev);
- *attr_out = *attr;
-
- rdma_put_gid_attr(attr);
-
- return 0;
-}
-
#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c10f4b5ea8ab..49f4f75499b3 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -583,7 +583,7 @@ struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
__be64 service_id;
- int timeout_ms;
+ unsigned long timeout_ms;
const void *private_data;
u8 private_data_len;
u8 max_cm_retries;
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
index f62b842e6596..f8982e4e9702 100644
--- a/include/rdma/ib_fmr_pool.h
+++ b/include/rdma/ib_fmr_pool.h
@@ -88,6 +88,6 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
int list_len,
u64 io_virtual_address);
-int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
+void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index f6ba366051c7..fdef558e3a2d 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -277,6 +277,7 @@ enum ib_port_capability_mask_bits {
IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
+ IB_PORT_CAP_MASK2_SUP = 1 << 15,
IB_PORT_CM_SUP = 1 << 16,
IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
IB_PORT_REINIT_SUP = 1 << 18,
@@ -295,6 +296,15 @@ enum ib_port_capability_mask_bits {
IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31,
};
+enum ib_port_capability_mask2_bits {
+ IB_PORT_SET_NODE_DESC_SUP = 1 << 0,
+ IB_PORT_EX_PORT_INFO_EX_SUP = 1 << 1,
+ IB_PORT_VIRT_SUP = 1 << 2,
+ IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
+ IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
+ IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
+};
+
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
struct opa_class_port_info {
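
The CapabilityMask2 bits pair with the port_cap_flags2 field added to struct ib_port_attr further down in ib_verbs.h. An illustrative check for 2X link-width support:

/* Illustrative only: report whether a port advertises 2X link width via
 * the new CapabilityMask2 bits.
 */
static bool example_port_supports_2x(struct ib_device *dev, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(dev, port_num, &attr))
		return false;

	return !!(attr.port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP);
}
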
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index b6ddf2a1b9d8..19520979b84c 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -449,28 +449,23 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
-int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct sa_path_rec *resp,
+int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
+ u8 port_num, struct sa_path_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status, struct sa_path_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **query);
+ void *context, struct ib_sa_query **query);
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ struct ib_device *device, u8 port_num, u8 method,
+ struct ib_sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_sa_service_rec *resp,
+ void *context),
+ void *context, struct ib_sa_query **sa_query);
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
@@ -573,12 +568,11 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
struct ib_device *device,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a1fd63871d17..5d3755ec5afa 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -42,15 +42,14 @@ struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
+ struct mm_struct *owning_mm;
size_t length;
unsigned long address;
int page_shift;
- int writable;
- int hugetlb;
+ u32 writable : 1;
+ u32 hugetlb : 1;
+ u32 is_odp : 1;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
- struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 6a17f856f841..0b1446fe2fab 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -43,6 +43,9 @@ struct umem_odp_node {
};
struct ib_umem_odp {
+ struct ib_umem umem;
+ struct ib_ucontext_per_mm *per_mm;
+
/*
* An array of the pages included in the on-demand paging umem.
* Indices of pages that are currently not mapped into the device will
@@ -64,16 +67,9 @@ struct ib_umem_odp {
struct mutex umem_mutex;
void *private; /* for the HW driver to use. */
- /* When false, use the notifier counter in the ucontext struct. */
- bool mn_counters_active;
int notifiers_seq;
int notifiers_count;
- /* A linked list of umems that don't have private mmu notifier
- * counters yet. */
- struct list_head no_private_counters;
- struct ib_umem *umem;
-
/* Tree tracking */
struct umem_odp_node interval_tree;
@@ -82,15 +78,34 @@ struct ib_umem_odp {
struct work_struct work;
};
+static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
+{
+ return container_of(umem, struct ib_umem_odp, umem);
+}
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access);
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size);
+struct ib_ucontext_per_mm {
+ struct ib_ucontext *context;
+ struct mm_struct *mm;
+ struct pid *tgid;
+ bool active;
+
+ struct rb_root_cached umem_tree;
+ /* Protects umem_tree */
+ struct rw_semaphore umem_rwsem;
-void ib_umem_odp_release(struct ib_umem *umem);
+ struct mmu_notifier mn;
+ unsigned int odp_mrs_count;
+
+ struct list_head ucontext_list;
+ struct rcu_head rcu;
+};
+
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size);
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
/*
* The lower 2 bits of the DMA address signal the R/W permissions for
@@ -105,13 +120,14 @@ void ib_umem_odp_release(struct ib_umem *umem);
#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
- u64 access_mask, unsigned long current_seq);
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
-typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
void *cookie);
/*
* Call the callback on each ib_umem in the range. Returns the logical or of
@@ -119,7 +135,8 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
*/
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
u64 start, u64 end,
- umem_call_back cb, void *cookie);
+ umem_call_back cb,
+ bool blockable, void *cookie);
/*
* Find first region intersecting with address range.
@@ -128,46 +145,37 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
u64 addr, u64 length);
-static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
{
/*
* This code is strongly based on the KVM code from
* mmu_notifier_retry. Should be called with
- * the relevant locks taken (item->odp_data->umem_mutex
+ * the relevant locks taken (umem_odp->umem_mutex
* and the ucontext umem_mutex semaphore locked for read).
*/
- /* Do not allow page faults while the new ib_umem hasn't seen a state
- * with zero notifiers yet, and doesn't have its own valid set of
- * private counters. */
- if (!item->odp_data->mn_counters_active)
- return 1;
-
- if (unlikely(item->odp_data->notifiers_count))
+ if (unlikely(umem_odp->notifiers_count))
return 1;
- if (item->odp_data->notifiers_seq != mmu_seq)
+ if (umem_odp->notifiers_seq != mmu_seq)
return 1;
return 0;
}
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int ib_umem_odp_get(struct ib_ucontext *context,
- struct ib_umem *umem,
- int access)
+static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
return -EINVAL;
}
-static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static inline struct ib_umem_odp *
+ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size)
{
return ERR_PTR(-EINVAL);
}
-static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
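
With struct ib_umem embedded in struct ib_umem_odp, drivers convert in the opposite direction via to_ib_umem_odp() instead of chasing the removed umem->odp_data pointer. An illustrative sketch, valid only for ODP umems (umem->is_odp) with CONFIG_INFINIBAND_ON_DEMAND_PAGING enabled:

/* Illustrative only: container_of-based conversion before touching
 * ODP-specific state such as the notifier counters.
 */
static int example_check_retry(struct ib_umem *umem, unsigned long mmu_seq)
{
	struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);

	return ib_umem_mmu_notifier_retry(umem_odp, mmu_seq);
}
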
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e950c2a68f06..a3ceed3a040a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -41,14 +41,11 @@
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
-#include <linux/scatterlist.h>
#include <linux/workqueue.h>
-#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
@@ -56,7 +53,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
-
+#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
@@ -69,8 +66,11 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
+struct ib_umem_odp;
+
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
union ib_gid {
u8 raw[16];
@@ -434,6 +434,7 @@ enum ib_port_state {
enum ib_port_width {
IB_WIDTH_1X = 1,
+ IB_WIDTH_2X = 16,
IB_WIDTH_4X = 2,
IB_WIDTH_8X = 4,
IB_WIDTH_12X = 8
@@ -443,6 +444,7 @@ static inline int ib_width_enum_to_int(enum ib_port_width width)
{
switch (width) {
case IB_WIDTH_1X: return 1;
+ case IB_WIDTH_2X: return 2;
case IB_WIDTH_4X: return 4;
case IB_WIDTH_8X: return 8;
case IB_WIDTH_12X: return 12;
@@ -592,6 +594,7 @@ struct ib_port_attr {
u8 active_width;
u8 active_speed;
u8 phys_state;
+ u16 port_cap_flags2;
};
enum ib_device_modify_flags {
@@ -729,7 +732,11 @@ enum ib_rate {
IB_RATE_25_GBPS = 15,
IB_RATE_100_GBPS = 16,
IB_RATE_200_GBPS = 17,
- IB_RATE_300_GBPS = 18
+ IB_RATE_300_GBPS = 18,
+ IB_RATE_28_GBPS = 19,
+ IB_RATE_50_GBPS = 20,
+ IB_RATE_400_GBPS = 21,
+ IB_RATE_600_GBPS = 22,
};
/**
@@ -1137,7 +1144,9 @@ enum ib_qp_create_flags {
*/
struct ib_qp_init_attr {
+ /* Consumer's event_handler callback must not block */
void (*event_handler)(struct ib_event *, void *);
+
void *qp_context;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
@@ -1146,7 +1155,7 @@ struct ib_qp_init_attr {
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
- enum ib_qp_create_flags create_flags;
+ u32 create_flags;
/*
* Only needed for special QP types, or when using the RW API.
@@ -1278,21 +1287,27 @@ struct ib_qp_attr {
};
enum ib_wr_opcode {
- IB_WR_RDMA_WRITE,
- IB_WR_RDMA_WRITE_WITH_IMM,
- IB_WR_SEND,
- IB_WR_SEND_WITH_IMM,
- IB_WR_RDMA_READ,
- IB_WR_ATOMIC_CMP_AND_SWP,
- IB_WR_ATOMIC_FETCH_AND_ADD,
- IB_WR_LSO,
- IB_WR_SEND_WITH_INV,
- IB_WR_RDMA_READ_WITH_INV,
- IB_WR_LOCAL_INV,
- IB_WR_REG_MR,
- IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
- IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ /* These are shared with userspace */
+ IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
+ IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
+ IB_WR_SEND = IB_UVERBS_WR_SEND,
+ IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
+ IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
+ IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
+ IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
+ IB_WR_LSO = IB_UVERBS_WR_TSO,
+ IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
+ IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
+ IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
+ IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
+ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+
+ /* These are kernel only and can not be issued by userspace */
+ IB_WR_REG_MR = 0x20,
IB_WR_REG_SIG_MR,
+
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer.
*/
@@ -1485,29 +1500,22 @@ struct ib_ucontext {
* it is set when we are closing the file descriptor and indicates
* that mm_sem may be locked.
*/
- int closing;
+ bool closing;
bool cleanup_retryable;
- struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct rb_root_cached umem_tree;
- /*
- * Protects .umem_rbroot and tree, as well as odp_mrs_count and
- * mmu notifiers registration.
- */
- struct rw_semaphore umem_rwsem;
- void (*invalidate_range)(struct ib_umem *umem,
+ void (*invalidate_range)(struct ib_umem_odp *umem_odp,
unsigned long start, unsigned long end);
-
- struct mmu_notifier mn;
- atomic_t notifier_count;
- /* A list of umems that don't have private mmu notifier counters yet. */
- struct list_head no_private_counters;
- int odp_mrs_count;
+ struct mutex per_mm_list_lock;
+ struct list_head per_mm_list;
#endif
struct ib_rdmacg_object cg_obj;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_uobject {
@@ -1570,9 +1578,10 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
- IB_POLL_SOFTIRQ, /* poll from softirq context */
- IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_DIRECT, /* caller context, no hw completions */
+ IB_POLL_SOFTIRQ, /* poll from softirq context */
+ IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
struct ib_cq {
@@ -1589,6 +1598,7 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ struct workqueue_struct *comp_wq;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2223,6 +2233,16 @@ struct rdma_netdev {
union ib_gid *gid, u16 mlid);
};
+struct rdma_netdev_alloc_params {
+ size_t sizeof_priv;
+ unsigned int txqs;
+ unsigned int rxqs;
+ void *param;
+
+ int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
+ struct net_device *netdev, void *param);
+};
+
struct ib_port_pkey_list {
/* Lock to hold while modifying the list. */
spinlock_t list_lock;
@@ -2244,81 +2264,86 @@ struct ib_counters_read_attr {
struct uverbs_attr_bundle;
-struct ib_device {
- /* Do not access @dma_device directly from ULP nor from HW drivers. */
- struct device *dma_device;
-
- char name[IB_DEVICE_NAME_MAX];
-
- struct list_head event_handler_list;
- spinlock_t event_handler_lock;
-
- spinlock_t client_data_lock;
- struct list_head core_list;
- /* Access to the client_data_list is protected by the client_data_lock
- * spinlock and the lists_rwsem read-write semaphore */
- struct list_head client_data_list;
-
- struct ib_cache cache;
- /**
- * port_immutable is indexed by port number
- */
- struct ib_port_immutable *port_immutable;
-
- int num_comp_vectors;
-
- struct ib_port_pkey_list *port_pkey_list;
-
- struct iw_cm_verbs *iwcm;
-
+/**
+ * struct ib_device_ops - InfiniBand device operations
+ * This structure defines all the InfiniBand device operations, providers will
+ * need to define the supported operations, otherwise they will be set to null.
+ */
+struct ib_device_ops {
+ int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+ void (*drain_rq)(struct ib_qp *qp);
+ void (*drain_sq)(struct ib_qp *qp);
+ int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+ int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
+ int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+ int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
+ int (*post_srq_recv)(struct ib_srq *srq,
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
+ int (*process_mad)(struct ib_device *device, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad, size_t in_mad_size,
+ struct ib_mad_hdr *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+ int (*query_device)(struct ib_device *device,
+ struct ib_device_attr *device_attr,
+ struct ib_udata *udata);
+ int (*modify_device)(struct ib_device *device, int device_modify_mask,
+ struct ib_device_modify *device_modify);
+ void (*get_dev_fw_str)(struct ib_device *device, char *str);
+ const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
+ int comp_vector);
+ int (*query_port)(struct ib_device *device, u8 port_num,
+ struct ib_port_attr *port_attr);
+ int (*modify_port)(struct ib_device *device, u8 port_num,
+ int port_modify_mask,
+ struct ib_port_modify *port_modify);
/**
- * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
- * driver initialized data. The struct is kfree()'ed by the sysfs
- * core when the device is removed. A lifespan of -1 in the return
- * struct tells the core to set a default lifespan.
+ * The following mandatory functions are used only at device
+ * registration. Keep functions such as these at the end of this
+ * structure to avoid cache line misses when accessing struct ib_device
+ * in fast paths.
*/
- struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
- u8 port_num);
+ int (*get_port_immutable)(struct ib_device *device, u8 port_num,
+ struct ib_port_immutable *immutable);
+ enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
+ u8 port_num);
/**
- * get_hw_stats - Fill in the counter value(s) in the stats struct.
- * @index - The index in the value array we wish to have updated, or
- * num_counters if we want all stats updated
- * Return codes -
- * < 0 - Error, no counters updated
- * index - Updated the single counter pointed to by index
- * num_counters - Updated all counters (will reset the timestamp
- * and prevent further calls for lifespan milliseconds)
- * Drivers are allowed to update all counters in leiu of just the
- * one given in index at their option
- */
- int (*get_hw_stats)(struct ib_device *device,
- struct rdma_hw_stats *stats,
- u8 port, int index);
- int (*query_device)(struct ib_device *device,
- struct ib_device_attr *device_attr,
- struct ib_udata *udata);
- int (*query_port)(struct ib_device *device,
- u8 port_num,
- struct ib_port_attr *port_attr);
- enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
- u8 port_num);
- /* When calling get_netdev, the HW vendor's driver should return the
+ * When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
* on this net device. The HW vendor's device driver must guarantee
* that this function returns NULL before the net device has finished
* NETDEV_UNREGISTER state.
*/
- struct net_device *(*get_netdev)(struct ib_device *device,
- u8 port_num);
- /* query_gid should be return GID value for @device, when @port_num
+ struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
+ /**
+ * rdma netdev operation
+ *
+ * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
+ * must return -EOPNOTSUPP if it doesn't support the specified type.
+ */
+ struct net_device *(*alloc_rdma_netdev)(
+ struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
+ const char *name, unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+
+ int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+ /**
+ * query_gid should return the GID value for @device, when @port_num
* link layer is either IB or iWarp. It is no-op if @port_num port
* is RoCE link layer.
*/
- int (*query_gid)(struct ib_device *device,
- u8 port_num, int index,
- union ib_gid *gid);
- /* When calling add_gid, the HW vendor's driver should add the gid
+ int (*query_gid)(struct ib_device *device, u8 port_num, int index,
+ union ib_gid *gid);
+ /**
+ * When calling add_gid, the HW vendor's driver should add the gid
* of device of port at gid index available at @attr. Meta-info of
* that gid (for example, the network device related to this gid) is
* available at @attr. @context allows the HW vendor driver to store
@@ -2330,213 +2355,196 @@ struct ib_device {
* concurrently for different ports. This function is only called when
* roce_gid_table is used.
*/
- int (*add_gid)(const struct ib_gid_attr *attr,
- void **context);
- /* When calling del_gid, the HW vendor's driver should delete the
+ int (*add_gid)(const struct ib_gid_attr *attr, void **context);
+ /**
+ * When calling del_gid, the HW vendor's driver should delete the
* gid of device @device at gid index gid_index of port port_num
* available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
- int (*del_gid)(const struct ib_gid_attr *attr,
- void **context);
- int (*query_pkey)(struct ib_device *device,
- u8 port_num, u16 index, u16 *pkey);
- int (*modify_device)(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify);
- int (*modify_port)(struct ib_device *device,
- u8 port_num, int port_modify_mask,
- struct ib_port_modify *port_modify);
- struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
- struct ib_udata *udata);
- int (*dealloc_ucontext)(struct ib_ucontext *context);
- int (*mmap)(struct ib_ucontext *context,
- struct vm_area_struct *vma);
- struct ib_pd * (*alloc_pd)(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_udata *udata);
- int (*dealloc_pd)(struct ib_pd *pd);
- struct ib_ah * (*create_ah)(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr,
- struct ib_udata *udata);
- int (*modify_ah)(struct ib_ah *ah,
- struct rdma_ah_attr *ah_attr);
- int (*query_ah)(struct ib_ah *ah,
- struct rdma_ah_attr *ah_attr);
- int (*destroy_ah)(struct ib_ah *ah);
- struct ib_srq * (*create_srq)(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
- int (*modify_srq)(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata);
- int (*query_srq)(struct ib_srq *srq,
- struct ib_srq_attr *srq_attr);
- int (*destroy_srq)(struct ib_srq *srq);
- int (*post_srq_recv)(struct ib_srq *srq,
- const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr);
- struct ib_qp * (*create_qp)(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
- int (*modify_qp)(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_udata *udata);
- int (*query_qp)(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr);
- int (*destroy_qp)(struct ib_qp *qp);
- int (*post_send)(struct ib_qp *qp,
- const struct ib_send_wr *send_wr,
- const struct ib_send_wr **bad_send_wr);
- int (*post_recv)(struct ib_qp *qp,
- const struct ib_recv_wr *recv_wr,
- const struct ib_recv_wr **bad_recv_wr);
- struct ib_cq * (*create_cq)(struct ib_device *device,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
- int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
- u16 cq_period);
- int (*destroy_cq)(struct ib_cq *cq);
- int (*resize_cq)(struct ib_cq *cq, int cqe,
- struct ib_udata *udata);
- int (*poll_cq)(struct ib_cq *cq, int num_entries,
- struct ib_wc *wc);
- int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
- int (*req_notify_cq)(struct ib_cq *cq,
- enum ib_cq_notify_flags flags);
- int (*req_ncomp_notif)(struct ib_cq *cq,
- int wc_cnt);
- struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
- int mr_access_flags);
- struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
- u64 start, u64 length,
- u64 virt_addr,
- int mr_access_flags,
- struct ib_udata *udata);
- int (*rereg_user_mr)(struct ib_mr *mr,
- int flags,
- u64 start, u64 length,
- u64 virt_addr,
- int mr_access_flags,
- struct ib_pd *pd,
- struct ib_udata *udata);
- int (*dereg_mr)(struct ib_mr *mr);
- struct ib_mr * (*alloc_mr)(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
- int (*map_mr_sg)(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int *sg_offset);
- struct ib_mw * (*alloc_mw)(struct ib_pd *pd,
- enum ib_mw_type type,
- struct ib_udata *udata);
- int (*dealloc_mw)(struct ib_mw *mw);
- struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
- int (*map_phys_fmr)(struct ib_fmr *fmr,
- u64 *page_list, int list_len,
- u64 iova);
- int (*unmap_fmr)(struct list_head *fmr_list);
- int (*dealloc_fmr)(struct ib_fmr *fmr);
- int (*attach_mcast)(struct ib_qp *qp,
- union ib_gid *gid,
- u16 lid);
- int (*detach_mcast)(struct ib_qp *qp,
- union ib_gid *gid,
- u16 lid);
- int (*process_mad)(struct ib_device *device,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size,
- struct ib_mad_hdr *out_mad,
- size_t *out_mad_size,
- u16 *out_mad_pkey_index);
- struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
- struct ib_ucontext *ucontext,
- struct ib_udata *udata);
- int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
- struct ib_flow * (*create_flow)(struct ib_qp *qp,
- struct ib_flow_attr
- *flow_attr,
- int domain,
- struct ib_udata *udata);
- int (*destroy_flow)(struct ib_flow *flow_id);
- int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
- struct ib_mr_status *mr_status);
- void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
- void (*drain_rq)(struct ib_qp *qp);
- void (*drain_sq)(struct ib_qp *qp);
- int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
- int state);
- int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
- struct ifla_vf_info *ivf);
- int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
- struct ifla_vf_stats *stats);
- int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
- int type);
- struct ib_wq * (*create_wq)(struct ib_pd *pd,
- struct ib_wq_init_attr *init_attr,
- struct ib_udata *udata);
- int (*destroy_wq)(struct ib_wq *wq);
- int (*modify_wq)(struct ib_wq *wq,
- struct ib_wq_attr *attr,
- u32 wq_attr_mask,
- struct ib_udata *udata);
- struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr *init_attr,
- struct ib_udata *udata);
- int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
- struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
- int (*destroy_flow_action)(struct ib_flow_action *action);
- int (*modify_flow_action_esp)(struct ib_flow_action *action,
- const struct ib_flow_action_attrs_esp *attr,
- struct uverbs_attr_bundle *attrs);
- struct ib_dm * (*alloc_dm)(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_dm_alloc_attr *attr,
- struct uverbs_attr_bundle *attrs);
- int (*dealloc_dm)(struct ib_dm *dm);
- struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
- struct ib_dm_mr_attr *attr,
- struct uverbs_attr_bundle *attrs);
- struct ib_counters * (*create_counters)(struct ib_device *device,
- struct uverbs_attr_bundle *attrs);
- int (*destroy_counters)(struct ib_counters *counters);
- int (*read_counters)(struct ib_counters *counters,
- struct ib_counters_read_attr *counters_read_attr,
- struct uverbs_attr_bundle *attrs);
+ int (*del_gid)(const struct ib_gid_attr *attr, void **context);
+ int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
+ u16 *pkey);
+ struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
+ struct ib_udata *udata);
+ int (*dealloc_ucontext)(struct ib_ucontext *context);
+ int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
+ void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+ struct ib_pd *(*alloc_pd)(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+ int (*dealloc_pd)(struct ib_pd *pd);
+ struct ib_ah *(*create_ah)(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr, u32 flags,
+ struct ib_udata *udata);
+ int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
+ int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
+ int (*destroy_ah)(struct ib_ah *ah, u32 flags);
+ struct ib_srq *(*create_srq)(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+ int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+ int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+ int (*destroy_srq)(struct ib_srq *srq);
+ struct ib_qp *(*create_qp)(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
+ int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata);
+ int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+ int (*destroy_qp)(struct ib_qp *qp);
+ struct ib_cq *(*create_cq)(struct ib_device *device,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+ int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int (*destroy_cq)(struct ib_cq *cq);
+ int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+ struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
+ struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+ int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
+ int (*dereg_mr)(struct ib_mr *mr);
+ struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+ int (*advise_mr)(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice, u32 flags,
+ struct ib_sge *sg_list, u32 num_sge,
+ struct uverbs_attr_bundle *attrs);
+ int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+ int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
+ struct ib_mr_status *mr_status);
+ struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+ int (*dealloc_mw)(struct ib_mw *mw);
+ struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+ int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
+ u64 iova);
+ int (*unmap_fmr)(struct list_head *fmr_list);
+ int (*dealloc_fmr)(struct ib_fmr *fmr);
+ int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+ int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+ struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
+ struct ib_ucontext *ucontext,
+ struct ib_udata *udata);
+ int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
+ struct ib_flow *(*create_flow)(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ int domain, struct ib_udata *udata);
+ int (*destroy_flow)(struct ib_flow *flow_id);
+ struct ib_flow_action *(*create_flow_action_esp)(
+ struct ib_device *device,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_flow_action)(struct ib_flow_action *action);
+ int (*modify_flow_action_esp)(
+ struct ib_flow_action *action,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+ int state);
+ int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *ivf);
+ int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_stats *stats);
+ int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+ int type);
+ struct ib_wq *(*create_wq)(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+ int (*destroy_wq)(struct ib_wq *wq);
+ int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+ struct ib_rwq_ind_table *(*create_rwq_ind_table)(
+ struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+ int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ struct ib_dm *(*alloc_dm)(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dm)(struct ib_dm *dm);
+ struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+ struct ib_counters *(*create_counters)(
+ struct ib_device *device, struct uverbs_attr_bundle *attrs);
+ int (*destroy_counters)(struct ib_counters *counters);
+ int (*read_counters)(struct ib_counters *counters,
+ struct ib_counters_read_attr *counters_read_attr,
+ struct uverbs_attr_bundle *attrs);
+ /**
+ * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
+ * driver initialized data. The struct is kfree()'ed by the sysfs
+ * core when the device is removed. A lifespan of -1 in the return
+ * struct tells the core to set a default lifespan.
+ */
+ struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
+ u8 port_num);
+ /**
+ * get_hw_stats - Fill in the counter value(s) in the stats struct.
+ * @index - The index in the value array we wish to have updated, or
+ * num_counters if we want all stats updated
+ * Return codes -
+ * < 0 - Error, no counters updated
+ * index - Updated the single counter pointed to by index
+ * num_counters - Updated all counters (will reset the timestamp
+ * and prevent further calls for lifespan milliseconds)
+ * Drivers are allowed to update all counters in lieu of just the
+ * one given in index at their option
+ */
+ int (*get_hw_stats)(struct ib_device *device,
+ struct rdma_hw_stats *stats, u8 port, int index);
+};
+
+struct ib_device {
+ /* Do not access @dma_device directly from ULP nor from HW drivers. */
+ struct device *dma_device;
+ struct ib_device_ops ops;
+ char name[IB_DEVICE_NAME_MAX];
+
+ struct list_head event_handler_list;
+ spinlock_t event_handler_lock;
+
+ rwlock_t client_data_lock;
+ struct list_head core_list;
+ /* Access to the client_data_list is protected by the client_data_lock
+ * rwlock and the lists_rwsem read-write semaphore
+ */
+ struct list_head client_data_list;
+ struct ib_cache cache;
/**
- * rdma netdev operation
- *
- * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
- * doesn't support the specified rdma netdev type.
+ * port_immutable is indexed by port number
*/
- struct net_device *(*alloc_rdma_netdev)(
- struct ib_device *device,
- u8 port_num,
- enum rdma_netdev_t type,
- const char *name,
- unsigned char name_assign_type,
- void (*setup)(struct net_device *));
+ struct ib_port_immutable *port_immutable;
+
+ int num_comp_vectors;
+
+ struct ib_port_pkey_list *port_pkey_list;
+
+ struct iw_cm_verbs *iwcm;
struct module *owner;
struct device dev;
- struct kobject *ports_parent;
+ /* First group for device attributes,
+ * Second group for driver provided attributes (optional).
+ * It is NULL terminated array.
+ */
+ const struct attribute_group *groups[3];
+
+ struct kobject *ports_kobj;
struct list_head port_list;
enum {
@@ -2569,19 +2577,14 @@ struct ib_device {
*/
struct rdma_restrack_root res;
- /**
- * The following mandatory functions are used only at device
- * registration. Keep functions such as these at the end of this
- * structure to avoid cache line misses when accessing struct ib_device
- * in fast paths.
- */
- int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
- void (*get_dev_fw_str)(struct ib_device *, char *str);
- const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
- int comp_vector);
-
- const struct uverbs_object_tree_def *const *driver_specs;
+ const struct uapi_definition *driver_def;
enum rdma_driver_id driver_id;
+ /*
+ * Provides synchronization between device unregistration and netlink
+ * commands on a device. To be used only by core.
+ */
+ refcount_t refcount;
+ struct completion unreg_completion;
};
struct ib_client {
@@ -2619,9 +2622,9 @@ void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *));
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *));
void ib_unregister_device(struct ib_device *device);
int ib_register_client (struct ib_client *client);
@@ -2630,6 +2633,30 @@ void ib_unregister_client(struct ib_client *client);
void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data);
+void ib_set_device_ops(struct ib_device *device,
+ const struct ib_device_ops *ops);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size);
+#else
+static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size,
+ pgprot_t prot)
+{
+ return -EINVAL;
+}
+static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ return -EINVAL;
+}
+#endif
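As a rough usage sketch (not part of this diff), a driver's mmap verb can hand a single BAR page to user space through rdma_user_mmap_io(); foo_ucontext and its db_pfn field are assumptions for illustration only.

static int foo_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
{
	struct foo_ucontext *uctx =
		container_of(ibctx, struct foo_ucontext, ibucontext);

	/* Only a single doorbell page is exposed in this sketch. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	return rdma_user_mmap_io(ibctx, vma, uctx->db_pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
}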
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
@@ -2714,7 +2741,6 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* @next_state: Next QP state
* @type: QP type
* @mask: Mask of supplied QP attributes
- * @ll : link layer of port
*
* This function is a helper function that a low-level driver's
* modify_qp method can use to validate the consumer's input. It
@@ -2723,8 +2749,7 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* and that the attribute mask supplied is allowed for the transition.
*/
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+ enum ib_qp_type type, enum ib_qp_attr_mask mask);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -3066,7 +3091,7 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
u8 port_num)
{
return rdma_protocol_roce(device, port_num) &&
- device->add_gid && device->del_gid;
+ device->ops.add_gid && device->ops.del_gid;
}
/*
@@ -3126,15 +3151,22 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
+enum rdma_create_ah_flags {
+ /* In a sleepable context */
+ RDMA_CREATE_AH_SLEEPABLE = BIT(0),
+};
+
/**
* rdma_create_ah - Creates an address handle for the given address vector.
* @pd: The protection domain associated with the address handle.
* @ah_attr: The attributes of the address vector.
+ * @flags: Create address handle flags (see enum rdma_create_ah_flags).
*
* The address handle is used to reference a local or global destination
* in all UD QP post sends.
*/
-struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
+ u32 flags);
/**
* rdma_create_user_ah - Creates an address handle for the given address vector.
@@ -3224,11 +3256,17 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
*/
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
+enum rdma_destroy_ah_flags {
+ /* In a sleepable context */
+ RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
+};
+
/**
* rdma_destroy_ah - Destroys an address handle.
* @ah: The address handle to destroy.
+ * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
*/
-int rdma_destroy_ah(struct ib_ah *ah);
+int rdma_destroy_ah(struct ib_ah *ah, u32 flags);
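A minimal sketch of the new calling convention, assuming a caller running in a sleepable context; atomic callers would pass 0 for both flag arguments instead. foo_use_ah() is a hypothetical helper.

static int foo_use_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ... post UD work requests referencing ah ... */

	return rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}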
/**
* ib_create_srq - Creates a SRQ associated with the specified protection
@@ -3290,7 +3328,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
{
const struct ib_recv_wr *dummy;
- return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
+ return srq->device->ops.post_srq_recv(srq, recv_wr,
+ bad_recv_wr ? : &dummy);
}
/**
@@ -3393,7 +3432,7 @@ static inline int ib_post_send(struct ib_qp *qp,
{
const struct ib_send_wr *dummy;
- return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
+ return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
/**
@@ -3410,7 +3449,7 @@ static inline int ib_post_recv(struct ib_qp *qp,
{
const struct ib_recv_wr *dummy;
- return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
+ return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
@@ -3483,7 +3522,7 @@ int ib_destroy_cq(struct ib_cq *cq);
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
struct ib_wc *wc)
{
- return cq->device->poll_cq(cq, num_entries, wc);
+ return cq->device->ops.poll_cq(cq, num_entries, wc);
}
/**
@@ -3516,7 +3555,7 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
static inline int ib_req_notify_cq(struct ib_cq *cq,
enum ib_cq_notify_flags flags)
{
- return cq->device->req_notify_cq(cq, flags);
+ return cq->device->ops.req_notify_cq(cq, flags);
}
/**
@@ -3528,8 +3567,8 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
*/
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
- return cq->device->req_ncomp_notif ?
- cq->device->req_ncomp_notif(cq, wc_cnt) :
+ return cq->device->ops.req_ncomp_notif ?
+ cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
-ENOSYS;
}
@@ -3793,7 +3832,7 @@ static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
u64 *page_list, int list_len,
u64 iova)
{
- return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
+ return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
}
/**
@@ -4146,25 +4185,11 @@ static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
- !device->get_vector_affinity)
+ !device->ops.get_vector_affinity)
return NULL;
- return device->get_vector_affinity(device, comp_vector);
-
-}
-
-static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
- struct ib_qp *qp, struct ib_device *device)
-{
- uobj->object = ibflow;
- ibflow->uobject = uobj;
-
- if (qp) {
- atomic_inc(&qp->usecnt);
- ibflow->qp = qp;
- }
+ return device->ops.get_vector_affinity(device, comp_vector);
- ibflow->device = device;
}
/**
@@ -4175,8 +4200,42 @@ static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
*/
void rdma_roce_rescan_device(struct ib_device *ibdev);
-struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
+struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
+
+
+int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+
+struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+
+int rdma_init_netdev(struct ib_device *device, u8 port_num,
+ enum rdma_netdev_t type, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ struct net_device *netdev);
+
+/**
+ * rdma_set_device_sysfs_group - Set device attributes group to have
+ * driver specific sysfs entries for
+ * the infiniband class.
+ *
+ * @device: device pointer for which attributes to be created
+ * @group: Pointer to group which should be added when device
+ * is registered with sysfs.
+ * rdma_set_device_sysfs_group() allows existing drivers to expose one
+ * group per device to have sysfs attributes.
+ *
+ * NOTE: New drivers should not make use of this API; instead, new device
+ * parameters should be exposed via netlink commands. This API and mechanism
+ * exist only for existing drivers.
+ */
+static inline void
+rdma_set_device_sysfs_group(struct ib_device *dev,
+ const struct attribute_group *group)
+{
+ dev->groups[1] = group;
+}
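A hedged sketch of how an existing driver would feed groups[1] above; the hca_type attribute, foo_dev, and foo_setup_sysfs() are illustrative stand-ins rather than code from this patch.

static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "FOO0\n");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *foo_class_attributes[] = {
	&dev_attr_hca_type.attr,
	NULL,
};

static const struct attribute_group foo_attr_group = {
	.attrs = foo_class_attributes,
};

static void foo_setup_sysfs(struct foo_dev *fdev)
{
	/* Must run before ib_register_device() so the core creates the files. */
	rdma_set_device_sysfs_group(&fdev->ibdev, &foo_attr_group);
}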
-int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
- struct uverbs_attr_bundle *attrs);
#endif /* IB_VERBS_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 5d71a7f51a9f..60987a5903b7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -152,7 +152,11 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
* @ps: RDMA port space.
* @qp_type: type of queue pair associated with the id.
*
- * The id holds a reference on the network namespace until it is destroyed.
+ * Returns a new rdma_cm_id. The id holds a reference on the network
+ * namespace until it is destroyed.
+ *
+ * The event handler callback serializes on the id's mutex and is
+ * allowed to sleep.
*/
#define rdma_create_id(net, event_handler, context, ps, qp_type) \
__rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
@@ -192,7 +196,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
* @timeout_ms: Time to wait for resolution to complete.
*/
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms);
+ const struct sockaddr *dst_addr,
+ unsigned long timeout_ms);
/**
* rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier
@@ -202,7 +207,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
* Users must have first called rdma_resolve_addr to resolve a dst_addr
* into an RDMA address before calling this routine.
*/
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms);
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms);
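For illustration only, the usual resolve sequence now passes plain millisecond values of type unsigned long; FOO_RESOLVE_TIMEOUT_MS and foo_resolve() are assumed names.

#define FOO_RESOLVE_TIMEOUT_MS 2000UL

static int foo_resolve(struct rdma_cm_id *id, struct sockaddr *dst)
{
	int ret;

	ret = rdma_resolve_addr(id, NULL, dst, FOO_RESOLVE_TIMEOUT_MS);
	if (ret)
		return ret;

	/* In practice this is issued from the ADDR_RESOLVED event handler. */
	return rdma_resolve_route(id, FOO_RESOLVE_TIMEOUT_MS);
}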
/**
* rdma_create_qp - Allocate a QP and associate it with the specified RDMA
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c369703fcd69..70218e6b5187 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -96,7 +96,7 @@ int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
/**
* Check if there are any listeners to the netlink group
* @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
+ * Returns true on success or false if no listeners.
*/
-int rdma_nl_chk_listeners(unsigned int group);
+bool rdma_nl_chk_listeners(unsigned int group);
#endif /* _RDMA_NETLINK_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e79229a0cf01..dd0ed8048bb4 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,6 +149,10 @@ struct rvt_ibport {
#define RVT_CQN_MAX 16 /* maximum length of cq name */
+#define RVT_SGE_COPY_MEMCPY 0
+#define RVT_SGE_COPY_CACHELESS 1
+#define RVT_SGE_COPY_ADAPTIVE 2
+
/*
* Things that are driver specific, module parameters in hfi1 and qib
*/
@@ -161,6 +165,9 @@ struct rvt_driver_params {
*/
unsigned int lkey_table_size;
unsigned int qp_table_size;
+ unsigned int sge_copy_mode;
+ unsigned int wss_threshold;
+ unsigned int wss_clean_period;
int qpn_start;
int qpn_inc;
int qpn_res_start;
@@ -193,6 +200,19 @@ struct rvt_ah {
u8 log_pmtu;
};
+/* memory working set size */
+struct rvt_wss {
+ unsigned long *entries;
+ atomic_t total_count;
+ atomic_t clean_counter;
+ atomic_t clean_entry;
+
+ int threshold;
+ int num_entries;
+ long pages_mask;
+ unsigned int clean_period;
+};
+
struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
@@ -211,11 +231,18 @@ struct rvt_driver_provided {
* version requires the s_lock not to be held. The other assumes the
* s_lock is held.
*/
- void (*schedule_send)(struct rvt_qp *qp);
- void (*schedule_send_no_lock)(struct rvt_qp *qp);
+ bool (*schedule_send)(struct rvt_qp *qp);
+ bool (*schedule_send_no_lock)(struct rvt_qp *qp);
- /* Driver specific work request checking */
- int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+ /*
+ * Driver specific work request setup and checking.
+ * This function is allowed to perform any setup, checks, or
+ * adjustments required for the SWQE to be usable by the
+ * underlying protocols. This includes private data structure
+ * allocations.
+ */
+ int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
/*
* Sometimes rdmavt needs to kick the driver's send progress. That is
@@ -242,6 +269,13 @@ struct rvt_driver_provided {
void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
/*
+ * Init a structure allocated with qp_priv_alloc(). This should be
+ * called after all qp fields have been initialized in rdmavt.
+ */
+ int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_init_attr *init_attr);
+
+ /*
* Free the driver's private qp structure.
*/
void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp);
@@ -371,6 +405,9 @@ struct rvt_dev_info {
/* post send table */
const struct rvt_operation_params *post_parms;
+ /* opcode translation table */
+ const enum ib_wc_opcode *wc_opcode;
+
/* Driver specific helper functions */
struct rvt_driver_provided driver_f;
@@ -411,6 +448,8 @@ struct rvt_dev_info {
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
spinlock_t n_mcast_grps_lock;
+ /* Memory Working Set Size */
+ struct rvt_wss *wss;
};
/**
@@ -423,7 +462,14 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
const char *fmt, const char *name,
const int unit)
{
- snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+ /*
+ * FIXME: rvt and its users want to touch the ibdev before
+ * registration and have things like the name work. We don't have the
+ * infrastructure in the core to support this directly today, hack it
+ * to work by setting the name manually here.
+ */
+ dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
+ strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
@@ -434,7 +480,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
*/
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
- return rdi->ibdev.name;
+ return dev_name(&rdi->ibdev.dev);
}
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 927f6d5b6d0f..cbafb1878669 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -678,6 +678,13 @@ void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last);
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status);
+void rvt_ruc_loopback(struct rvt_qp *qp);
+
/**
* struct rvt_qp_iter - the iterator for QPs
* @qp - the current QP
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 9654d33edd98..8f179be9d9a9 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -39,6 +39,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_MR,
/**
+ * @RDMA_RESTRACK_CTX: Verbs contexts (CTX)
+ */
+ RDMA_RESTRACK_CTX,
+ /**
 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
@@ -112,6 +116,10 @@ struct rdma_restrack_entry {
* @type: various objects in restrack database
*/
enum rdma_restrack_type type;
+ /**
+ * @user: user resource
+ */
+ bool user;
};
/**
@@ -136,11 +144,8 @@ int rdma_restrack_count(struct rdma_restrack_root *res,
enum rdma_restrack_type type,
struct pid_namespace *ns);
-/**
- * rdma_restrack_add() - add object to the reource tracking database
- * @res: resource entry
- */
-void rdma_restrack_add(struct rdma_restrack_entry *res);
+void rdma_restrack_kadd(struct rdma_restrack_entry *res);
+void rdma_restrack_uadd(struct rdma_restrack_entry *res);
/**
 * rdma_restrack_del() - delete object from the resource tracking database
@@ -155,7 +160,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res);
*/
static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
{
- return !res->task;
+ return !res->user;
}
/**
@@ -173,16 +178,10 @@ int rdma_restrack_put(struct rdma_restrack_entry *res);
/**
* rdma_restrack_set_task() - set the task for this resource
* @res: resource entry
- * @task: task struct
+ * @caller: kernel name; the current task will be used if the caller is NULL.
*/
-static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- struct task_struct *task)
-{
- if (res->task)
- put_task_struct(res->task);
- get_task_struct(task);
- res->task = task;
-}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller);
/*
* Helper functions for rdma drivers when filling out
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9e997c3c2f04..27da906beea7 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -52,6 +52,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
+ UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
enum uverbs_obj_access {
@@ -78,6 +79,8 @@ struct uverbs_attr_spec {
*/
u8 alloc_and_copy:1;
u8 mandatory:1;
+ /* True if this is from UVERBS_ATTR_UHW */
+ u8 is_udata:1;
union {
struct {
@@ -101,7 +104,7 @@ struct uverbs_attr_spec {
} enum_def;
} u;
- /* This weird split of the enum lets us remove some padding */
+ /* This weird split lets us remove some padding */
union {
struct {
/*
@@ -111,6 +114,17 @@ struct uverbs_attr_spec {
*/
const struct uverbs_attr_spec *ids;
} enum_def;
+
+ struct {
+ /*
+ * higher bits mean the namespace and lower bits mean
+ * the type id within the namespace.
+ */
+ u16 obj_type;
+ u16 min_len;
+ u16 max_len;
+ u8 access;
+ } objs_arr;
} u2;
};
@@ -128,6 +142,13 @@ struct uverbs_attr_spec {
*
* The tree encodes multiple types, and uses a scheme where OBJ_ID,0,0 returns
 * the object slot, and OBJ_ID,METH_ID,0 returns the method slot.
+ *
+ * This also encodes the tables for the write() and write() extended commands
+ * using the coding
+ * OBJ_ID,UVERBS_API_METHOD_IS_WRITE,command #
+ * OBJ_ID,UVERBS_API_METHOD_IS_WRITE_EX,command_ex #
+ * i.e. the WRITE path is treated as a special method type in the ioctl
+ * framework.
*/
enum uapi_radix_data {
UVERBS_API_NS_FLAG = 1U << UVERBS_ID_NS_SHIFT,
@@ -135,12 +156,16 @@ enum uapi_radix_data {
UVERBS_API_ATTR_KEY_BITS = 6,
UVERBS_API_ATTR_KEY_MASK = GENMASK(UVERBS_API_ATTR_KEY_BITS - 1, 0),
UVERBS_API_ATTR_BKEY_LEN = (1 << UVERBS_API_ATTR_KEY_BITS) - 1,
+ UVERBS_API_WRITE_KEY_NUM = 1 << UVERBS_API_ATTR_KEY_BITS,
UVERBS_API_METHOD_KEY_BITS = 5,
UVERBS_API_METHOD_KEY_SHIFT = UVERBS_API_ATTR_KEY_BITS,
- UVERBS_API_METHOD_KEY_NUM_CORE = 24,
- UVERBS_API_METHOD_KEY_NUM_DRIVER = (1 << UVERBS_API_METHOD_KEY_BITS) -
- UVERBS_API_METHOD_KEY_NUM_CORE,
+ UVERBS_API_METHOD_KEY_NUM_CORE = 22,
+ UVERBS_API_METHOD_IS_WRITE = 30 << UVERBS_API_METHOD_KEY_SHIFT,
+ UVERBS_API_METHOD_IS_WRITE_EX = 31 << UVERBS_API_METHOD_KEY_SHIFT,
+ UVERBS_API_METHOD_KEY_NUM_DRIVER =
+ (UVERBS_API_METHOD_IS_WRITE >> UVERBS_API_METHOD_KEY_SHIFT) -
+ UVERBS_API_METHOD_KEY_NUM_CORE,
UVERBS_API_METHOD_KEY_MASK = GENMASK(
UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT - 1,
UVERBS_API_METHOD_KEY_SHIFT),
@@ -193,7 +218,22 @@ static inline __attribute_const__ u32 uapi_key_ioctl_method(u32 id)
return id << UVERBS_API_METHOD_KEY_SHIFT;
}
-static inline __attribute_const__ u32 uapi_key_attr_to_method(u32 attr_key)
+static inline __attribute_const__ u32 uapi_key_write_method(u32 id)
+{
+ if (id >= UVERBS_API_WRITE_KEY_NUM)
+ return UVERBS_API_KEY_ERR;
+ return UVERBS_API_METHOD_IS_WRITE | id;
+}
+
+static inline __attribute_const__ u32 uapi_key_write_ex_method(u32 id)
+{
+ if (id >= UVERBS_API_WRITE_KEY_NUM)
+ return UVERBS_API_KEY_ERR;
+ return UVERBS_API_METHOD_IS_WRITE_EX | id;
+}
+
+static inline __attribute_const__ u32
+uapi_key_attr_to_ioctl_method(u32 attr_key)
{
return attr_key &
(UVERBS_API_OBJ_KEY_MASK | UVERBS_API_METHOD_KEY_MASK);
@@ -201,10 +241,23 @@ static inline __attribute_const__ u32 uapi_key_attr_to_method(u32 attr_key)
static inline __attribute_const__ bool uapi_key_is_ioctl_method(u32 key)
{
- return (key & UVERBS_API_METHOD_KEY_MASK) != 0 &&
+ unsigned int method = key & UVERBS_API_METHOD_KEY_MASK;
+
+ return method != 0 && method < UVERBS_API_METHOD_IS_WRITE &&
(key & UVERBS_API_ATTR_KEY_MASK) == 0;
}
+static inline __attribute_const__ bool uapi_key_is_write_method(u32 key)
+{
+ return (key & UVERBS_API_METHOD_KEY_MASK) == UVERBS_API_METHOD_IS_WRITE;
+}
+
+static inline __attribute_const__ bool uapi_key_is_write_ex_method(u32 key)
+{
+ return (key & UVERBS_API_METHOD_KEY_MASK) ==
+ UVERBS_API_METHOD_IS_WRITE_EX;
+}
+
static inline __attribute_const__ u32 uapi_key_attrs_start(u32 ioctl_method_key)
{
/* 0 is the method slot itself */
@@ -234,9 +287,12 @@ static inline __attribute_const__ u32 uapi_key_attr(u32 id)
return id;
}
+/* Only true for ioctl methods */
static inline __attribute_const__ bool uapi_key_is_attr(u32 key)
{
- return (key & UVERBS_API_METHOD_KEY_MASK) != 0 &&
+ unsigned int method = key & UVERBS_API_METHOD_KEY_MASK;
+
+ return method != 0 && method < UVERBS_API_METHOD_IS_WRITE &&
(key & UVERBS_API_ATTR_KEY_MASK) != 0;
}
@@ -251,6 +307,11 @@ static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key)
return attr_key - 1;
}
+static inline __attribute_const__ u32 uapi_bkey_to_key_attr(u32 attr_bkey)
+{
+ return attr_bkey + 1;
+}
+
/*
* =======================================
* Verbs definitions
@@ -268,8 +329,7 @@ struct uverbs_method_def {
u32 flags;
size_t num_attrs;
const struct uverbs_attr_def * const (*attrs)[];
- int (*handler)(struct ib_uverbs_file *ufile,
- struct uverbs_attr_bundle *ctx);
+ int (*handler)(struct uverbs_attr_bundle *attrs);
};
struct uverbs_object_def {
@@ -279,11 +339,132 @@ struct uverbs_object_def {
const struct uverbs_method_def * const (*methods)[];
};
-struct uverbs_object_tree_def {
- size_t num_objects;
- const struct uverbs_object_def * const (*objects)[];
+enum uapi_definition_kind {
+ UAPI_DEF_END = 0,
+ UAPI_DEF_OBJECT_START,
+ UAPI_DEF_WRITE,
+ UAPI_DEF_CHAIN_OBJ_TREE,
+ UAPI_DEF_CHAIN,
+ UAPI_DEF_IS_SUPPORTED_FUNC,
+ UAPI_DEF_IS_SUPPORTED_DEV_FN,
+};
+
+enum uapi_definition_scope {
+ UAPI_SCOPE_OBJECT = 1,
+ UAPI_SCOPE_METHOD = 2,
};
+struct uapi_definition {
+ u8 kind;
+ u8 scope;
+ union {
+ struct {
+ u16 object_id;
+ } object_start;
+ struct {
+ u16 command_num;
+ u8 is_ex:1;
+ u8 has_udata:1;
+ u8 has_resp:1;
+ u8 req_size;
+ u8 resp_size;
+ } write;
+ };
+
+ union {
+ bool (*func_is_supported)(struct ib_device *device);
+ int (*func_write)(struct uverbs_attr_bundle *attrs);
+ const struct uapi_definition *chain;
+ const struct uverbs_object_def *chain_obj_tree;
+ size_t needs_fn_offset;
+ };
+};
+
+/* Define things connected to object_id */
+#define DECLARE_UVERBS_OBJECT(_object_id, ...) \
+ { \
+ .kind = UAPI_DEF_OBJECT_START, \
+ .object_start = { .object_id = _object_id }, \
+ }, \
+ ##__VA_ARGS__
+
+/* Use in a var_args of DECLARE_UVERBS_OBJECT */
+#define DECLARE_UVERBS_WRITE(_command_num, _func, _cmd_desc, ...) \
+ { \
+ .kind = UAPI_DEF_WRITE, \
+ .scope = UAPI_SCOPE_OBJECT, \
+ .write = { .is_ex = 0, .command_num = _command_num }, \
+ .func_write = _func, \
+ _cmd_desc, \
+ }, \
+ ##__VA_ARGS__
+
+/* Use in a var_args of DECLARE_UVERBS_OBJECT */
+#define DECLARE_UVERBS_WRITE_EX(_command_num, _func, _cmd_desc, ...) \
+ { \
+ .kind = UAPI_DEF_WRITE, \
+ .scope = UAPI_SCOPE_OBJECT, \
+ .write = { .is_ex = 1, .command_num = _command_num }, \
+ .func_write = _func, \
+ _cmd_desc, \
+ }, \
+ ##__VA_ARGS__
+
+/*
+ * Object is only supported if the function pointer named ibdev_fn in struct
+ * ib_device is not NULL.
+ */
+#define UAPI_DEF_OBJ_NEEDS_FN(ibdev_fn) \
+ { \
+ .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
+ .scope = UAPI_SCOPE_OBJECT, \
+ .needs_fn_offset = \
+ offsetof(struct ib_device_ops, ibdev_fn) + \
+ BUILD_BUG_ON_ZERO( \
+ sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
+ sizeof(void *)), \
+ }
+
+/*
+ * Method is only supported if the function pointer named ibdev_fn in struct
+ * ib_device is not NULL.
+ */
+#define UAPI_DEF_METHOD_NEEDS_FN(ibdev_fn) \
+ { \
+ .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \
+ .scope = UAPI_SCOPE_METHOD, \
+ .needs_fn_offset = \
+ offsetof(struct ib_device_ops, ibdev_fn) + \
+ BUILD_BUG_ON_ZERO( \
+ sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
+ sizeof(void *)), \
+ }
+
+/* Call a function to determine if the entire object is supported or not */
+#define UAPI_DEF_IS_OBJ_SUPPORTED(_func) \
+ { \
+ .kind = UAPI_DEF_IS_SUPPORTED_FUNC, \
+ .scope = UAPI_SCOPE_OBJECT, .func_is_supported = _func, \
+ }
+
+/* Include another struct uapi_definition in this one */
+#define UAPI_DEF_CHAIN(_def_var) \
+ { \
+ .kind = UAPI_DEF_CHAIN, .chain = _def_var, \
+ }
+
+/* Temporary until the tree base description is replaced */
+#define UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, _object_ptr, ...) \
+ { \
+ .kind = UAPI_DEF_CHAIN_OBJ_TREE, \
+ .object_start = { .object_id = _object_enum }, \
+ .chain_obj_tree = _object_ptr, \
+ }, \
+ ##__VA_ARGS__
+#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \
+ UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, &UVERBS_OBJECT(_object_enum), \
+ ##__VA_ARGS__)
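To show how these pieces compose, here is a hedged, hypothetical table: the FOO_* identifiers and foo_create_thing() are stand-ins, create_flow is used only as an example ib_device_ops member, and the .write.has_udata fragment stands in for a full command descriptor. The array is END-terminated and would be referenced from ib_device->driver_def.

static const struct uapi_definition foo_uapi_defs[] = {
	DECLARE_UVERBS_OBJECT(
		FOO_OBJECT_THING,
		DECLARE_UVERBS_WRITE(FOO_CMD_CREATE_THING,
				     foo_create_thing,
				     /* descriptor fragment for the write cmd */
				     .write.has_udata = 1),
		/* Hide the whole object if the driver lacks this op. */
		UAPI_DEF_OBJ_NEEDS_FN(create_flow)),
	{},
};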
+
/*
* =======================================
* Attribute Specifications
@@ -323,6 +504,33 @@ struct uverbs_object_tree_def {
#define UA_MANDATORY .mandatory = 1
#define UA_OPTIONAL .mandatory = 0
+/*
+ * _min_len must be bigger than 0 and _max_len must be smaller than 4095. Only
+ * READ/WRITE accesses are supported.
+ */
+#define UVERBS_ATTR_IDRS_ARR(_attr_id, _idr_type, _access, _min_len, _max_len, \
+ ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id) + \
+ BUILD_BUG_ON_ZERO((_min_len) == 0 || \
+ (_max_len) > \
+ PAGE_SIZE / sizeof(void *) || \
+ (_min_len) > (_max_len) || \
+ (_access) == UVERBS_ACCESS_NEW || \
+ (_access) == UVERBS_ACCESS_DESTROY), \
+ .attr = { .type = UVERBS_ATTR_TYPE_IDRS_ARRAY, \
+ .u2.objs_arr.obj_type = _idr_type, \
+ .u2.objs_arr.access = _access, \
+ .u2.objs_arr.min_len = _min_len, \
+ .u2.objs_arr.max_len = _max_len, \
+ __VA_ARGS__ } })
+
+/*
+ * Only for use with UVERBS_ATTR_IDR, allows any uobject type to be accepted,
+ * the user must validate the type of the uobject instead.
+ */
+#define UVERBS_IDR_ANY_OBJECT 0xFFFF
+
#define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -365,6 +573,15 @@ struct uverbs_object_tree_def {
__VA_ARGS__ }, \
})
+/* An input value that is a member of the enum _enum_type. */
+#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \
+ UVERBS_ATTR_PTR_IN( \
+ _attr_id, \
+ UVERBS_ATTR_SIZE( \
+ sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \
+ sizeof(u64)), \
+ __VA_ARGS__)
+
/*
* An input value that is a bitwise combination of values of _enum_type.
* This permits the flag value to be passed as either a u32 or u64, it must
@@ -386,25 +603,12 @@ struct uverbs_object_tree_def {
#define UVERBS_ATTR_UHW() \
UVERBS_ATTR_PTR_IN(UVERBS_ATTR_UHW_IN, \
UVERBS_ATTR_MIN_SIZE(0), \
- UA_OPTIONAL), \
+ UA_OPTIONAL, \
+ .is_udata = 1), \
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_UHW_OUT, \
UVERBS_ATTR_MIN_SIZE(0), \
- UA_OPTIONAL)
-
-/*
- * =======================================
- * Declaration helpers
- * =======================================
- */
-
-#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) \
- static const struct uverbs_object_def *const _name##_ptr[] = { \
- __VA_ARGS__, \
- }; \
- static const struct uverbs_object_tree_def _name = { \
- .num_objects = ARRAY_SIZE(_name##_ptr), \
- .objects = &_name##_ptr, \
- }
+ UA_OPTIONAL, \
+ .is_udata = 1)
/* =================================================
* Parsing infrastructure
@@ -431,14 +635,22 @@ struct uverbs_obj_attr {
const struct uverbs_api_attr *attr_elm;
};
+struct uverbs_objs_arr_attr {
+ struct ib_uobject **uobjects;
+ u16 len;
+};
+
struct uverbs_attr {
union {
struct uverbs_ptr_attr ptr_attr;
struct uverbs_obj_attr obj_attr;
+ struct uverbs_objs_arr_attr objs_arr_attr;
};
};
struct uverbs_attr_bundle {
+ struct ib_udata driver_udata;
+ struct ib_udata ucore;
struct ib_uverbs_file *ufile;
DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
struct uverbs_attr attrs[];
@@ -507,6 +719,53 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+/*
+ * uverbs_attr_ptr_get_array_size() - Get the number of elements in the array
+ * pointed to by a ptr attribute.
+ * @attrs: The attribute bundle
+ * @idx: The ID of the attribute
+ * @elem_size: The size of the element in the array
+ */
+static inline int
+uverbs_attr_ptr_get_array_size(struct uverbs_attr_bundle *attrs, u16 idx,
+ size_t elem_size)
+{
+ int size = uverbs_attr_get_len(attrs, idx);
+
+ if (size < 0)
+ return size;
+
+ if (size % elem_size)
+ return -EINVAL;
+
+ return size / elem_size;
+}
+
+/**
+ * uverbs_attr_get_uobjs_arr() - Provides the array's properties for an
+ * attribute of type UVERBS_ATTR_TYPE_IDRS_ARRAY.
+ * @arr: Returned pointer to the array of uobject pointers, or NULL if
+ * the attribute isn't provided.
+ *
+ * Return: The array length or 0 if no attribute was provided.
+ */
+static inline int uverbs_attr_get_uobjs_arr(
+ const struct uverbs_attr_bundle *attrs_bundle, u16 attr_idx,
+ struct ib_uobject ***arr)
+{
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(attrs_bundle, attr_idx);
+
+ if (IS_ERR(attr)) {
+ *arr = NULL;
+ return 0;
+ }
+
+ *arr = attr->objs_arr_attr.uobjects;
+
+ return attr->objs_arr_attr.len;
+}
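A short, hedged handler fragment showing the intended access pattern; FOO_ATTR_HANDLES and foo_use_object() are hypothetical.

static int foo_handle_array(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject **uobjs;
	int num, i;

	num = uverbs_attr_get_uobjs_arr(attrs, FOO_ATTR_HANDLES, &uobjs);
	for (i = 0; i < num; i++) {
		/* Each entry was already looked up with the declared access. */
		foo_use_object(uobjs[i]->object);
	}
	return 0;
}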
+
static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
{
return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
@@ -582,6 +841,12 @@ static inline int _uverbs_copy_from_or_zero(void *to,
#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \
_uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
+static inline struct ib_ucontext *
+ib_uverbs_get_ucontext(const struct uverbs_attr_bundle *attrs)
+{
+ return ib_uverbs_get_ucontext_file(attrs->ufile);
+}
+
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, u64 allowed_bits);
@@ -603,6 +868,11 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
}
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val);
+int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
+ size_t idx, const void *from, size_t size);
#else
static inline int
uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
@@ -631,6 +901,40 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return ERR_PTR(-EINVAL);
}
+static inline int
+_uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ return -EINVAL;
+}
+static inline int
+uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
+ size_t idx, const void *from, size_t size)
+{
+ return -EINVAL;
+}
#endif
+#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+ ({ \
+ s64 _val; \
+ int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), NULL); \
+ (*_to) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+ ({ \
+ s64 _val; \
+ s64 _def_val = _default; \
+ int _ret = \
+ _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), &_def_val); \
+ (*_to) = _val; \
+ _ret; \
+ })
#endif
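As a usage sketch under assumed names (FOO_ATTR_MODE, enum foo_mode, foo_apply_mode()), a handler fetches a UVERBS_ATTR_CONST_IN attribute into a typed variable, falling back to a default when the attribute is absent.

static int foo_method_handler(struct uverbs_attr_bundle *attrs)
{
	enum foo_mode mode;
	int ret;

	/* Range-checked against type_min()/type_max() of enum foo_mode. */
	ret = uverbs_get_const_default(&mode, attrs, FOO_ATTR_MODE,
				       FOO_MODE_DEFAULT);
	if (ret)
		return ret;

	return foo_apply_mode(attrs->ufile, mode);
}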
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index b3b21733cc55..3447bfe356d6 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -43,7 +43,7 @@
#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y)
#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id)
#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id)
-#define UVERBS_OBJECT(id) _UVERBS_NAME(UVERBS_MOUDLE_NAME, _object_##id)
+#define UVERBS_OBJECT(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _object_##id)
/* These are static so they do not need to be qualified */
#define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
@@ -102,18 +102,11 @@
#define ADD_UVERBS_METHODS(_name, _object_id, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- static const struct uverbs_object_def _name##_struct = { \
+ static const struct uverbs_object_def _name = { \
.id = _object_id, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
.methods = &UVERBS_OBJECT_METHODS(_object_id) \
- }; \
- static const struct uverbs_object_def *const _name##_ptrs[] = { \
- &_name##_struct, \
- }; \
- static const struct uverbs_object_tree_def _name = { \
- .num_objects = 1, \
- .objects = &_name##_ptrs, \
- }
+ };
/* Used by drivers to declare a complete parsing tree for a single method that
* differs only in having additional driver specific attributes.
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 3b00231cc084..883abcf6d36e 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -37,15 +37,6 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-const struct uverbs_object_tree_def *uverbs_default_get_objects(void);
-#else
-static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
-{
- return NULL;
-}
-#endif
-
/* Returns _id, or causes a compile error if _id is not a u32.
*
* The uobj APIs should only be used with the write based uAPI to access
@@ -54,15 +45,15 @@ static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(vo
*/
#define _uobj_check_id(_id) ((_id) * typecheck(u32, _id))
-#define uobj_get_type(_ufile, _object) \
- uapi_get_object((_ufile)->device->uapi, _object)
+#define uobj_get_type(_attrs, _object) \
+ uapi_get_object((_attrs)->ufile->device->uapi, _object)
-#define uobj_get_read(_type, _id, _ufile) \
- rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+#define uobj_get_read(_type, _id, _attrs) \
+ rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
_uobj_check_id(_id), UVERBS_LOOKUP_READ)
-#define ufd_get_read(_type, _fdnum, _ufile) \
- rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+#define ufd_get_read(_type, _fdnum, _attrs) \
+ rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
(_fdnum)*typecheck(s32, _fdnum), \
UVERBS_LOOKUP_READ)
@@ -72,26 +63,27 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
return NULL;
return uobj->object;
}
-#define uobj_get_obj_read(_object, _type, _id, _ufile) \
+#define uobj_get_obj_read(_object, _type, _id, _attrs) \
((struct ib_##_object *)_uobj_get_obj_read( \
- uobj_get_read(_type, _id, _ufile)))
+ uobj_get_read(_type, _id, _attrs)))
-#define uobj_get_write(_type, _id, _ufile) \
- rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+#define uobj_get_write(_type, _id, _attrs) \
+ rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
_uobj_check_id(_id), UVERBS_LOOKUP_WRITE)
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
- struct ib_uverbs_file *ufile, int success_res);
-#define uobj_perform_destroy(_type, _id, _ufile, _success_res) \
- __uobj_perform_destroy(uobj_get_type(_ufile, _type), \
- _uobj_check_id(_id), _ufile, _success_res)
+ const struct uverbs_attr_bundle *attrs);
+#define uobj_perform_destroy(_type, _id, _attrs) \
+ __uobj_perform_destroy(uobj_get_type(_attrs, _type), \
+ _uobj_check_id(_id), _attrs)
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
- u32 id, struct ib_uverbs_file *ufile);
+ u32 id,
+ const struct uverbs_attr_bundle *attrs);
-#define uobj_get_destroy(_type, _id, _ufile) \
- __uobj_get_destroy(uobj_get_type(_ufile, _type), _uobj_check_id(_id), \
- _ufile)
+#define uobj_get_destroy(_type, _id, _attrs) \
+ __uobj_get_destroy(uobj_get_type(_attrs, _type), _uobj_check_id(_id), \
+ _attrs)
static inline void uobj_put_destroy(struct ib_uobject *uobj)
{
@@ -111,14 +103,13 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj,
- int success_res)
+static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj)
{
int ret = rdma_alloc_commit_uobject(uobj);
if (ret)
return ret;
- return success_res;
+ return 0;
}
static inline void uobj_alloc_abort(struct ib_uobject *uobj)
@@ -127,18 +118,81 @@ static inline void uobj_alloc_abort(struct ib_uobject *uobj)
}
static inline struct ib_uobject *
-__uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile,
- struct ib_device **ib_dev)
+__uobj_alloc(const struct uverbs_api_object *obj,
+ struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
{
- struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, ufile);
+ struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs->ufile);
if (!IS_ERR(uobj))
*ib_dev = uobj->context->device;
return uobj;
}
-#define uobj_alloc(_type, _ufile, _ib_dev) \
- __uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev)
+#define uobj_alloc(_type, _attrs, _ib_dev) \
+ __uobj_alloc(uobj_get_type(_attrs, _type), _attrs, _ib_dev)
+
+static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action,
+ struct ib_uobject *uobj,
+ struct ib_device *ib_dev,
+ enum ib_flow_action_type type)
+{
+ atomic_set(&action->usecnt, 0);
+ action->device = ib_dev;
+ action->type = type;
+ action->uobject = uobj;
+ uobj->object = action;
+}
+
+struct ib_uflow_resources {
+ size_t max;
+ size_t num;
+ size_t collection_num;
+ size_t counters_num;
+ struct ib_counters **counters;
+ struct ib_flow_action **collection;
+};
+
+struct ib_uflow_object {
+ struct ib_uobject uobject;
+ struct ib_uflow_resources *resources;
+};
+
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs);
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj);
+void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+
+static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
+ struct ib_qp *qp, struct ib_device *device,
+ struct ib_uflow_resources *uflow_res)
+{
+ struct ib_uflow_object *uflow;
+
+ uobj->object = ibflow;
+ ibflow->uobject = uobj;
+
+ if (qp) {
+ atomic_inc(&qp->usecnt);
+ ibflow->qp = qp;
+ }
+
+ ibflow->device = device;
+ uflow = container_of(uobj, typeof(*uflow), uobject);
+ uflow->resources = uflow_res;
+}
+
+struct uverbs_api_object {
+ const struct uverbs_obj_type *type_attrs;
+ const struct uverbs_obj_type_class *type_class;
+ u8 disabled:1;
+ u32 id;
+};
+
+static inline u32 uobj_get_object_id(struct ib_uobject *uobj)
+{
+ return uobj->uapi_object->id;
+}
#endif
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index c891ada3c5c2..d85e6befa26b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -61,6 +61,9 @@ struct scsi_pointer {
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+/* for scmd->state */
+#define SCMD_STATE_COMPLETE 0
+
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
@@ -145,6 +148,7 @@ struct scsi_cmnd {
int result; /* Status code from lower level driver */
int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
unsigned char tag; /* SCSI-II queued command tag */
};
@@ -171,7 +175,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern int scsi_init_io(struct scsi_cmnd *cmd);
+extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index c7bba2b24849..a862dc23c68d 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -69,7 +69,7 @@ struct scsi_device_handler {
int (*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
- int (*prep_fn)(struct scsi_device *, struct request *);
+ blk_status_t (*prep_fn)(struct scsi_device *, struct request *);
int (*set_params)(struct scsi_device *, const char *);
void (*rescan)(struct scsi_device *);
};
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index fae8b465233e..6dffa8555a39 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -2,6 +2,7 @@
#ifndef _SCSI_SCSI_DRIVER_H
#define _SCSI_SCSI_DRIVER_H
+#include <linux/blk_types.h>
#include <linux/device.h>
struct module;
@@ -13,7 +14,7 @@ struct scsi_driver {
struct device_driver gendrv;
void (*rescan)(struct device *);
- int (*init_command)(struct scsi_cmnd *);
+ blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
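A rough sketch of the converted prototype in use; the command set-up is deliberately skeletal and foo_init_command() is not taken from any real ULD.

static blk_status_t foo_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret;

	if (blk_rq_bytes(cmd->request) == 0)
		return BLK_STS_IOERR;

	/* Map the request's data into the command's scatterlist. */
	ret = scsi_init_io(cmd);
	if (ret != BLK_STS_OK)
		return ret;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = READ_10;
	/* ... fill in LBA and transfer length ... */
	return BLK_STS_OK;
}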
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5ea06d310a25..6ca954e9f752 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -11,7 +11,6 @@
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
-struct request_queue;
struct block_device;
struct completion;
struct module;
@@ -22,7 +21,6 @@ struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
-struct blk_queue_tags;
/*
@@ -44,9 +42,6 @@ struct blk_queue_tags;
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02
-#define DISABLE_CLUSTERING 0
-#define ENABLE_CLUSTERING 1
-
struct scsi_host_template {
struct module *module;
const char *name;
@@ -366,6 +361,11 @@ struct scsi_host_template {
unsigned int max_sectors;
/*
+ * Maximum size in bytes of a single segment.
+ */
+ unsigned int max_segment_size;
+
+ /*
* DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
*/
@@ -415,16 +415,6 @@ struct scsi_host_template {
unsigned unchecked_isa_dma:1;
/*
- * True if this host adapter can make good use of clustering.
- * I originally thought that if the tablesize was large that it
- * was a waste of CPU cycles to prepare a cluster list, but
- * it works out that the Buslogic is faster if you use a smaller
- * number of segments (i.e. use clustering). I guess it is
- * inefficient.
- */
- unsigned use_clustering:1;
-
- /*
* True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@@ -547,14 +537,8 @@ struct Scsi_Host {
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
- /*
- * Area to keep a shared tag map (if needed, will be
- * NULL if not).
- */
- union {
- struct blk_queue_tag *bqt;
- struct blk_mq_tag_set tag_set;
- };
+ /* Area to keep a shared tag map */
+ struct blk_mq_tag_set tag_set;
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
@@ -604,6 +588,7 @@ struct Scsi_Host {
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
unsigned int max_sectors;
+ unsigned int max_segment_size;
unsigned long dma_boundary;
/*
* In scsi-mq mode, the number of hardware queues supported by the LLD.
@@ -621,7 +606,6 @@ struct Scsi_Host {
unsigned active_mode:2;
unsigned unchecked_isa_dma:1;
- unsigned use_clustering:1;
/*
* Host has requested that no further requests come through for the
@@ -648,7 +632,6 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
- unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
/* Host responded with short (<36 bytes) INQUIRY result */
@@ -742,11 +725,6 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress;
}
-static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
-{
- return shost->use_blk_mq;
-}
-
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e192a0caa850..6053d46e794e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -23,19 +23,15 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
int tag)
{
struct request *req = NULL;
+ u16 hwq;
if (tag == SCSI_NO_TAG)
return NULL;
- if (shost_use_blk_mq(shost)) {
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
- if (hwq < shost->tag_set.nr_hw_queues) {
- req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
- blk_mq_unique_tag_to_tag(tag));
- }
- } else {
- req = blk_map_queue_find_tag(shost->bqt, tag);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < shost->tag_set.nr_hw_queues) {
+ req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(tag));
}
if (!req)
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index c16a3c9a4d9b..9220758d5087 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -67,7 +67,8 @@ enum {
enum {
SRP_NO_DATA_DESC = 0,
SRP_DATA_DESC_DIRECT = 1,
- SRP_DATA_DESC_INDIRECT = 2
+ SRP_DATA_DESC_INDIRECT = 2,
+ SRP_DATA_DESC_IMM = 3, /* new in SRP2 */
};
enum {
@@ -111,9 +112,16 @@ struct srp_indirect_buf {
struct srp_direct_buf desc_list[0];
} __attribute__((packed));
+/* Immediate data buffer descriptor as defined in SRP2. */
+struct srp_imm_buf {
+ __be32 len;
+};
+
+/* srp_login_req.flags */
enum {
SRP_MULTICHAN_SINGLE = 0,
- SRP_MULTICHAN_MULTI = 1
+ SRP_MULTICHAN_MULTI = 1,
+ SRP_IMMED_REQUESTED = 0x80, /* new in SRP2 */
};
struct srp_login_req {
@@ -124,7 +132,9 @@ struct srp_login_req {
u8 reserved2[4];
__be16 req_buf_fmt;
u8 req_flags;
- u8 reserved3[5];
+ u8 reserved3[1];
+ __be16 imm_data_offset; /* new in SRP2 */
+ u8 reserved4[2];
u8 initiator_port_id[16];
u8 target_port_id[16];
};
@@ -144,6 +154,16 @@ struct srp_login_req_rdma {
__be32 req_it_iu_len;
u8 initiator_port_id[16];
u8 target_port_id[16];
+ __be16 imm_data_offset;
+ u8 reserved[6];
+};
+
+/* srp_login_rsp.rsp_flags */
+enum {
+ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+ SRP_LOGIN_RSP_IMMED_SUPP = 0x80, /* new in SRP2 */
};
/*
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 8ee8991aa099..4be1aa4435ae 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2015 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SOC_RASPBERRY_FIRMWARE_H__
@@ -75,6 +72,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020,
RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021,
RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030,
+ RPI_FIRMWARE_GET_THROTTLED = 0x00030046,
RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index eaaf56df4086..5b99cb2ea5ef 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+/**
+ * bman_is_probed - Check if bman is probed
+ *
+ * Returns 1 if the bman driver successfully probed, -1 if the bman driver
+ * failed to probe, or 0 if the bman driver has not probed yet.
+ */
+int bman_is_probed(void);
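A hedged sketch of the intended caller pattern, using an assumed foo_probe() in a dependent platform driver.

static int foo_probe(struct platform_device *pdev)
{
	int ret = bman_is_probed();

	if (!ret)
		return -EPROBE_DEFER;	/* bman has not probed yet, retry */
	if (ret < 0)
		return -ENODEV;		/* bman probe failed permanently */

	/* ... normal probe path ... */
	return 0;
}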
+
#endif /* __FSL_BMAN_H */
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
new file mode 100644
index 000000000000..90ae8d191f1a
--- /dev/null
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -0,0 +1,680 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPAA2_FD_H
+#define __FSL_DPAA2_FD_H
+
+#include <linux/kernel.h>
+
+/**
+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
+ *
+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
+ * Frames can be enqueued and dequeued to Frame Queues (FQs) which are consumed
+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE)
+ *
+ * There are three types of frames: single, scatter gather, and frame lists.
+ *
+ * The set of APIs in this file must be used to create, manipulate and
+ * query Frame Descriptors.
+ */
+
+/**
+ * struct dpaa2_fd - Struct describing FDs
+ * @words: for easier/faster copying the whole FD structure
+ * @addr: address in the FD
+ * @len: length in the FD
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits...including dd, sc, va, err, etc
+ * @flc: flow context address
+ *
+ * This structure represents the basic Frame Descriptor used in the system.
+ */
+struct dpaa2_fd {
+ union {
+ u32 words[8];
+ struct dpaa2_fd_simple {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+ } simple;
+ };
+};
+
+#define FD_SHORT_LEN_FLAG_MASK 0x1
+#define FD_SHORT_LEN_FLAG_SHIFT 14
+#define FD_SHORT_LEN_MASK 0x3FFFF
+#define FD_OFFSET_MASK 0x0FFF
+#define FD_FORMAT_MASK 0x3
+#define FD_FORMAT_SHIFT 12
+#define FD_BPID_MASK 0x3FFF
+#define SG_SHORT_LEN_FLAG_MASK 0x1
+#define SG_SHORT_LEN_FLAG_SHIFT 14
+#define SG_SHORT_LEN_MASK 0x1FFFF
+#define SG_OFFSET_MASK 0x0FFF
+#define SG_FORMAT_MASK 0x3
+#define SG_FORMAT_SHIFT 12
+#define SG_BPID_MASK 0x3FFF
+#define SG_FINAL_FLAG_MASK 0x1
+#define SG_FINAL_FLAG_SHIFT 15
+#define FL_SHORT_LEN_FLAG_MASK 0x1
+#define FL_SHORT_LEN_FLAG_SHIFT 14
+#define FL_SHORT_LEN_MASK 0x3FFFF
+#define FL_OFFSET_MASK 0x0FFF
+#define FL_FORMAT_MASK 0x3
+#define FL_FORMAT_SHIFT 12
+#define FL_BPID_MASK 0x3FFF
+#define FL_FINAL_FLAG_MASK 0x1
+#define FL_FINAL_FLAG_SHIFT 15
+
+/* Error bits in FD CTRL */
+#define FD_CTRL_ERR_MASK 0x000000FF
+#define FD_CTRL_UFD 0x00000004
+#define FD_CTRL_SBE 0x00000008
+#define FD_CTRL_FLC 0x00000010
+#define FD_CTRL_FSE 0x00000020
+#define FD_CTRL_FAERR 0x00000040
+
+/* Annotation bits in FD CTRL */
+#define FD_CTRL_PTA 0x00800000
+#define FD_CTRL_PTV1 0x00400000
+
+enum dpaa2_fd_format {
+ dpaa2_fd_single = 0,
+ dpaa2_fd_list,
+ dpaa2_fd_sg
+};
+
+/**
+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the address in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.addr);
+}
+
+/**
+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ * @addr: the address needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
+{
+ fd->simple.addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the frame context field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.frc);
+}
+
+/**
+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ * @frc: the frame context needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
+{
+ fd->simple.frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the control bits field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.ctrl);
+}
+
+/**
+ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ * @ctrl: the control bits to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
+{
+ fd->simple.ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the flow context in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.flc);
+}
+
+/**
+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
+ * @fd: the given frame descriptor
+ * @flc_addr: the flow context needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
+{
+ fd->simple.flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
+{
+ return !!((le16_to_cpu(fd->simple.format_offset) >>
+ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fd_get_len() - Get the length in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the length field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
+{
+ if (dpaa2_fd_short_len(fd))
+ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fd->simple.len);
+}
+
+/**
+ * dpaa2_fd_set_len() - Set the length field of frame descriptor
+ * @fd: the given frame descriptor
+ * @len: the length needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
+{
+ fd->simple.len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the offset.
+ */
+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
+ * @fd: the given frame descriptor
+ * @offset: the offset needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
+{
+ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
+ fd->simple.format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fd_format dpaa2_fd_get_format(
+ const struct dpaa2_fd *fd)
+{
+ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
+ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fd_set_format() - Set the format field of frame descriptor
+ * @fd: the given frame descriptor
+ * @format: the format needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
+ enum dpaa2_fd_format format)
+{
+ fd->simple.format_offset &=
+ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
+ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the buffer pool id.
+ */
+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
+}
+
+/**
+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
+ * @fd: the given frame descriptor
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
+{
+ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
+ fd->simple.bpid |= cpu_to_le16(bpid);
+}
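Pulling the setters together, a minimal sketch of describing one contiguous, DMA-mapped buffer as a single-format frame; foo_build_fd() and its arguments are assumptions, not part of this header.

static void foo_build_fd(struct dpaa2_fd *fd, dma_addr_t iova, u32 len,
			 u16 bpid)
{
	memset(fd, 0, sizeof(*fd));
	dpaa2_fd_set_addr(fd, iova);
	dpaa2_fd_set_offset(fd, 0);
	dpaa2_fd_set_len(fd, len);
	dpaa2_fd_set_bpid(fd, bpid);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
}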
+
+/**
+ * struct dpaa2_sg_entry - the scatter-gathering structure
+ * @addr: address of the sg entry
+ * @len: length in this sg entry
+ * @bpid: buffer pool id
+ * @format_offset: format and offset fields
+ */
+struct dpaa2_sg_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+};
+
+enum dpaa2_sg_format {
+ dpaa2_sg_single = 0,
+ dpaa2_sg_frame_data,
+ dpaa2_sg_sgt_ext
+};
+
+/* Accessors for SG entry fields */
+
+/**
+ * dpaa2_sg_get_addr() - Get the address from SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the address.
+ */
+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
+{
+ return (dma_addr_t)le64_to_cpu(sg->addr);
+}
+
+/**
+ * dpaa2_sg_set_addr() - Set the address in SG entry
+ * @sg: the given scatter-gathering object
+ * @addr: the address to be set
+ */
+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
+{
+ sg->addr = cpu_to_le64(addr);
+}
+
+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
+{
+ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
+ & SG_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_sg_get_len() - Get the length in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the length.
+ */
+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
+{
+ if (dpaa2_sg_short_len(sg))
+ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
+
+ return le32_to_cpu(sg->len);
+}
+
+/**
+ * dpaa2_sg_set_len() - Set the length in SG entry
+ * @sg: the given scatter-gathering object
+ * @len: the length to be set
+ */
+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
+{
+ sg->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_sg_get_offset() - Get the offset in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_sg_set_offset() - Set the offset in SG entry
+ * @sg: the given scatter-gathering object
+ * @offset: the offset to be set
+ */
+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
+ u16 offset)
+{
+ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
+ sg->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_sg_get_format() - Get the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_sg_format
+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
+{
+ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
+ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_sg_set_format() - Set the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ * @format: the format to be set
+ */
+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
+ enum dpaa2_sg_format format)
+{
+ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
+ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the bpid.
+ */
+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
+}
+
+/**
+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ * @bpid: the bpid to be set
+ */
+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
+{
+ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
+ sg->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_sg_is_final() - Check final bit in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return true if the final bit is set in the SG entry.
+ */
+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
+{
+ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_sg_set_final() - Set the final bit in SG entry
+ * @sg: the given scatter-gathering object
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
+{
+ sg->format_offset &= cpu_to_le16((~(SG_FINAL_FLAG_MASK
+ << SG_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
+}
+
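As a usage illustration (not part of this patch), here is a minimal sketch of filling in one scatter/gather entry with the accessors above; the table pointer, index, buffer address, length and bpid are caller-supplied placeholders:

static void example_fill_last_sg_entry(struct dpaa2_sg_entry *sgt, int i,
                                       dma_addr_t buf_addr, u32 buf_len,
                                       u16 bpid)
{
        dpaa2_sg_set_addr(&sgt[i], buf_addr);
        dpaa2_sg_set_len(&sgt[i], buf_len);
        dpaa2_sg_set_bpid(&sgt[i], bpid);
        dpaa2_sg_set_format(&sgt[i], dpaa2_sg_single);
        /* mark the end of the SG table */
        dpaa2_sg_set_final(&sgt[i], true);
}
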
+/**
+ * struct dpaa2_fl_entry - structure for frame list entry.
+ * @addr: address in the FLE
+ * @len: length in the FLE
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits, including pta, pvt1, pvt2, err, etc.
+ * @flc: flow context address
+ */
+struct dpaa2_fl_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+};
+
+enum dpaa2_fl_format {
+ dpaa2_fl_single = 0,
+ dpaa2_fl_res,
+ dpaa2_fl_sg
+};
+
+/**
+ * dpaa2_fl_get_addr() - get the addr field of FLE
+ * @fle: the given frame list entry
+ *
+ * Return the address in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->addr);
+}
+
+/**
+ * dpaa2_fl_set_addr() - Set the addr field of FLE
+ * @fle: the given frame list entry
+ * @addr: the address needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
+ dma_addr_t addr)
+{
+ fle->addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fl_get_frc() - Get the frame context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the frame context field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->frc);
+}
+
+/**
+ * dpaa2_fl_set_frc() - Set the frame context in the FLE
+ * @fle: the given frame list entry
+ * @frc: the frame context needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
+{
+ fle->frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the control bits field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->ctrl);
+}
+
+/**
+ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
+ * @fle: the given frame list entry
+ * @ctrl: the control bits to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
+{
+ fle->ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fl_get_flc() - Get the flow context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the flow context in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->flc);
+}
+
+/**
+ * dpaa2_fl_set_flc() - Set the flow context field of FLE
+ * @fle: the given frame list entry
+ * @flc_addr: the flow context needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
+ dma_addr_t flc_addr)
+{
+ fle->flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
+{
+ return !!((le16_to_cpu(fle->format_offset) >>
+ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fl_get_len() - Get the length in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the length field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
+{
+ if (dpaa2_fl_short_len(fle))
+ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fle->len);
+}
+
+/**
+ * dpaa2_fl_set_len() - Set the length field of FLE
+ * @fle: the given frame list entry
+ * @len: the length needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
+{
+ fle->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
+ * @fle: the given frame list entry
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fl_set_offset() - Set the offset field of FLE
+ * @fle: the given frame list entry
+ * @offset: the offset needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
+{
+ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
+ fle->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fl_get_format() - Get the format field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fl_format dpaa2_fl_get_format(const struct dpaa2_fl_entry *fle)
+{
+ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
+ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fl_set_format() - Set the format field of FLE
+ * @fle: the given frame list entry
+ * @format: the format needs to be set in frame list entry
+ */
+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
+ enum dpaa2_fl_format format)
+{
+ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
+ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the buffer pool id.
+ */
+static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
+}
+
+/**
+ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
+ * @fle: the given frame list entry
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
+{
+ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
+ fle->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_fl_is_final() - Check final bit in FLE
+ * @fle: the given frame list entry
+ *
+ * Return true if the final bit is set in the frame list entry.
+ */
+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
+{
+ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_fl_set_final() - Set the final bit in FLE
+ * @fle: the given frame list entry
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
+{
+ fle->format_offset &= cpu_to_le16((~(FL_FINAL_FLAG_MASK <<
+ FL_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
+}
+
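As an illustration (not part of this patch), a hedged sketch of describing an input/output buffer pair with two frame list entries and marking the second one as final; the addresses and lengths are placeholders:

static void example_fill_fle_pair(struct dpaa2_fl_entry fle[2],
                                  dma_addr_t in, u32 in_len,
                                  dma_addr_t out, u32 out_len)
{
        dpaa2_fl_set_addr(&fle[0], in);
        dpaa2_fl_set_len(&fle[0], in_len);
        dpaa2_fl_set_format(&fle[0], dpaa2_fl_single);

        dpaa2_fl_set_addr(&fle[1], out);
        dpaa2_fl_set_len(&fle[1], out_len);
        dpaa2_fl_set_format(&fle[1], dpaa2_fl_single);
        /* last entry in the frame list */
        dpaa2_fl_set_final(&fle[1], true);
}
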
+#endif /* __FSL_DPAA2_FD_H */
diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h
new file mode 100644
index 000000000000..2bfc379d3dc9
--- /dev/null
+++ b/include/soc/fsl/dpaa2-global.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPAA2_GLOBAL_H
+#define __FSL_DPAA2_GLOBAL_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include "dpaa2-fd.h"
+
+struct dpaa2_dq {
+ union {
+ struct common {
+ u8 verb;
+ u8 reserved[63];
+ } common;
+ struct dq {
+ u8 verb;
+ u8 stat;
+ __le16 seqnum;
+ __le16 oprid;
+ u8 reserved;
+ u8 tok;
+ __le32 fqid;
+ u32 reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ u8 fd[32];
+ } dq;
+ struct scn {
+ u8 verb;
+ u8 stat;
+ u8 state;
+ u8 reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
+};
+
+/* Parsing frame dequeue results */
+/* FQ empty */
+#define DPAA2_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define DPAA2_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
+/* valid frame */
+#define DPAA2_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define DPAA2_DQ_STAT_ODPVALID 0x04
+/* volatile dequeue */
+#define DPAA2_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define DPAA2_DQ_STAT_EXPIRED 0x01
+
+#define DQ_FQID_MASK 0x00FFFFFF
+#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
+
+/**
+ * dpaa2_dq_flags() - Get the stat field of dequeue response
+ * @dq: the dequeue result.
+ */
+static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
+{
+ return dq->dq.stat;
+}
+
+/**
+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result
+ *
+ * Return 1 for a volatile (pull) dequeue, 0 for a static dequeue.
+ */
+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
+{
+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
+}
+
+/**
+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
+ * @dq: the dequeue result
+ *
+ * Return true if the pull command has completed.
+ */
+static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
+{
+ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
+}
+
+/**
+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
+ * @dq: the dequeue result
+ *
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ *
+ * Return seqnum.
+ */
+static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.seqnum);
+}
+
+/**
+ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
+ * @dq: the dequeue result
+ *
+ * odpid is valid only if ODPVALID flag is TRUE.
+ *
+ * Return odpid.
+ */
+static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.oprid);
+}
+
+/**
+ * dpaa2_dq_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return fqid.
+ */
+static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
+}
+
+/**
+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the byte count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_byte_cnt);
+}
+
+/**
+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
+}
+
+/**
+ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame queue context.
+ */
+static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
+{
+ return le64_to_cpu(dq->dq.fqd_ctx);
+}
+
+/**
+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame descriptor.
+ */
+static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
+{
+ return (const struct dpaa2_fd *)&dq->dq.fd[0];
+}
+
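As an illustration (not part of this patch), a minimal sketch of extracting the frame descriptor from a dequeue result only when it actually carries a valid frame:

static const struct dpaa2_fd *example_dq_to_fd(const struct dpaa2_dq *dq)
{
        /* e.g. the FQ-empty result of a pull command has no frame attached */
        if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
                return NULL;

        return dpaa2_dq_fd(dq);
}
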
+#define DPAA2_CSCN_SIZE sizeof(struct dpaa2_dq)
+#define DPAA2_CSCN_ALIGN 16
+#define DPAA2_CSCN_STATE_CG BIT(0)
+
+/**
+ * dpaa2_cscn_state_congested() - Check congestion state
+ * @cscn: congestion SCN (delivered to WQ or memory)
+ *
+ * Return true if congested.
+ */
+static inline bool dpaa2_cscn_state_congested(struct dpaa2_dq *cscn)
+{
+ return !!(cscn->scn.state & DPAA2_CSCN_STATE_CG);
+}
+
+#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
new file mode 100644
index 000000000000..3fbd71c27ba3
--- /dev/null
+++ b/include/soc/fsl/dpaa2-io.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP
+ *
+ */
+#ifndef __FSL_DPAA2_IO_H
+#define __FSL_DPAA2_IO_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+
+#include "dpaa2-fd.h"
+#include "dpaa2-global.h"
+
+struct dpaa2_io;
+struct dpaa2_io_store;
+struct device;
+
+/**
+ * DOC: DPIO Service
+ *
+ * The DPIO service provides APIs for users to interact with the datapath
+ * by enqueueing and dequeuing frame descriptors.
+ *
+ * The following set of APIs can be used to enqueue and dequeue frames
+ * as well as to produce notification callbacks when data is available
+ * for dequeue.
+ */
+
+#define DPAA2_IO_ANY_CPU -1
+
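As a usage illustration (not part of this patch), a rough sketch of one enqueue followed by a volatile pull of the same FQ, using the service and store APIs declared below. The fqid, device and frame fields are placeholders, the dpaa2_fd_set_*()/dpaa2_fd_get_len() helpers come from dpaa2-fd.h, and the NULL checks and busy-wait are simplified for brevity:

static int example_enqueue_and_pull(struct device *dev, u32 fqid,
                                    dma_addr_t buf, u32 len)
{
        struct dpaa2_io *io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
        struct dpaa2_io_store *store;
        struct dpaa2_dq *dq;
        struct dpaa2_fd fd = {};
        int is_last = 0;
        int err;

        /* describe one contiguous buffer in the frame descriptor */
        dpaa2_fd_set_addr(&fd, buf);
        dpaa2_fd_set_len(&fd, len);
        dpaa2_fd_set_format(&fd, dpaa2_fd_single);

        err = dpaa2_io_service_enqueue_fq(io, fqid, &fd);
        if (err)
                return err;

        store = dpaa2_io_store_create(16, dev);
        if (!store)
                return -ENOMEM;

        /* issue the volatile dequeue and spin until its last result arrives */
        err = dpaa2_io_service_pull_fq(io, fqid, store);
        while (!err && !is_last) {
                dq = dpaa2_io_store_next(store, &is_last);
                if (dq && (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
                        pr_info("pulled a frame of %u bytes\n",
                                dpaa2_fd_get_len(dpaa2_dq_fd(dq)));
        }

        dpaa2_io_store_destroy(store);
        return err;
}
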
+/**
+ * struct dpaa2_io_desc - The DPIO descriptor
+ * @receives_notifications: Use notification mode. Non-zero if the DPIO
+ * has a channel.
+ * @has_8prio: Set to non-zero for channel with 8 priority WQs. Ignored
+ * unless receives_notifications is TRUE.
+ * @cpu: The cpu index that at least interrupt handlers will
+ * execute on.
+ * @regs_cena: The cache enabled regs.
+ * @regs_cinh: The cache inhibited regs
+ * @dpio_id: The dpio index
+ * @qman_version: The qman version
+ *
+ * Describes the attributes and features of the DPIO object.
+ */
+struct dpaa2_io_desc {
+ int receives_notifications;
+ int has_8prio;
+ int cpu;
+ void *regs_cena;
+ void __iomem *regs_cinh;
+ int dpio_id;
+ u32 qman_version;
+};
+
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
+
+void dpaa2_io_down(struct dpaa2_io *d);
+
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
+
+struct dpaa2_io *dpaa2_io_service_select(int cpu);
+
+/**
+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
+ * @cb: The callback to be invoked when the notification arrives
+ * @is_cdan: Zero for FQDAN, non-zero for CDAN
+ * @id: FQID or channel ID, needed for rearm
+ * @desired_cpu: The cpu on which the notifications will show up. Use
+ * DPAA2_IO_ANY_CPU if it does not matter
+ * @dpio_id: The dpio index
+ * @qman64: The 64-bit context value that shows up in the FQDAN/CDAN.
+ * @node: The list node
+ * @dpio_private: The dpio object internal to dpio_service
+ *
+ * Used when a FQDAN/CDAN registration is made by drivers.
+ */
+struct dpaa2_io_notification_ctx {
+ void (*cb)(struct dpaa2_io_notification_ctx *ctx);
+ int is_cdan;
+ u32 id;
+ int desired_cpu;
+ int dpio_id;
+ u64 qman64;
+ struct list_head node;
+ void *dpio_private;
+};
+
+int dpaa2_io_service_register(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+int dpaa2_io_service_rearm(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+
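A hedged sketch (not part of this patch) of registering for FQ data-availability notifications with the context structure and dpaa2_io_service_register() above; the callback body and fqid are placeholders:

static void example_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        /* typically: schedule NAPI or issue a pull against ctx->id here */
}

static int example_register_fqdan(struct dpaa2_io *io, u32 fqid)
{
        /* the context must stay valid while registered, hence static here */
        static struct dpaa2_io_notification_ctx nctx;

        nctx.cb = example_fqdan_cb;
        nctx.is_cdan = 0;               /* FQDAN (frame queue), not CDAN */
        nctx.id = fqid;
        nctx.desired_cpu = DPAA2_IO_ANY_CPU;

        return dpaa2_io_service_register(io, &nctx);
}
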
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s);
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s);
+
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd);
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
+ u16 qdbin, const struct dpaa2_fd *fd);
+int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
+ const u64 *buffers, unsigned int num_buffers);
+int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
+ u64 *buffers, unsigned int num_buffers);
+
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev);
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt);
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
+ u32 *num);
+#endif /* __FSL_DPAA2_IO_H */
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index 3ee9e7c1a7d7..dcd6b865b590 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -41,8 +41,12 @@
#define R_L_S 0x0800 /* last */
#define R_F_S 0x0400 /* first */
#define R_CM_S 0x0200 /* continuous mode */
+#define R_LG_S 0x0020 /* frame length */
+#define R_NO_S 0x0010 /* nonoctet */
+#define R_AB_S 0x0008 /* abort */
#define R_CR_S 0x0004 /* crc */
-#define R_OV_S 0x0002 /* crc */
+#define R_OV_S 0x0002 /* overrun */
+#define R_CD_S 0x0001 /* carrier detect */
/* transmit BD's status */
#define T_R_S 0x8000 /* ready bit */
@@ -51,6 +55,8 @@
#define T_L_S 0x0800 /* last */
#define T_TC_S 0x0400 /* crc */
#define T_TM_S 0x0200 /* continuous mode */
+#define T_UN_S 0x0002 /* hdlc underrun */
+#define T_CT_S 0x0001 /* hdlc carrier lost */
/* Rx Data buffer must be 4 bytes aligned in most cases */
#define UCC_FAST_RX_ALIGN 4
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index d4dfefdee6c1..5cc7af06c1ba 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1186,4 +1186,44 @@ int qman_alloc_cgrid_range(u32 *result, u32 count);
*/
int qman_release_cgrid(u32 id);
+/**
+ * qman_is_probed - Check if qman is probed
+ *
+ * Returns 1 if the qman driver probed successfully, -1 if it failed to
+ * probe, or 0 if it has not probed yet.
+ */
+int qman_is_probed(void);
+
+/**
+ * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
+ * @portal: portal to get the value for
+ * @ithresh: threshold pointer
+ */
+void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);
+
+/**
+ * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
+ * @portal: portal to set the new value on
+ * @ithresh: new threshold value
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
+
+/**
+ * qman_portal_get_iperiod - Get coalesce interrupt period
+ * @portal: portal to get the value for
+ * @iperiod: period pointer
+ */
+void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);
+
+/**
+ * qman_portal_set_iperiod - Set coalesce interrupt period
+ * @portal: portal to set the new value on
+ * @iperiod: new period value
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
+
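A minimal sketch (not part of this patch) of tuning DQRR interrupt coalescing on a portal with the two setters above; the threshold and period values are arbitrary placeholders:

static int example_tune_coalescing(struct qman_portal *portal)
{
        int err;

        /* raise an interrupt after 8 DQRR entries have accumulated */
        err = qman_dqrr_set_ithresh(portal, 8);
        if (err)
                return err;

        /* and set the coalescing period (in hardware-defined units) */
        return qman_portal_set_iperiod(portal, 0x1000);
}
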
#endif /* __FSL_QMAN_H */
diff --git a/include/soc/mscc/ocelot_hsio.h b/include/soc/mscc/ocelot_hsio.h
new file mode 100644
index 000000000000..43112dd7313a
--- /dev/null
+++ b/include/soc/mscc/ocelot_hsio.h
@@ -0,0 +1,859 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_HSIO_H_
+#define _MSCC_OCELOT_HSIO_H_
+
+#define HSIO_PLL5G_CFG0 0x0000
+#define HSIO_PLL5G_CFG1 0x0004
+#define HSIO_PLL5G_CFG2 0x0008
+#define HSIO_PLL5G_CFG3 0x000c
+#define HSIO_PLL5G_CFG4 0x0010
+#define HSIO_PLL5G_CFG5 0x0014
+#define HSIO_PLL5G_CFG6 0x0018
+#define HSIO_PLL5G_STATUS0 0x001c
+#define HSIO_PLL5G_STATUS1 0x0020
+#define HSIO_PLL5G_BIST_CFG0 0x0024
+#define HSIO_PLL5G_BIST_CFG1 0x0028
+#define HSIO_PLL5G_BIST_CFG2 0x002c
+#define HSIO_PLL5G_BIST_STAT0 0x0030
+#define HSIO_PLL5G_BIST_STAT1 0x0034
+#define HSIO_RCOMP_CFG0 0x0038
+#define HSIO_RCOMP_STATUS 0x003c
+#define HSIO_SYNC_ETH_CFG 0x0040
+#define HSIO_SYNC_ETH_PLL_CFG 0x0048
+#define HSIO_S1G_DES_CFG 0x004c
+#define HSIO_S1G_IB_CFG 0x0050
+#define HSIO_S1G_OB_CFG 0x0054
+#define HSIO_S1G_SER_CFG 0x0058
+#define HSIO_S1G_COMMON_CFG 0x005c
+#define HSIO_S1G_PLL_CFG 0x0060
+#define HSIO_S1G_PLL_STATUS 0x0064
+#define HSIO_S1G_DFT_CFG0 0x0068
+#define HSIO_S1G_DFT_CFG1 0x006c
+#define HSIO_S1G_DFT_CFG2 0x0070
+#define HSIO_S1G_TP_CFG 0x0074
+#define HSIO_S1G_RC_PLL_BIST_CFG 0x0078
+#define HSIO_S1G_MISC_CFG 0x007c
+#define HSIO_S1G_DFT_STATUS 0x0080
+#define HSIO_S1G_MISC_STATUS 0x0084
+#define HSIO_MCB_S1G_ADDR_CFG 0x0088
+#define HSIO_S6G_DIG_CFG 0x008c
+#define HSIO_S6G_DFT_CFG0 0x0090
+#define HSIO_S6G_DFT_CFG1 0x0094
+#define HSIO_S6G_DFT_CFG2 0x0098
+#define HSIO_S6G_TP_CFG0 0x009c
+#define HSIO_S6G_TP_CFG1 0x00a0
+#define HSIO_S6G_RC_PLL_BIST_CFG 0x00a4
+#define HSIO_S6G_MISC_CFG 0x00a8
+#define HSIO_S6G_OB_ANEG_CFG 0x00ac
+#define HSIO_S6G_DFT_STATUS 0x00b0
+#define HSIO_S6G_ERR_CNT 0x00b4
+#define HSIO_S6G_MISC_STATUS 0x00b8
+#define HSIO_S6G_DES_CFG 0x00bc
+#define HSIO_S6G_IB_CFG 0x00c0
+#define HSIO_S6G_IB_CFG1 0x00c4
+#define HSIO_S6G_IB_CFG2 0x00c8
+#define HSIO_S6G_IB_CFG3 0x00cc
+#define HSIO_S6G_IB_CFG4 0x00d0
+#define HSIO_S6G_IB_CFG5 0x00d4
+#define HSIO_S6G_OB_CFG 0x00d8
+#define HSIO_S6G_OB_CFG1 0x00dc
+#define HSIO_S6G_SER_CFG 0x00e0
+#define HSIO_S6G_COMMON_CFG 0x00e4
+#define HSIO_S6G_PLL_CFG 0x00e8
+#define HSIO_S6G_ACJTAG_CFG 0x00ec
+#define HSIO_S6G_GP_CFG 0x00f0
+#define HSIO_S6G_IB_STATUS0 0x00f4
+#define HSIO_S6G_IB_STATUS1 0x00f8
+#define HSIO_S6G_ACJTAG_STATUS 0x00fc
+#define HSIO_S6G_PLL_STATUS 0x0100
+#define HSIO_S6G_REVID 0x0104
+#define HSIO_MCB_S6G_ADDR_CFG 0x0108
+#define HSIO_HW_CFG 0x010c
+#define HSIO_HW_QSGMII_CFG 0x0110
+#define HSIO_HW_QSGMII_STAT 0x0114
+#define HSIO_CLK_CFG 0x0118
+#define HSIO_TEMP_SENSOR_CTRL 0x011c
+#define HSIO_TEMP_SENSOR_CFG 0x0120
+#define HSIO_TEMP_SENSOR_STAT 0x0124
+
+#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31)
+#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30)
+#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29)
+#define HSIO_PLL5G_CFG0_DIV4 BIT(28)
+#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27)
+#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23))
+#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23)
+#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18)
+#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15)
+#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14)
+#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13)
+#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6)
+#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18)
+#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17)
+#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16)
+#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15)
+#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5)
+#define HSIO_PLL5G_CFG1_PWD_TX BIT(4)
+#define HSIO_PLL5G_CFG1_PWD_RX BIT(3)
+#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2)
+#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1)
+#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0)
+
+#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30)
+#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29)
+#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28)
+#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27)
+#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26)
+#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24)
+#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15)
+#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14)
+#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13)
+#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12)
+#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10)
+#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5)
+#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4)
+#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3)
+#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2)
+#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1)
+#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0)
+
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22))
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22)
+#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19)
+#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18)
+#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17)
+#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16)
+#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15)
+#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14)
+#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13)
+#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12)
+#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11)
+#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10)
+#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9)
+#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8)
+#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0))
+#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0)
+
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16))
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16)
+#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20))
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20)
+#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16)
+#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8)
+#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7)
+#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6)
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0))
+#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0)
+
+#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12)
+#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10)
+#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1)
+#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0)
+
+#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21))
+#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21)
+#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16)
+#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4))
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4)
+#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4)
+#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1)
+#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0)
+
+#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0)
+
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1)
+#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0)
+
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0))
+#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0)
+
+#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13)
+#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12)
+#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10))
+#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10)
+#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8)
+#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4)
+#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0)
+
+#define HSIO_RCOMP_STATUS_BUSY BIT(12)
+#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7)
+#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0))
+#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0)
+
+#define HSIO_SYNC_ETH_CFG_RSZ 0x4
+
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1)
+#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0)
+
+#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0)
+
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11)
+#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8))
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8)
+#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5)
+#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1)
+#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0)
+
+#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24))
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24)
+#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19)
+#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14)
+#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13)
+#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12)
+#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11)
+#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10)
+#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6)
+#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4)
+#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17))
+#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17)
+#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13)
+#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10)
+#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9)
+#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4)
+#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9)
+#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8)
+#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31)
+#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21)
+#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18)
+#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17)
+#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13))
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13)
+#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13)
+#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12)
+#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11)
+#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10)
+#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9)
+#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8)
+#define HSIO_S1G_COMMON_CFG_HRATE BIT(7)
+#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0)
+
+#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22)
+#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6)
+#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5)
+#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3)
+
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12)
+#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11)
+#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10)
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0))
+#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0)
+
+#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16)
+#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7)
+#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3))
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0)
+
+#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31)
+#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20))
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
+#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
+#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
+#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
+#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
+#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
+#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
+#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
+#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
+
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
+
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
+#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
+#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
+#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
+#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
+#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
+#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
+#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
+#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
+#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
+
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
+#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
+#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
+#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
+#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
+#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
+
+#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
+
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
+#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
+#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
+#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
+#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
+#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
+#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
+
+#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
+#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
+#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
+#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
+#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
+#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
+#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
+#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
+#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
+#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
+#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
+#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
+#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
+#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
+#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
+
+#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
+#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
+#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
+#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
+#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
+#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
+#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
+#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
+#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
+#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
+#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
+#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
+#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
+#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
+#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
+
+#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
+#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
+#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
+#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
+#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
+#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
+#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
+#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
+#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
+#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
+#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
+#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
+#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
+#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
+#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
+
+#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
+
+#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
+#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
+#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
+#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
+#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
+#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
+#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
+#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
+#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
+#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
+#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
+#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
+#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
+#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
+#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
+#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
+#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
+#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
+#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
+#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
+
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
+#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
+
+#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
+#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
+#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
+#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
+#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
+#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
+#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
+#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
+#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
+
+#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
+#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
+#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
+#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
+#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
+#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
+#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
+#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
+#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
+#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
+#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
+#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
+#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
+#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
+#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
+
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
+#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
+#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
+#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
+#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
+#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
+
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
+#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
+#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
+#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
+
+#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
+#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
+#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
+#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
+
+#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
+#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
+#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
+#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
+#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
+#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
+
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
+#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
+#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
+#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
+#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
+
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
+#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
+#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
+
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
+#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
+#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
+#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
+
+#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
+#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
+#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
+#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
+#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
+#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
+#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
+#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
+#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
+#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
+#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
+#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
+#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
+#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
+#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
+
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
+#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
+
+#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
+#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
+#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
+#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
+#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
+#define HSIO_HW_CFG_PCIE_ENA BIT(1)
+#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
+
+#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
+#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
+#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
+#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
+
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
+#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
+
+#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
+#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
+
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
+#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
+
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
+#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
+
+#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
+#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
+#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
+
+#endif
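
The macro triples above all follow one pattern per multi-bit field: FIELD(x) shifts a value into place and masks it, FIELD_M is the in-place mask, and FIELD_X(x) extracts the field from a full register value. A minimal read-modify-write sketch using one of these fields, assuming the register has already been ioremapped (the regmap plumbing a real driver would use is omitted):

#include <linux/bits.h>
#include <linux/io.h>

/* Update only the OB_POST0 field of S6G_OB_CFG, preserving the rest. */
static void s6g_ob_set_post0(void __iomem *ob_cfg, u32 post0)
{
	u32 val = readl(ob_cfg);

	val &= ~HSIO_S6G_OB_CFG_OB_POST0_M;       /* clear the field */
	val |= HSIO_S6G_OB_CFG_OB_POST0(post0);   /* insert the new value */
	writel(val, ob_cfg);
}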
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index 578180cbc134..af9722223925 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -18,9 +18,7 @@ enum cmd_db_hw_type {
#if IS_ENABLED(CONFIG_QCOM_COMMAND_DB)
u32 cmd_db_read_addr(const char *resource_id);
-int cmd_db_read_aux_data(const char *resource_id, u8 *data, size_t len);
-
-size_t cmd_db_read_aux_data_len(const char *resource_id);
+const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
@@ -29,12 +27,8 @@ int cmd_db_ready(void);
static inline u32 cmd_db_read_addr(const char *resource_id)
{ return 0; }
-static inline int cmd_db_read_aux_data(const char *resource_id, u8 *data,
- size_t len)
-{ return -ENODEV; }
-
-static inline size_t cmd_db_read_aux_data_len(const char *resource_id)
-{ return -ENODEV; }
+static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
+{ return ERR_PTR(-ENODEV); }
static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
{ return -ENODEV; }
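
This hunk replaces the copy-out pair cmd_db_read_aux_data()/cmd_db_read_aux_data_len() with a single call that returns a pointer into the command DB and reports the length through *len; the !CONFIG_QCOM_COMMAND_DB stub suggests failures are reported as ERR_PTR(). A sketch of how a caller migrates, assuming the real implementation signals errors the same way as the stub:

#include <linux/err.h>
#include <soc/qcom/cmd-db.h>

static int example_use_aux_data(const char *resource_id)
{
	size_t len;
	const void *data = cmd_db_read_aux_data(resource_id, &len);

	if (IS_ERR(data))
		return PTR_ERR(data);

	/* 'data' points at 'len' bytes owned by the command DB; no
	 * caller-side buffer or separate length query is needed anymore. */
	return 0;
}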
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 98d8d38b99a1..ab7f8796a260 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -40,7 +40,6 @@
* @file
*/
-
/**
* @defgroup MRQ MRQ Messages
* @brief Messages sent to/from BPMP via IPC
@@ -53,7 +52,7 @@
*/
/**
- * @addtogroup MRQ_Format Message Format
+ * @addtogroup MRQ_Format
* @{
* The CPU requests the BPMP to perform a particular service by
* sending it an IVC frame containing a single MRQ message. An MRQ
@@ -76,7 +75,7 @@
/**
* @ingroup MRQ_Format
- * @brief header for an MRQ message
+ * @brief Header for an MRQ message
*
* Provides the MRQ number for the MRQ message: #mrq. The remainder of
* the MRQ message is a payload (immediately following the
@@ -86,7 +85,7 @@ struct mrq_request {
/** @brief MRQ number of the request */
uint32_t mrq;
/**
- * @brief flags providing follow up directions to the receiver
+ * @brief Flags providing follow up directions to the receiver
*
* | Bit | Description |
* |-----|--------------------------------------------|
@@ -98,7 +97,7 @@ struct mrq_request {
/**
* @ingroup MRQ_Format
- * @brief header for an MRQ response
+ * @brief Header for an MRQ response
*
* Provides an error code for the associated MRQ message. The
* remainder of the MRQ response is a payload (immediately following
@@ -106,9 +105,9 @@ struct mrq_request {
* mrq_request::mrq
*/
struct mrq_response {
- /** @brief error code for the MRQ request itself */
+ /** @brief Error code for the MRQ request itself */
int32_t err;
- /** @brief reserved for future use */
+ /** @brief Reserved for future use */
uint32_t flags;
} __ABI_PACKED;
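
Both headers above are immediately followed by an MRQ-specific payload in the same IVC frame. A small framing sketch (illustrative only) using MRQ_PING, whose request payload is defined further down in this header:

/* One outbound IVC frame: generic header plus MRQ-specific payload. */
struct ping_frame {
	struct mrq_request hdr;			/* hdr.mrq = MRQ_PING */
	struct mrq_ping_request payload;	/* payload.challenge = ... */
} __ABI_PACKED;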
@@ -152,6 +151,14 @@ struct mrq_response {
#define MRQ_TRACE_ITER 64
#define MRQ_RINGBUF_CONSOLE 65
#define MRQ_PG 66
+#define MRQ_CPU_NDIV_LIMITS 67
+#define MRQ_STRAP 68
+#define MRQ_UPHY 69
+#define MRQ_CPU_AUTO_CC3 70
+#define MRQ_QUERY_FW_TAG 71
+#define MRQ_FMON 72
+#define MRQ_EC 73
+#define MRQ_FBVOLT_STATUS 74
/** @} */
@@ -160,31 +167,35 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 66
+#define MAX_CPU_MRQ_ID 74
/**
- * @addtogroup MRQ_Payloads Message Payloads
+ * @addtogroup MRQ_Payloads
* @{
- * @defgroup Ping
+ * @defgroup Ping Ping
* @defgroup Query_Tag Query Tag
* @defgroup Module Loadable Modules
- * @defgroup Trace
- * @defgroup Debugfs
- * @defgroup Reset
- * @defgroup I2C
- * @defgroup Clocks
+ * @defgroup Trace Trace
+ * @defgroup Debugfs Debug File System
+ * @defgroup Reset Reset
+ * @defgroup I2C I2C
+ * @defgroup Clocks Clocks
* @defgroup ABI_info ABI Info
- * @defgroup MC_Flush MC Flush
- * @defgroup Powergating
- * @defgroup Thermal
+ * @defgroup Powergating Power Gating
+ * @defgroup Thermal Thermal
* @defgroup Vhint CPU Voltage hint
- * @defgroup MRQ_Deprecated Deprecated MRQ messages
- * @defgroup EMC
- * @defgroup RingbufConsole
+ * @defgroup EMC EMC
+ * @defgroup CPU NDIV Limits
+ * @defgroup RingbufConsole Ring Buffer Console
+ * @defgroup Strap Straps
+ * @defgroup UPHY UPHY
+ * @defgroup CC3 Auto-CC3
+ * @defgroup FMON FMON
+ * @defgroup EC EC
+ * @defgroup Fbvolt_status Fuse Burn Voltage Status
* @}
*/
-
/**
* @ingroup MRQ_Codes
* @def MRQ_PING
@@ -214,20 +225,20 @@ struct mrq_response {
/**
* @ingroup Ping
- * @brief request with #MRQ_PING
+ * @brief Request with #MRQ_PING
*
* Used by the sender of an #MRQ_PING message to request a pong from
* recipient. The response from the recipient is computed based on
* #challenge.
*/
struct mrq_ping_request {
-/** @brief arbitrarily chosen value */
+/** @brief Arbitrarily chosen value */
uint32_t challenge;
} __ABI_PACKED;
/**
* @ingroup Ping
- * @brief response to #MRQ_PING
+ * @brief Response to #MRQ_PING
*
* Sent in response to an #MRQ_PING message. #reply should be the
* mrq_ping_request challenge left shifted by 1 with the carry-bit
@@ -235,14 +246,16 @@ struct mrq_ping_request {
*
*/
struct mrq_ping_response {
- /** @brief response to the MRQ_PING challege */
+	/** @brief Response to the MRQ_PING challenge */
uint32_t reply;
} __ABI_PACKED;
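
Since the reply is the challenge shifted left by one with the carry bit dropped, the expected value can be computed directly; a tiny sketch:

#include <stdint.h>

/* Expected MRQ_PING reply: a plain 32-bit left shift drops the carry. */
static inline uint32_t expected_ping_reply(uint32_t challenge)
{
	return challenge << 1;
}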
/**
* @ingroup MRQ_Codes
* @def MRQ_QUERY_TAG
- * @brief Query BPMP firmware's tag (i.e. version information)
+ * @brief Query BPMP firmware's tag (i.e. unique identifier)
+ *
+ * @deprecated Use #MRQ_QUERY_FW_TAG instead.
*
* * Platforms: All
* * Initiators: CCPLEX
@@ -254,25 +267,50 @@ struct mrq_ping_response {
/**
* @ingroup Query_Tag
- * @brief request with #MRQ_QUERY_TAG
- *
- * Used by #MRQ_QUERY_TAG call to ask BPMP to fill in the memory
- * pointed by #addr with BPMP firmware header.
+ * @brief Request with #MRQ_QUERY_TAG
*
- * The sender is reponsible for ensuring that #addr is mapped in to
- * the recipient's address map.
+ * @deprecated This structure will be removed in a future version.
+ * Use MRQ_QUERY_FW_TAG instead.
*/
struct mrq_query_tag_request {
- /** @brief base address to store the firmware header */
+ /** @brief Base address to store the firmware tag */
uint32_t addr;
} __ABI_PACKED;
+
/**
* @ingroup MRQ_Codes
- * @def MRQ_MODULE_LOAD
- * @brief dynamically load a BPMP code module
+ * @def MRQ_QUERY_FW_TAG
+ * @brief Query BPMP firmware's tag (i.e. unique identifier)
*
* * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_query_fw_tag_response
+ *
+ */
+
+/**
+ * @ingroup Query_Tag
+ * @brief Response to #MRQ_QUERY_FW_TAG
+ *
+ * Sent in response to #MRQ_QUERY_FW_TAG message. #tag contains the unique
+ * identifier for the version of firmware issuing the reply.
+ *
+ */
+struct mrq_query_fw_tag_response {
+ /** @brief Array to store tag information */
+ uint8_t tag[32];
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_LOAD
+ * @brief Dynamically load a BPMP code module
+ *
+ * * Platforms: T210, T214, T186
+ * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_load_request
@@ -284,7 +322,7 @@ struct mrq_query_tag_request {
/**
* @ingroup Module
- * @brief request with #MRQ_MODULE_LOAD
+ * @brief Request with #MRQ_MODULE_LOAD
*
* Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
* load the code located at #phys_addr and having size #size
@@ -300,29 +338,31 @@ struct mrq_query_tag_request {
*
*/
struct mrq_module_load_request {
- /** @brief base address of the code to load. Treated as (void *) */
+ /** @brief Base address of the code to load. Treated as (void *) */
uint32_t phys_addr; /* (void *) */
- /** @brief size in bytes of code to load */
+ /** @brief Size in bytes of code to load */
uint32_t size;
} __ABI_PACKED;
/**
* @ingroup Module
- * @brief response to #MRQ_MODULE_LOAD
+ * @brief Response to #MRQ_MODULE_LOAD
*
* @todo document mrq_response::err
*/
struct mrq_module_load_response {
- /** @brief handle to the loaded module */
+ /** @brief Handle to the loaded module */
uint32_t base;
} __ABI_PACKED;
+/** @endcond*/
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_UNLOAD
- * @brief unload a previously loaded code module
+ * @brief Unload a previously loaded code module
*
- * * Platforms: All
+ * * Platforms: T210, T214, T186
+ * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_unload_request
@@ -333,20 +373,21 @@ struct mrq_module_load_response {
/**
* @ingroup Module
- * @brief request with #MRQ_MODULE_UNLOAD
+ * @brief Request with #MRQ_MODULE_UNLOAD
*
* Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
* module be unloaded.
*/
struct mrq_module_unload_request {
- /** @brief handle of the module to unload */
+ /** @brief Handle of the module to unload */
uint32_t base;
} __ABI_PACKED;
+/** @endcond*/
/**
* @ingroup MRQ_Codes
* @def MRQ_TRACE_MODIFY
- * @brief modify the set of enabled trace events
+ * @brief Modify the set of enabled trace events
*
* * Platforms: All
* * Initiators: CCPLEX
@@ -359,22 +400,22 @@ struct mrq_module_unload_request {
/**
* @ingroup Trace
- * @brief request with #MRQ_TRACE_MODIFY
+ * @brief Request with #MRQ_TRACE_MODIFY
*
 * Used by %MRQ_TRACE_MODIFY calls to enable or disable specific trace
* events. #set takes precedence for any bit set in both #set and
* #clr.
*/
struct mrq_trace_modify_request {
- /** @brief bit mask of trace events to disable */
+ /** @brief Bit mask of trace events to disable */
uint32_t clr;
- /** @brief bit mask of trace events to enable */
+ /** @brief Bit mask of trace events to enable */
uint32_t set;
} __ABI_PACKED;
/**
* @ingroup Trace
- * @brief response to #MRQ_TRACE_MODIFY
+ * @brief Response to #MRQ_TRACE_MODIFY
*
 * Sent in response to an #MRQ_TRACE_MODIFY message. #mask reflects the
* state of which events are enabled after the recipient acted on the
@@ -382,7 +423,7 @@ struct mrq_trace_modify_request {
*
*/
struct mrq_trace_modify_response {
- /** @brief bit mask of trace event enable states */
+ /** @brief Bit mask of trace event enable states */
uint32_t mask;
} __ABI_PACKED;
@@ -407,7 +448,7 @@ struct mrq_trace_modify_response {
/**
* @ingroup Trace
- * @brief request with #MRQ_WRITE_TRACE
+ * @brief Request with #MRQ_WRITE_TRACE
*
* Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
* data from the recipient's local buffer to the output buffer. #area
@@ -420,22 +461,22 @@ struct mrq_trace_modify_response {
* overwrites.
*/
struct mrq_write_trace_request {
- /** @brief base address of output buffer */
+ /** @brief Base address of output buffer */
uint32_t area;
- /** @brief size in bytes of the output buffer */
+ /** @brief Size in bytes of the output buffer */
uint32_t size;
} __ABI_PACKED;
/**
* @ingroup Trace
- * @brief response to #MRQ_WRITE_TRACE
+ * @brief Response to #MRQ_WRITE_TRACE
*
* Once this response is sent, the respondent will not access the
* output buffer further.
*/
struct mrq_write_trace_response {
/**
- * @brief flag whether more data remains in local buffer
+ * @brief Flag whether more data remains in local buffer
*
* Value is 1 if the entire local trace buffer has been
 * drained to the output buffer. Value is 0 otherwise.
@@ -456,9 +497,10 @@ struct mrq_threaded_ping_response {
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_MAIL
- * @brief send a message to a loadable module
+ * @brief Send a message to a loadable module
*
- * * Platforms: All
+ * * Platforms: T210, T214, T186
+ * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_module_mail_request
@@ -469,12 +511,12 @@ struct mrq_threaded_ping_response {
/**
* @ingroup Module
- * @brief request with #MRQ_MODULE_MAIL
+ * @brief Request with #MRQ_MODULE_MAIL
*/
struct mrq_module_mail_request {
- /** @brief handle to the previously loaded module */
+ /** @brief Handle to the previously loaded module */
uint32_t base;
- /** @brief module-specific mail payload
+ /** @brief Module-specific mail payload
*
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
@@ -484,23 +526,24 @@ struct mrq_module_mail_request {
/**
* @ingroup Module
- * @brief response to #MRQ_MODULE_MAIL
+ * @brief Response to #MRQ_MODULE_MAIL
*/
struct mrq_module_mail_response {
- /** @brief module-specific mail payload
+ /** @brief Module-specific mail payload
*
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
uint8_t data[EMPTY_ARRAY];
} __ABI_PACKED;
+/** @endcond */
/**
* @ingroup MRQ_Codes
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_debugfs_request
@@ -529,65 +572,70 @@ struct mrq_module_mail_response {
*
* @}
*/
+
/** @ingroup Debugfs */
enum mrq_debugfs_commands {
+ /** @brief Perform read */
CMD_DEBUGFS_READ = 1,
+ /** @brief Perform write */
CMD_DEBUGFS_WRITE = 2,
+	/** @brief Perform directory dump */
CMD_DEBUGFS_DUMPDIR = 3,
+ /** @brief Not a command */
CMD_DEBUGFS_MAX
};
/**
* @ingroup Debugfs
- * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ * @brief Parameters for CMD_DEBUGFS_READ/WRITE command
*/
struct cmd_debugfs_fileop_request {
- /** @brief physical address pointing at filename */
+ /** @brief Physical address pointing at filename */
uint32_t fnameaddr;
- /** @brief length in bytes of filename buffer */
+ /** @brief Length in bytes of filename buffer */
uint32_t fnamelen;
- /** @brief physical address pointing to data buffer */
+ /** @brief Physical address pointing to data buffer */
uint32_t dataaddr;
- /** @brief length in bytes of data buffer */
+ /** @brief Length in bytes of data buffer */
uint32_t datalen;
} __ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ * @brief Parameters for CMD_DEBUGFS_READ/WRITE command
*/
struct cmd_debugfs_dumpdir_request {
- /** @brief physical address pointing to data buffer */
+ /** @brief Physical address pointing to data buffer */
uint32_t dataaddr;
- /** @brief length in bytes of data buffer */
+ /** @brief Length in bytes of data buffer */
uint32_t datalen;
} __ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief response data for CMD_DEBUGFS_READ/WRITE command
+ * @brief Response data for CMD_DEBUGFS_READ/WRITE command
*/
struct cmd_debugfs_fileop_response {
- /** @brief always 0 */
+ /** @brief Always 0 */
uint32_t reserved;
- /** @brief number of bytes read from or written to data buffer */
+ /** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
} __ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief response data for CMD_DEBUGFS_DUMPDIR command
+ * @brief Response data for CMD_DEBUGFS_DUMPDIR command
*/
struct cmd_debugfs_dumpdir_response {
- /** @brief always 0 */
+ /** @brief Always 0 */
uint32_t reserved;
- /** @brief number of bytes read from or written to data buffer */
+ /** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
} __ABI_PACKED;
/**
* @ingroup Debugfs
- * @brief request with #MRQ_DEBUGFS.
+ * @brief Request with #MRQ_DEBUGFS.
*
* The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs
* command to execute. Legal commands are the values of @ref
@@ -601,6 +649,7 @@ struct cmd_debugfs_dumpdir_response {
* |CMD_DEBUGFS_DUMPDIR|dumpdir|
*/
struct mrq_debugfs_request {
+ /** @brief Sub-command (@ref mrq_debugfs_commands) */
uint32_t cmd;
union {
struct cmd_debugfs_fileop_request fop;
@@ -612,14 +661,14 @@ struct mrq_debugfs_request {
* @ingroup Debugfs
*/
struct mrq_debugfs_response {
- /** @brief always 0 */
+ /** @brief Always 0 */
int32_t reserved;
union {
- /** @brief response data for CMD_DEBUGFS_READ OR
+ /** @brief Response data for CMD_DEBUGFS_READ OR
* CMD_DEBUGFS_WRITE command
*/
struct cmd_debugfs_fileop_response fop;
- /** @brief response data for CMD_DEBUGFS_DUMPDIR command */
+ /** @brief Response data for CMD_DEBUGFS_DUMPDIR command */
struct cmd_debugfs_dumpdir_response dumpdir;
} __UNION_ANON;
} __ABI_PACKED;
@@ -633,57 +682,58 @@ struct mrq_debugfs_response {
#define DEBUGFS_S_IWUSR (1 << 7)
/** @} */
-
/**
* @ingroup MRQ_Codes
* @def MRQ_RESET
- * @brief reset an IP block
+ * @brief Reset an IP block
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_reset_request
* * Response Payload: @ref mrq_reset_response
+ *
+ * @addtogroup Reset
+ * @{
*/
-/**
- * @ingroup Reset
- */
enum mrq_reset_commands {
+ /** @brief Assert module reset */
CMD_RESET_ASSERT = 1,
+ /** @brief Deassert module reset */
CMD_RESET_DEASSERT = 2,
+ /** @brief Assert and deassert the module reset */
CMD_RESET_MODULE = 3,
+ /** @brief Get the highest reset ID */
CMD_RESET_GET_MAX_ID = 4,
- CMD_RESET_MAX, /* not part of ABI and subject to change */
+ /** @brief Not part of ABI and subject to change */
+ CMD_RESET_MAX,
};
/**
- * @ingroup Reset
- * @brief request with MRQ_RESET
+ * @brief Request with MRQ_RESET
*
* Used by the sender of an #MRQ_RESET message to request BPMP to
 * assert or deassert a given reset line.
*/
struct mrq_reset_request {
- /** @brief reset action to perform (@enum mrq_reset_commands) */
+ /** @brief Reset action to perform (@ref mrq_reset_commands) */
uint32_t cmd;
- /** @brief id of the reset to affected */
+	/** @brief ID of the reset to be affected */
uint32_t reset_id;
} __ABI_PACKED;
/**
- * @ingroup Reset
* @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When
* this sub-command is not supported, firmware will return -BPMP_EBADCMD
* in mrq_response::err.
*/
struct cmd_reset_get_max_id_response {
- /** @brief max reset id */
+ /** @brief Max reset id */
uint32_t max_id;
} __ABI_PACKED;
/**
- * @ingroup Reset
* @brief Response with MRQ_RESET
*
* Each sub-command supported by @ref mrq_reset_request may return
@@ -703,32 +753,25 @@ struct mrq_reset_response {
} __UNION_ANON;
} __ABI_PACKED;
+/** @} */
+
/**
* @ingroup MRQ_Codes
* @def MRQ_I2C
- * @brief issue an i2c transaction
+ * @brief Issue an i2c transaction
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_i2c_request
* * Response Payload: @ref mrq_i2c_response
- */
-
-/**
+ *
* @addtogroup I2C
* @{
*/
#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
-/** @} */
-/**
- * @ingroup I2C
- * @name Serial I2C flags
- * Use these flags with serial_i2c_request::flags
- * @{
- */
#define SERIALI2C_TEN 0x0010
#define SERIALI2C_RD 0x0001
#define SERIALI2C_STOP 0x8000
@@ -737,15 +780,13 @@ struct mrq_reset_response {
#define SERIALI2C_IGNORE_NAK 0x1000
#define SERIALI2C_NO_RD_ACK 0x0800
#define SERIALI2C_RECV_LEN 0x0400
-/** @} */
-/** @ingroup I2C */
+
enum {
CMD_I2C_XFER = 1
};
/**
- * @ingroup I2C
- * @brief serializable i2c request
+ * @brief Serializable i2c request
*
* Instances of this structure are packed (little-endian) into
* cmd_i2c_xfer_request::data_buf. Each instance represents a single
@@ -762,80 +803,75 @@ enum {
struct serial_i2c_request {
/** @brief I2C slave address */
uint16_t addr;
- /** @brief bitmask of SERIALI2C_ flags */
+ /** @brief Bitmask of SERIALI2C_ flags */
uint16_t flags;
- /** @brief length of I2C transaction in bytes */
+ /** @brief Length of I2C transaction in bytes */
uint16_t len;
- /** @brief for write transactions only, #len bytes of data */
+ /** @brief For write transactions only, #len bytes of data */
uint8_t data[];
} __ABI_PACKED;
/**
- * @ingroup I2C
- * @brief trigger one or more i2c transactions
+ * @brief Trigger one or more i2c transactions
*/
struct cmd_i2c_xfer_request {
- /** @brief valid bus number from mach-t186/i2c-t186.h*/
+ /** @brief Valid bus number from @ref bpmp_i2c_ids*/
uint32_t bus_id;
- /** @brief count of valid bytes in #data_buf*/
+ /** @brief Count of valid bytes in #data_buf*/
uint32_t data_size;
- /** @brief serialized packed instances of @ref serial_i2c_request*/
+ /** @brief Serialized packed instances of @ref serial_i2c_request*/
uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
} __ABI_PACKED;
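
A sketch of packing a single write transaction into cmd_i2c_xfer_request::data_buf, following the description above (each record is a serial_i2c_request header immediately followed by its data, records simply concatenated). The helper name and caller-provided buffer are illustrative; the caller must keep the total within TEGRA_I2C_IPC_MAX_IN_BUF_SIZE and report it in data_size:

#include <stdint.h>
#include <string.h>

static size_t pack_i2c_write(uint8_t *buf, uint16_t addr,
			     const uint8_t *data, uint16_t len)
{
	struct serial_i2c_request hdr = {
		.addr = addr,
		.flags = 0,	/* write: SERIALI2C_RD not set */
		.len = len,
	};

	memcpy(buf, &hdr, sizeof(hdr));		/* fixed 6-byte header */
	memcpy(buf + sizeof(hdr), data, len);	/* followed by the payload */
	return sizeof(hdr) + len;
}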
/**
- * @ingroup I2C
- * @brief container for data read from the i2c bus
+ * @brief Container for data read from the i2c bus
*
 * Processing a cmd_i2c_xfer_request::data_buf causes BPMP to execute
* zero or more I2C reads. The data read from the bus is serialized
* into #data_buf.
*/
struct cmd_i2c_xfer_response {
- /** @brief count of valid bytes in #data_buf*/
+ /** @brief Count of valid bytes in #data_buf*/
uint32_t data_size;
- /** @brief i2c read data */
+	/** @brief I2C read data */
uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
} __ABI_PACKED;
/**
- * @ingroup I2C
- * @brief request with #MRQ_I2C
+ * @brief Request with #MRQ_I2C
*/
struct mrq_i2c_request {
- /** @brief always CMD_I2C_XFER (i.e. 1) */
+ /** @brief Always CMD_I2C_XFER (i.e. 1) */
uint32_t cmd;
- /** @brief parameters of the transfer request */
+ /** @brief Parameters of the transfer request */
struct cmd_i2c_xfer_request xfer;
} __ABI_PACKED;
/**
- * @ingroup I2C
- * @brief response to #MRQ_I2C
+ * @brief Response to #MRQ_I2C
*/
struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
} __ABI_PACKED;
+/** @} */
+
/**
* @ingroup MRQ_Codes
* @def MRQ_CLK
+ * @brief Perform a clock operation
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_clk_request
* * Response Payload: @ref mrq_clk_response
+ *
* @addtogroup Clocks
* @{
*/
-
-/**
- * @name MRQ_CLK sub-commands
- * @{
- */
enum {
CMD_CLK_GET_RATE = 1,
CMD_CLK_SET_RATE = 2,
@@ -847,20 +883,13 @@ enum {
CMD_CLK_DISABLE = 8,
CMD_CLK_GET_ALL_INFO = 14,
CMD_CLK_GET_MAX_CLK_ID = 15,
+ CMD_CLK_GET_FMAX_AT_VMIN = 16,
CMD_CLK_MAX,
};
-/** @} */
-/**
- * @name MRQ_CLK properties
- * Flag bits for cmd_clk_properties_response::flags and
- * cmd_clk_get_all_info_response::flags
- * @{
- */
#define BPMP_CLK_HAS_MUX (1 << 0)
#define BPMP_CLK_HAS_SET_RATE (1 << 1)
#define BPMP_CLK_IS_ROOT (1 << 2)
-/** @} */
#define MRQ_CLK_NAME_MAXLEN 40
#define MRQ_CLK_MAX_PARENTS 16
@@ -959,11 +988,19 @@ struct cmd_clk_get_max_clk_id_request {
struct cmd_clk_get_max_clk_id_response {
uint32_t max_id;
} __ABI_PACKED;
-/** @} */
+
+/** @private */
+struct cmd_clk_get_fmax_at_vmin_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_fmax_at_vmin_response {
+ int64_t rate;
+} __ABI_PACKED;
/**
* @ingroup Clocks
- * @brief request with #MRQ_CLK
+ * @brief Request with #MRQ_CLK
*
* Used by the sender of an #MRQ_CLK message to control clocks. The
* clk_request is split into several sub-commands. Some sub-commands
@@ -982,11 +1019,13 @@ struct cmd_clk_get_max_clk_id_response {
* |CMD_CLK_DISABLE |- |
* |CMD_CLK_GET_ALL_INFO |- |
* |CMD_CLK_GET_MAX_CLK_ID |- |
+ * |CMD_CLK_GET_FMAX_AT_VMIN |- |
*
*/
struct mrq_clk_request {
- /** @brief sub-command and clock id concatenated to 32-bit word.
+ /** @brief Sub-command and clock id concatenated to 32-bit word.
* - bits[31..24] is the sub-cmd.
* - bits[23..0] is the clock id
*/
@@ -1010,12 +1049,14 @@ struct mrq_clk_request {
struct cmd_clk_get_all_info_request clk_get_all_info;
/** @private */
struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
+ /** @private */
+ struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin;
} __UNION_ANON;
} __ABI_PACKED;
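
The cmd_and_id packing documented above (sub-command in bits[31..24], clock id in bits[23..0]) can be captured in a one-line helper; a sketch:

#include <stdint.h>

static inline uint32_t mrq_clk_cmd_and_id(uint32_t cmd, uint32_t clk_id)
{
	return ((cmd & 0xff) << 24) | (clk_id & 0xffffff);
}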
/**
* @ingroup Clocks
- * @brief response to MRQ_CLK
+ * @brief Response to MRQ_CLK
*
* Each sub-command supported by @ref mrq_clk_request may return
* sub-command-specific data. Some do and some do not as indicated in
@@ -1033,6 +1074,7 @@ struct mrq_clk_request {
* |CMD_CLK_DISABLE |- |
* |CMD_CLK_GET_ALL_INFO |clk_get_all_info |
* |CMD_CLK_GET_MAX_CLK_ID |clk_get_max_id |
+ * |CMD_CLK_GET_FMAX_AT_VMIN |clk_get_fmax_at_vmin |
*
*/
@@ -1050,13 +1092,16 @@ struct mrq_clk_response {
struct cmd_clk_is_enabled_response clk_is_enabled;
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
+ struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
} __UNION_ANON;
} __ABI_PACKED;
+/** @} */
+
/**
* @ingroup MRQ_Codes
* @def MRQ_QUERY_ABI
- * @brief check if an MRQ is implemented
+ * @brief Check if an MRQ is implemented
*
* * Platforms: All
* * Initiators: Any
@@ -1067,7 +1112,7 @@ struct mrq_clk_response {
/**
* @ingroup ABI_info
- * @brief request with MRQ_QUERY_ABI
+ * @brief Request with MRQ_QUERY_ABI
*
* Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported
* by the recipient.
@@ -1079,7 +1124,7 @@ struct mrq_query_abi_request {
/**
* @ingroup ABI_info
- * @brief response to MRQ_QUERY_ABI
+ * @brief Response to MRQ_QUERY_ABI
*
* @note mrq_response::err of 0 indicates that the query was
* successful, not that the MRQ itself is supported!
@@ -1092,19 +1137,19 @@ struct mrq_query_abi_response {
/**
* @ingroup MRQ_Codes
* @def MRQ_PG_READ_STATE
- * @brief read the power-gating state of a partition
+ * @brief Read the power-gating state of a partition
*
* * Platforms: T186
+ * @cond bpmp_t186
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_pg_read_state_request
* * Response Payload: @ref mrq_pg_read_state_response
- * @addtogroup Powergating
- * @{
*/
/**
- * @brief request with #MRQ_PG_READ_STATE
+ * @ingroup Powergating
+ * @brief Request with #MRQ_PG_READ_STATE
*
* Used by MRQ_PG_READ_STATE call to read the current state of a
* partition.
@@ -1115,39 +1160,40 @@ struct mrq_pg_read_state_request {
} __ABI_PACKED;
/**
- * @brief response to MRQ_PG_READ_STATE
+ * @ingroup Powergating
+ * @brief Response to MRQ_PG_READ_STATE
* @todo define possible errors.
*/
struct mrq_pg_read_state_response {
- /** @brief read as don't care */
+ /** @brief Read as don't care */
uint32_t sram_state;
- /** @brief state of power partition
+ /** @brief State of power partition
* * 0 : off
* * 1 : on
*/
uint32_t logic_state;
} __ABI_PACKED;
-
+/** @endcond*/
/** @} */
/**
* @ingroup MRQ_Codes
* @def MRQ_PG_UPDATE_STATE
- * @brief modify the power-gating state of a partition. In contrast to
+ * @brief Modify the power-gating state of a partition. In contrast to
* MRQ_PG calls, the operations that change state (on/off) of power
* partition are reference counted.
*
* * Platforms: T186
+ * @cond bpmp_t186
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_pg_update_state_request
* * Response Payload: N/A
- * @addtogroup Powergating
- * @{
*/
/**
- * @brief request with mrq_pg_update_state_request
+ * @ingroup Powergating
+ * @brief Request with mrq_pg_update_state_request
*
* Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
* state of a power partition #partition_id.
@@ -1155,20 +1201,20 @@ struct mrq_pg_read_state_response {
struct mrq_pg_update_state_request {
/** @brief ID of partition */
uint32_t partition_id;
- /** @brief secondary control of power partition
+ /** @brief Secondary control of power partition
* @details Ignored by many versions of the BPMP
* firmware. For maximum compatibility, set the value
- * according to @logic_state
+ * according to @ref logic_state
* * 0x1: power ON partition (@ref logic_state == 0x3)
* * 0x3: power OFF partition (@ref logic_state == 0x1)
*/
uint32_t sram_state;
- /** @brief controls state of power partition, legal values are
+ /** @brief Controls state of power partition, legal values are
* * 0x1 : power OFF partition
* * 0x3 : power ON partition
*/
uint32_t logic_state;
- /** @brief change state of clocks of the power partition, legal values
+ /** @brief Change state of clocks of the power partition, legal values
* * 0x0 : do not change clock state
* * 0x1 : disable partition clocks (only applicable when
* @ref logic_state == 0x1)
@@ -1177,7 +1223,7 @@ struct mrq_pg_update_state_request {
*/
uint32_t clock_state;
} __ABI_PACKED;
-/** @} */
+/** @endcond*/
/**
* @ingroup MRQ_Codes
@@ -1186,19 +1232,20 @@ struct mrq_pg_update_state_request {
* MRQ_PG_UPDATE_STATE, operations that change the power partition
* state are NOT reference counted
*
- * * Platforms: T186
+ * @note BPMP-FW forcefully turns off some partitions as part of SC7 entry
+ * because their state cannot be adequately restored on exit. Therefore,
+ * it is recommended to power off all domains via MRQ_PG prior to SC7 entry.
+ * See @ref bpmp_pdomain_ids for further detail.
+ *
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_pg_request
* * Response Payload: @ref mrq_pg_response
+ *
* @addtogroup Powergating
* @{
*/
-
-/**
- * @name MRQ_PG sub-commands
- * @{
- */
enum mrq_pg_cmd {
/**
* @brief Check whether the BPMP driver supports the specified
@@ -1232,7 +1279,7 @@ enum mrq_pg_cmd {
CMD_PG_GET_STATE = 2,
/**
- * @brief get the name string of specified power domain id.
+	 * @brief Get the name string of the specified power domain id.
*
* mrq_response:err is
* 0: Success
@@ -1242,7 +1289,7 @@ enum mrq_pg_cmd {
/**
- * @brief get the highest power domain id in the system. Not
+ * @brief Get the highest power domain id in the system. Not
* all IDs between 0 and max_id are valid IDs.
*
* mrq_response:err is
@@ -1251,35 +1298,36 @@ enum mrq_pg_cmd {
*/
CMD_PG_GET_MAX_ID = 4,
};
-/** @} */
#define MRQ_PG_NAME_MAXLEN 40
-/**
- * @brief possible power domain states in
- * cmd_pg_set_state_request:state and cmd_pg_get_state_response:state.
- * PG_STATE_OFF: power domain is OFF
- * PG_STATE_ON: power domain is ON
- * PG_STATE_RUNNING: power domain is ON and made into directly usable
- * state by turning on the clocks associated with
- * the domain
- */
enum pg_states {
+ /** @brief Power domain is OFF */
PG_STATE_OFF = 0,
+ /** @brief Power domain is ON */
PG_STATE_ON = 1,
+ /**
+	 * @brief A legacy state in which both the power domain and the
+	 * clocks associated with it are ON.
+	 * This state is only supported on T186 and its use is deprecated.
+ */
PG_STATE_RUNNING = 2,
};
struct cmd_pg_query_abi_request {
- uint32_t type; /* enum mrq_pg_cmd */
+ /** @ref mrq_pg_cmd */
+ uint32_t type;
} __ABI_PACKED;
struct cmd_pg_set_state_request {
- uint32_t state; /* enum pg_states */
+ /** @ref pg_states */
+ uint32_t state;
} __ABI_PACKED;
struct cmd_pg_get_state_response {
- uint32_t state; /* enum pg_states */
+ /** @ref pg_states */
+ uint32_t state;
} __ABI_PACKED;
struct cmd_pg_get_name_response {
@@ -1291,8 +1339,7 @@ struct cmd_pg_get_max_id_response {
} __ABI_PACKED;
/**
- * @ingroup Powergating
- * @brief request with #MRQ_PG
+ * @brief Request with #MRQ_PG
*
* Used by the sender of an #MRQ_PG message to control power
* partitions. The pg_request is split into several sub-commands. Some
@@ -1308,7 +1355,6 @@ struct cmd_pg_get_max_id_response {
* |CMD_PG_GET_MAX_ID | - |
*
*/
-
struct mrq_pg_request {
uint32_t cmd;
uint32_t id;
@@ -1319,8 +1365,7 @@ struct mrq_pg_request {
} __ABI_PACKED;
/**
- * @ingroup Powergating
- * @brief response to MRQ_PG
+ * @brief Response to MRQ_PG
*
* Each sub-command supported by @ref mrq_pg_request may return
* sub-command-specific data. Some do and some do not as indicated in
@@ -1333,9 +1378,7 @@ struct mrq_pg_request {
* |CMD_PG_GET_STATE | get_state |
* |CMD_PG_GET_NAME | get_name |
* |CMD_PG_GET_MAX_ID | get_max_id |
- *
*/
-
struct mrq_pg_response {
union {
struct cmd_pg_get_state_response get_state;
@@ -1344,12 +1387,14 @@ struct mrq_pg_response {
} __UNION_ANON;
} __ABI_PACKED;
+/** @} */
+
/**
* @ingroup MRQ_Codes
* @def MRQ_THERMAL
- * @brief interact with BPMP thermal framework
+ * @brief Interact with BPMP thermal framework
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: Any
* * Request Payload: TODO
@@ -1562,17 +1607,18 @@ union mrq_thermal_bpmp_to_host_response {
* @brief Query CPU voltage hint data
*
* * Platforms: T186
+ * @cond bpmp_t186
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_cpu_vhint_request
* * Response Payload: N/A
*
- * @addtogroup Vhint CPU Voltage hint
+ * @addtogroup Vhint
* @{
*/
/**
- * @brief request with #MRQ_CPU_VHINT
+ * @brief Request with #MRQ_CPU_VHINT
*
* Used by #MRQ_CPU_VHINT call by CCPLEX to retrieve voltage hint data
* from BPMP to memory space pointed by #addr. CCPLEX is responsible
@@ -1581,16 +1627,16 @@ union mrq_thermal_bpmp_to_host_response {
*/
struct mrq_cpu_vhint_request {
/** @brief IOVA address for the #cpu_vhint_data */
- uint32_t addr; /* struct cpu_vhint_data * */
+ uint32_t addr;
/** @brief ID of the cluster whose data is requested */
- uint32_t cluster_id; /* enum cluster_id */
+ uint32_t cluster_id;
} __ABI_PACKED;
/**
- * @brief description of the CPU v/f relation
+ * @brief Description of the CPU v/f relation
*
- * Used by #MRQ_CPU_VHINT call to carry data pointed by #addr of
- * struct mrq_cpu_vhint_request
+ * Used by #MRQ_CPU_VHINT call to carry data pointed by
+ * #mrq_cpu_vhint_request::addr
*/
struct cpu_vhint_data {
uint32_t ref_clk_hz; /**< reference frequency in Hz */
@@ -1612,7 +1658,7 @@ struct cpu_vhint_data {
/** reserved for future use */
uint16_t reserved[328];
} __ABI_PACKED;
-
+/** @endcond */
/** @} */
/**
@@ -1620,7 +1666,7 @@ struct cpu_vhint_data {
* @def MRQ_ABI_RATCHET
* @brief ABI ratchet value query
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_abi_ratchet_request
@@ -1630,7 +1676,7 @@ struct cpu_vhint_data {
*/
/**
- * @brief an ABI compatibility mechanism
+ * @brief An ABI compatibility mechanism
*
* BPMP_ABI_RATCHET_VALUE may increase for various reasons in a future
* revision of this header file.
@@ -1644,7 +1690,7 @@ struct cpu_vhint_data {
#define BPMP_ABI_RATCHET_VALUE 3
/**
- * @brief request with #MRQ_ABI_RATCHET.
+ * @brief Request with #MRQ_ABI_RATCHET.
*
* #ratchet should be #BPMP_ABI_RATCHET_VALUE from the ABI header
* against which the requester was compiled.
@@ -1657,12 +1703,12 @@ struct cpu_vhint_data {
* Otherwise, err shall be 0.
*/
struct mrq_abi_ratchet_request {
- /** @brief requester's ratchet value */
+ /** @brief Requester's ratchet value */
uint16_t ratchet;
};
/**
- * @brief response to #MRQ_ABI_RATCHET
+ * @brief Response to #MRQ_ABI_RATCHET
*
* #ratchet shall be #BPMP_ABI_RATCHET_VALUE from the ABI header
 * against which BPMP firmware was compiled.
@@ -1685,9 +1731,9 @@ struct mrq_abi_ratchet_response {
/**
* @ingroup MRQ_Codes
* @def MRQ_EMC_DVFS_LATENCY
- * @brief query frequency dependent EMC DVFS latency
+ * @brief Query frequency dependent EMC DVFS latency
*
- * * Platforms: T186
+ * * Platforms: T186, T194
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: N/A
@@ -1697,7 +1743,7 @@ struct mrq_abi_ratchet_response {
*/
/**
- * @brief used by @ref mrq_emc_dvfs_latency_response
+ * @brief Used by @ref mrq_emc_dvfs_latency_response
*/
struct emc_dvfs_latency {
/** @brief EMC frequency in kHz */
@@ -1708,10 +1754,10 @@ struct emc_dvfs_latency {
#define EMC_DVFS_LATENCY_MAX_SIZE 14
/**
- * @brief response to #MRQ_EMC_DVFS_LATENCY
+ * @brief Response to #MRQ_EMC_DVFS_LATENCY
*/
struct mrq_emc_dvfs_latency_response {
- /** @brief the number valid entries in #pairs */
+ /** @brief The number valid entries in #pairs */
uint32_t num_pairs;
/** @brief EMC <frequency, latency> information */
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
@@ -1721,8 +1767,96 @@ struct mrq_emc_dvfs_latency_response {
/**
* @ingroup MRQ_Codes
+ * @def MRQ_CPU_NDIV_LIMITS
+ * @brief CPU freq. limits in ndiv
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cpu_ndiv_limits_request
+ * * Response Payload: @ref mrq_cpu_ndiv_limits_response
+ * @addtogroup CPU
+ * @{
+ */
+
+/**
+ * @brief Request for ndiv limits of a cluster
+ */
+struct mrq_cpu_ndiv_limits_request {
+ /** @brief Enum cluster_id */
+ uint32_t cluster_id;
+} __ABI_PACKED;
+
+/**
+ * @brief Response to #MRQ_CPU_NDIV_LIMITS
+ */
+struct mrq_cpu_ndiv_limits_response {
+ /** @brief Reference frequency in Hz */
+ uint32_t ref_clk_hz;
+ /** @brief Post divider value */
+ uint16_t pdiv;
+ /** @brief Input divider value */
+ uint16_t mdiv;
+ /** @brief FMAX expressed with max NDIV value */
+ uint16_t ndiv_max;
+ /** @brief Minimum allowed NDIV value */
+ uint16_t ndiv_min;
+} __ABI_PACKED;
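
The response carries the divider chain rather than frequencies. A hedged sketch of converting an ndiv value into Hz, assuming the usual NAFLL relation freq = ref_clk_hz * ndiv / (pdiv * mdiv), which is not spelled out in this header:

#include <stdint.h>

static inline uint64_t ndiv_to_hz(const struct mrq_cpu_ndiv_limits_response *r,
				  uint16_t ndiv)
{
	/* Assumed relation; widen first to avoid 32-bit overflow. */
	return (uint64_t)r->ref_clk_hz * ndiv / (r->pdiv * r->mdiv);
}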
+
+/** @} */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CPU_AUTO_CC3
+ * @brief Query CPU cluster auto-CC3 configuration
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cpu_auto_cc3_request
+ * * Response Payload: @ref mrq_cpu_auto_cc3_response
+ * @addtogroup CC3
+ *
+ * Queries the auto-CC3 configuration (allowed/not allowed) of a specified
+ * cluster from BPMP. CCPLEX s/w uses this information to override its own
+ * device tree auto-CC3 settings, so that the BPMP device tree is the single
+ * source of auto-CC3 platform configuration.
+ *
+ * @{
+ */
+
+/**
+ * @brief Request for auto-CC3 configuration of a cluster
+ */
+struct mrq_cpu_auto_cc3_request {
+ /** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */
+ uint32_t cluster_id;
+} __ABI_PACKED;
+
+/**
+ * @brief Response to #MRQ_CPU_AUTO_CC3
+ */
+struct mrq_cpu_auto_cc3_response {
+ /**
+ * @brief auto-CC3 configuration
+ *
+ * - bits[31..10] reserved.
+ * - bits[9..1] cc3 ndiv
+ * - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed
+ */
+ uint32_t auto_cc3_config;
+} __ABI_PACKED;
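
Decoding the auto_cc3_config word per the bit layout documented above; a minimal sketch:

#include <stdbool.h>
#include <stdint.h>

static inline bool auto_cc3_allowed(uint32_t cfg)
{
	return cfg & 0x1;		/* bit[0]: 1 = auto-CC3 allowed */
}

static inline uint32_t auto_cc3_ndiv(uint32_t cfg)
{
	return (cfg >> 1) & 0x1ff;	/* bits[9..1]: CC3 ndiv */
}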
+
+/** @} */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
* @def MRQ_TRACE_ITER
- * @brief manage the trace iterator
+ * @brief Manage the trace iterator
*
* * Platforms: All
* * Initiators: CCPLEX
@@ -1735,12 +1869,12 @@ struct mrq_emc_dvfs_latency_response {
enum {
/** @brief (re)start the tracing now. Ignore older events */
TRACE_ITER_INIT = 0,
- /** @brief clobber all events in the trace buffer */
+ /** @brief Clobber all events in the trace buffer */
TRACE_ITER_CLEAN = 1
};
/**
- * @brief request with #MRQ_TRACE_ITER
+ * @brief Request with #MRQ_TRACE_ITER
*/
struct mrq_trace_iter_request {
/** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
@@ -1900,7 +2034,7 @@ struct cmd_ringbuf_console_get_fifo_resp {
*/
struct mrq_ringbuf_console_host_to_bpmp_request {
/**
- * @brief type of request. Values listed in enum
+ * @brief Type of request. Values listed in enum
* #mrq_ringbuf_console_host_to_bpmp_cmd.
*/
uint32_t type;
@@ -1927,49 +2061,616 @@ union mrq_ringbuf_console_bpmp_to_host_response {
} __ABI_PACKED;
/** @} */
-/*
- * 4. Enumerations
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_STRAP
+ * @brief Set a strap value controlled by BPMP
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_strap_request
+ * * Response Payload: N/A
+ * @addtogroup Strap
+ *
+ * A strap is an input that is sampled by a hardware unit during the
+ * unit's startup process. The sampled value of a strap affects the
+ * behavior of the unit until the unit is restarted. Many hardware
+ * units sample their straps at the instant that their resets are
+ * deasserted.
+ *
+ * BPMP owns registers which act as straps to various units. It
+ * exposes limited control of those straps via #MRQ_STRAP.
+ *
+ * @{
*/
+enum mrq_strap_cmd {
+ /** @private */
+ STRAP_RESERVED = 0,
+ /** @brief Set a strap value */
+ STRAP_SET = 1
+};
-/*
- * 4.1 CPU enumerations
+/**
+ * @brief Request with #MRQ_STRAP
+ */
+struct mrq_strap_request {
+ /** @brief @ref mrq_strap_cmd */
+ uint32_t cmd;
+ /** @brief Strap ID from @ref Strap_Ids */
+ uint32_t id;
+ /** @brief Desired value for strap (if cmd is #STRAP_SET) */
+ uint32_t value;
+} __ABI_PACKED;
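
Filling an MRQ_STRAP request is straightforward; a sketch for STRAP_SET (the strap id would come from the Strap_Ids group, which is defined elsewhere):

#include <stdint.h>

static void fill_strap_set(struct mrq_strap_request *req,
			   uint32_t strap_id, uint32_t value)
{
	req->cmd = STRAP_SET;
	req->id = strap_id;
	req->value = value;
}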
+
+/**
+ * @defgroup Strap_Ids Strap Identifiers
+ * @}
+ */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_UPHY
+ * @brief Perform a UPHY operation
*
- * See <mach-t186/system-t186.h>
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_uphy_request
+ * * Response Payload: @ref mrq_uphy_response
*
- * 4.2 CPU Cluster enumerations
+ * @addtogroup UPHY
+ * @{
+ */
+enum {
+ CMD_UPHY_PCIE_LANE_MARGIN_CONTROL = 1,
+ CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2,
+ CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3,
+ CMD_UPHY_PCIE_CONTROLLER_STATE = 4,
+ CMD_UPHY_MAX,
+};
+
+struct cmd_uphy_margin_control_request {
+ /** @brief Enable margin */
+ int32_t en;
+	/** @brief Clear the number of errors and sections */
+ int32_t clr;
+ /** @brief Set x offset (1's complement) for left/right margin type (y should be 0) */
+ uint32_t x;
+	/** @brief Set y offset (1's complement) for up/down margin type (x should be 0) */
+ uint32_t y;
+ /** @brief Set number of bit blocks for each margin section */
+ uint32_t nblks;
+} __ABI_PACKED;
+
+struct cmd_uphy_margin_status_response {
+ /** @brief Number of errors observed */
+ uint32_t status;
+} __ABI_PACKED;
+
+struct cmd_uphy_ep_controller_pll_init_request {
+ /** @brief EP controller number, valid: 0, 4, 5 */
+ uint8_t ep_controller;
+} __ABI_PACKED;
+
+struct cmd_uphy_pcie_controller_state_request {
+ /** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
+ uint8_t pcie_controller;
+ uint8_t enable;
+} __ABI_PACKED;
+
+/**
+ * @ingroup UPHY
+ * @brief Request with #MRQ_UPHY
*
- * See <mach-t186/system-t186.h>
+ * Used by the sender of an #MRQ_UPHY message to control UPHY Lane RX margining.
+ * The uphy_request is split into several sub-commands. Some sub-commands
+ * require no additional data. Others have a sub-command specific payload
*
- * 4.3 System low power state enumerations
+ * |sub-command |payload |
+ * |------------------------------------ |----------------------------------------|
+ * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL |uphy_set_margin_control |
+ * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | |
+ * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request |
+ * |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request |
*
- * See <mach-t186/system-t186.h>
*/
-/*
- * 4.4 Clock enumerations
+struct mrq_uphy_request {
+ /** @brief Lane number. */
+ uint16_t lane;
+ /** @brief Sub-command id. */
+ uint16_t cmd;
+
+ union {
+ struct cmd_uphy_margin_control_request uphy_set_margin_control;
+ struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
+ struct cmd_uphy_pcie_controller_state_request controller_state;
+ } __UNION_ANON;
+} __ABI_PACKED;
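
A sketch of building an MRQ_UPHY request for CMD_UPHY_PCIE_CONTROLLER_STATE, assuming __UNION_ANON leaves the union anonymous so its members can be addressed directly; the helper name is illustrative:

#include <stdint.h>

static void fill_uphy_controller_state(struct mrq_uphy_request *req,
				       uint8_t controller, uint8_t enable)
{
	req->lane = 0;	/* unused by this sub-command */
	req->cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
	req->controller_state.pcie_controller = controller;
	req->controller_state.enable = enable;
}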
+
+/**
+ * @ingroup UPHY
+ * @brief Response to MRQ_UPHY
+ *
+ * Each sub-command supported by @ref mrq_uphy_request may return
+ * sub-command-specific data. Some do and some do not as indicated in
+ * the following table
+ *
+ * |sub-command |payload |
+ * |---------------------------- |------------------------|
+ * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | |
+ * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS |uphy_get_margin_status |
*
- * For clock enumerations, see <mach-t186/clk-t186.h>
*/
-/*
- * 4.5 Reset enumerations
+struct mrq_uphy_response {
+ union {
+ struct cmd_uphy_margin_status_response uphy_get_margin_status;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/** @} */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_FMON
+ * @brief Perform frequency monitor configuration operations
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_fmon_request
+ * * Response Payload: @ref mrq_fmon_response
*
- * For reset enumerations, see <mach-t186/reset-t186.h>
+ * @addtogroup FMON
+ * @{
*/
+enum {
+ /**
+ * @brief Clamp FMON configuration to specified rate.
+ *
+ * The monitored clock must be running for clamp to succeed. If
+ * clamped, FMON configuration is preserved when clock rate
+ * and/or state is changed.
+ */
+ CMD_FMON_GEAR_CLAMP = 1,
+ /**
+ * @brief Release clamped FMON configuration.
+ *
+ * Allow FMON configuration to follow monitored clock rate
+ * and/or state changes.
+ */
+ CMD_FMON_GEAR_FREE = 2,
+ /**
+ * @brief Return rate FMON is clamped at, or 0 if FMON is not
+ * clamped.
+ *
+ * Inherently racy, since clamp state can be changed
+ * concurrently. Useful for testing.
+ */
+ CMD_FMON_GEAR_GET = 3,
+ CMD_FMON_NUM,
+};
-/*
- * 4.6 Thermal sensor enumerations
+struct cmd_fmon_gear_clamp_request {
+ int32_t unused;
+ int64_t rate;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_fmon_gear_clamp_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_fmon_gear_free_request {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_fmon_gear_free_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_fmon_gear_get_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_fmon_gear_get_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+/**
+ * @ingroup FMON
+ * @brief Request with #MRQ_FMON
+ *
+ * Used by the sender of an #MRQ_FMON message to configure clock
+ * frequency monitors. The FMON request is split into several
+ * sub-commands. Some sub-commands require no additional data.
+ * Others have a sub-command specific payload
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |CMD_FMON_GEAR_CLAMP |fmon_gear_clamp |
+ * |CMD_FMON_GEAR_FREE |- |
+ * |CMD_FMON_GEAR_GET |- |
+ *
+ */
+
+struct mrq_fmon_request {
+ /** @brief Sub-command and clock id concatenated to 32-bit word.
+ * - bits[31..24] is the sub-cmd.
+ * - bits[23..0] is monitored clock id used to select target
+ * FMON
+ */
+ uint32_t cmd_and_id;
+
+ union {
+ struct cmd_fmon_gear_clamp_request fmon_gear_clamp;
+ /** @private */
+ struct cmd_fmon_gear_free_request fmon_gear_free;
+ /** @private */
+ struct cmd_fmon_gear_get_request fmon_gear_get;
+ } __UNION_ANON;
+} __ABI_PACKED;
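
MRQ_FMON reuses the MRQ_CLK-style cmd_and_id packing described above. A sketch of a CMD_FMON_GEAR_CLAMP request, again assuming the __UNION_ANON union is anonymous:

#include <stdint.h>

static void fill_fmon_gear_clamp(struct mrq_fmon_request *req,
				 uint32_t clk_id, int64_t rate)
{
	req->cmd_and_id = (CMD_FMON_GEAR_CLAMP << 24) | (clk_id & 0xffffff);
	req->fmon_gear_clamp.rate = rate;
}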
+
+/**
+ * @ingroup FMON
+ * @brief Response to MRQ_FMON
+ *
+ * Each sub-command supported by @ref mrq_fmon_request may
+ * return sub-command-specific data as indicated below.
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |CMD_FMON_GEAR_CLAMP |- |
+ * |CMD_FMON_GEAR_FREE |- |
+ * |CMD_FMON_GEAR_GET |fmon_gear_get |
+ *
+ */
+
+struct mrq_fmon_response {
+ union {
+ /** @private */
+ struct cmd_fmon_gear_clamp_response fmon_gear_clamp;
+ /** @private */
+ struct cmd_fmon_gear_free_response fmon_gear_free;
+ struct cmd_fmon_gear_get_response fmon_gear_get;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/** @} */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EC
+ * @brief Provide status information on faults reported by Error
+ * Collator (EC) to HSM.
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_ec_request
+ * * Response Payload: @ref mrq_ec_response
+ *
+ * @note This MRQ ABI is under construction, and subject to change
+ *
+ * @addtogroup EC
+ * @{
+ */
+enum {
+ /**
+ * @brief Retrieve specified EC status.
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if target EC is not owned by BPMP @n
+ * -#BPMP_EACCES if target EC power domain is turned off
+ */
+ CMD_EC_STATUS_GET = 1,
+ CMD_EC_NUM,
+};
+
+/** @brief BPMP ECs error types */
+enum bpmp_ec_err_type {
+ /** @brief Parity error on internal data path
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_PARITY_INTERNAL = 1,
+
+ /** @brief ECC SEC error on internal data path
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_ECC_SEC_INTERNAL = 2,
+
+ /** @brief ECC DED error on internal data path
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_ECC_DED_INTERNAL = 3,
+
+ /** @brief Comparator error
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_COMPARATOR = 4,
+
+ /** @brief Register parity error
+ *
+ * Error descriptor @ref ec_err_reg_parity_desc.
+ */
+ EC_ERR_TYPE_REGISTER_PARITY = 5,
+
+ /** @brief Parity error from on-chip SRAM/FIFO
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_PARITY_SRAM = 6,
+
+ /** @brief Clock Monitor error
+ *
+ * Error descriptor @ref ec_err_fmon_desc.
+ */
+ EC_ERR_TYPE_CLOCK_MONITOR = 9,
+
+ /** @brief Voltage Monitor error
+ *
+ * Error descriptor @ref ec_err_vmon_desc.
+ */
+ EC_ERR_TYPE_VOLTAGE_MONITOR = 10,
+
+ /** @brief SW Correctable error
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_SW_CORRECTABLE = 16,
+
+ /** @brief SW Uncorrectable error
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_SW_UNCORRECTABLE = 17,
+
+ /** @brief Other HW Correctable error
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_OTHER_HW_CORRECTABLE = 32,
+
+ /** @brief Other HW Uncorrectable error
+ *
+ * Error descriptor @ref ec_err_simple_desc.
+ */
+ EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE = 33,
+};
+
+/** @brief Group of registers with parity error. */
+enum ec_registers_group {
+ /** @brief Functional registers group */
+ EC_ERR_GROUP_FUNC_REG = 0,
+ /** @brief SCR registers group */
+ EC_ERR_GROUP_SCR_REG = 1,
+};
+
+/**
+ * @defgroup bpmp_ec_status_flags EC Status Flags
+ * @addtogroup bpmp_ec_status_flags
+ * @{
+ */
+/** @brief No EC error found flag */
+#define EC_STATUS_FLAG_NO_ERROR 0x0001
+/** @brief Last EC error found flag */
+#define EC_STATUS_FLAG_LAST_ERROR 0x0002
+/** @brief EC latent error flag */
+#define EC_STATUS_FLAG_LATENT_ERROR 0x0004
+/** @} */
+
+/**
+ * @defgroup bpmp_ec_desc_flags EC Descriptor Flags
+ * @addtogroup bpmp_ec_desc_flags
+ * @{
+ */
+/** @brief EC descriptor error resolved flag */
+#define EC_DESC_FLAG_RESOLVED 0x0001
+/** @brief EC descriptor failed to retrieve id flag */
+#define EC_DESC_FLAG_NO_ID 0x0002
+/** @} */
+
+/**
+ * |error type | fmon_clk_id values |
+ * |---------------------------------|---------------------------|
+ * |@ref EC_ERR_TYPE_CLOCK_MONITOR |@ref bpmp_clock_ids |
+ */
+struct ec_err_fmon_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief FMON monitored clock id */
+ uint16_t fmon_clk_id;
+ /** @brief Bitmask of @ref bpmp_fmon_faults_flags */
+ uint32_t fmon_faults;
+ /** @brief FMON faults access error */
+ int32_t fmon_access_error;
+} __ABI_PACKED;
+
+/**
+ * |error type | vmon_adc_id values |
+ * |---------------------------------|---------------------------|
+ * |@ref EC_ERR_TYPE_VOLTAGE_MONITOR |@ref bpmp_adc_ids |
+ */
+struct ec_err_vmon_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief VMON rail adc id */
+ uint16_t vmon_adc_id;
+ /** @brief Bitmask of @ref bpmp_vmon_faults_flags */
+ uint32_t vmon_faults;
+ /** @brief VMON faults access error */
+ int32_t vmon_access_error;
+} __ABI_PACKED;
+
+/**
+ * |error type | reg_id values |
+ * |---------------------------------|---------------------------|
+ * |@ref EC_ERR_TYPE_REGISTER_PARITY |@ref bpmp_ec_registers_ids |
+ */
+struct ec_err_reg_parity_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief Register id */
+ uint16_t reg_id;
+ /** @brief Register group @ref ec_registers_group */
+ uint16_t reg_group;
+} __ABI_PACKED;
+
+/**
+ * |error type | err_source_id values |
+ * |----------------------------------------|---------------------------|
+ * |@ref EC_ERR_TYPE_PARITY_INTERNAL |@ref bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL |@ref bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
+ * |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
+ * |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
+ */
+struct ec_err_simple_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief Error source id. Id space depends on error type. */
+ uint16_t err_source_id;
+} __ABI_PACKED;
+
+/** @brief Union of EC error descriptors */
+union ec_err_desc {
+ struct ec_err_fmon_desc fmon_desc;
+ struct ec_err_vmon_desc vmon_desc;
+ struct ec_err_reg_parity_desc reg_parity_desc;
+ struct ec_err_simple_desc simple_desc;
+} __ABI_PACKED;
+
+struct cmd_ec_status_get_request {
+ /** @brief HSM error line number that identifies target EC. */
+ uint32_t ec_hsm_id;
+} __ABI_PACKED;
+
+/** Maximum number of error descriptors in an EC status response */
+#define EC_ERR_STATUS_DESC_MAX_NUM 4
+
+struct cmd_ec_status_get_response {
+ /** @brief Target EC id (the same id received with request). */
+ uint32_t ec_hsm_id;
+ /**
+ * @brief Bitmask of @ref bpmp_ec_status_flags
+ *
+	 * If the NO_ERROR flag is set, the error_* fields should be ignored.
+ */
+ uint32_t ec_status_flags;
+ /** @brief Found EC error index. */
+ uint32_t error_idx;
+ /** @brief Found EC error type @ref bpmp_ec_err_type. */
+ uint32_t error_type;
+ /** @brief Number of returned EC error descriptors */
+ uint32_t error_desc_num;
+ /** @brief EC error descriptors */
+ union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
+} __ABI_PACKED;
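
A minimal sketch of how a recipient might walk a CMD_EC_STATUS_GET response, selecting the descriptor union member that the tables above associate with each error type. The function name and the clamping are illustrative, not part of the ABI.

#include <soc/tegra/bpmp-abi.h>

static void ec_walk_status(const struct cmd_ec_status_get_response *resp)
{
	uint32_t i, n = resp->error_desc_num;

	if (resp->ec_status_flags & EC_STATUS_FLAG_NO_ERROR)
		return;	/* error_* fields are not meaningful */

	if (n > EC_ERR_STATUS_DESC_MAX_NUM)
		n = EC_ERR_STATUS_DESC_MAX_NUM;

	for (i = 0; i < n; i++) {
		const union ec_err_desc *d = &resp->error_descs[i];

		switch (resp->error_type) {
		case EC_ERR_TYPE_CLOCK_MONITOR:
			/* d->fmon_desc: fmon_clk_id, fmon_faults, ... */
			break;
		case EC_ERR_TYPE_VOLTAGE_MONITOR:
			/* d->vmon_desc: vmon_adc_id, vmon_faults, ... */
			break;
		case EC_ERR_TYPE_REGISTER_PARITY:
			/* d->reg_parity_desc: reg_id, reg_group */
			break;
		default:
			/* all remaining types use ec_err_simple_desc */
			/* d->simple_desc: err_source_id */
			break;
		}
	}
}
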
+
+/**
+ * @ingroup EC
+ * @brief Request with #MRQ_EC
+ *
+ * Used by the sender of an #MRQ_EC message to access ECs owned
+ * by BPMP.
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |@ref CMD_EC_STATUS_GET |ec_status_get |
*
- * For thermal sensor enumerations, see <mach-t186/thermal-t186.h>
*/
+struct mrq_ec_request {
+ /** @brief Sub-command id. */
+ uint32_t cmd_id;
+
+ union {
+ struct cmd_ec_status_get_request ec_status_get;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
/**
- * @defgroup Error_Codes
+ * @ingroup EC
+ * @brief Response to MRQ_EC
+ *
+ * Each sub-command supported by @ref mrq_ec_request may return
+ * sub-command-specific data as indicated below.
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |@ref CMD_EC_STATUS_GET |ec_status_get |
+ *
+ */
+
+struct mrq_ec_response {
+ union {
+ struct cmd_ec_status_get_response ec_status_get;
+ } __UNION_ANON;
+} __ABI_PACKED;
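
For context, a hedged sketch of issuing CMD_EC_STATUS_GET from the CCPLEX side. It assumes the tegra_bpmp_transfer()/struct tegra_bpmp_message interface from <soc/tegra/bpmp.h>, that msg.rx.ret carries the mrq_response::err value, and that __UNION_ANON expands to an anonymous union; none of that is stated in this hunk.

#include <linux/errno.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int ec_status_get(struct tegra_bpmp *bpmp, uint32_t hsm_id,
			 struct cmd_ec_status_get_response *out)
{
	struct mrq_ec_request req = { .cmd_id = CMD_EC_STATUS_GET };
	struct mrq_ec_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_EC,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	int err;

	req.ec_status_get.ec_hsm_id = hsm_id;	/* assumes anonymous union */

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err < 0)
		return err;

	/* per the docs above: -BPMP_ENODEV if not owned, -BPMP_EACCES if off */
	if (msg.rx.ret < 0)
		return -ENODEV;

	*out = resp.ec_status_get;
	return 0;
}
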
+
+/** @} */
+/** @endcond */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_FBVOLT_STATUS
+ * @brief Provides status information about voltage state for fuse burning
+ *
+ * * Platforms: T194 onwards
+ * @cond bpmp_t194
+ * * Initiators: CCPLEX
+ * * Target: BPMP
+ * * Request Payload: None
+ * * Response Payload: @ref mrq_fbvolt_status_response
+ * @{
+ */
+
+/**
+ * @ingroup Fbvolt_status
+ * @brief Response to #MRQ_FBVOLT_STATUS
+ *
+ * Value of #ready reflects whether core voltages are in a suitable state for burning
+ * fuses. A value of 0x1 indicates that core voltages are ready for burning
+ * fuses. A value of 0x0 indicates that core voltages are not ready.
+ */
+struct mrq_fbvolt_status_response {
+ /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */
+ uint32_t ready;
+ /** @brief Reserved */
+ uint32_t unused;
+} __ABI_PACKED;
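
A small illustrative check of the response: only bit 0 of ready is defined, the remaining bits and the unused word are reserved. The helper name is not part of the ABI.

#include <linux/types.h>
#include <soc/tegra/bpmp-abi.h>

static inline bool fbvolt_ready_for_fuse_burning(
		const struct mrq_fbvolt_status_response *resp)
{
	return (resp->ready & 0x1) == 0x1;
}
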
+
+/** @} */
+/** @endcond */
+
+/**
+ * @addtogroup Error_Codes
* Negative values for mrq_response::err generally indicate some
* error. The ABI defines the following error codes. Negating these
* defines is an exercise left to the user.
* @{
*/
+
/** @brief No such file or directory */
#define BPMP_ENOENT 2
/** @brief No MRQ handler */
@@ -1994,6 +2695,11 @@ union mrq_ringbuf_console_bpmp_to_host_response {
#define BPMP_ETIMEDOUT 23
/** @brief Out of range */
#define BPMP_ERANGE 34
+/** @brief Function not implemented */
+#define BPMP_ENOSYS 38
+/** @brief Invalid slot */
+#define BPMP_EBADSLT 57
+
/** @} */
-/** @} */
+
#endif
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index e69e4c4d80ae..b02f926a0216 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -129,6 +129,7 @@ int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
tegra_bpmp_mrq_handler_t handler, void *data);
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
void *data);
+bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq);
#else
static inline struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
@@ -164,6 +165,12 @@ static inline void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq, void *data)
{
}
+
+static inline bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ return false;
+}
#endif
#if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 9b6ea0c72117..8fb2f8a87339 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -60,7 +60,6 @@ struct tegra_sku_info {
u32 tegra_read_straps(void);
u32 tegra_read_ram_code(void);
-u32 tegra_read_chipid(void);
int tegra_fuse_readl(unsigned long offset, u32 *value);
extern struct tegra_sku_info tegra_sku_info;
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index c32bf91c23e6..a9db1b501de1 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,11 +26,9 @@
struct clk;
struct reset_control;
-#ifdef CONFIG_SMP
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
-#endif /* CONFIG_SMP */
/*
* powergate and I/O rail APIs
@@ -90,6 +88,10 @@ enum tegra_io_pad {
TEGRA_IO_PAD_CSID,
TEGRA_IO_PAD_CSIE,
TEGRA_IO_PAD_CSIF,
+ TEGRA_IO_PAD_CSIG,
+ TEGRA_IO_PAD_CSIH,
+ TEGRA_IO_PAD_DAP3,
+ TEGRA_IO_PAD_DAP5,
TEGRA_IO_PAD_DBG,
TEGRA_IO_PAD_DEBUG_NONAO,
TEGRA_IO_PAD_DMIC,
@@ -102,10 +104,15 @@ enum tegra_io_pad {
TEGRA_IO_PAD_EDP,
TEGRA_IO_PAD_EMMC,
TEGRA_IO_PAD_EMMC2,
+ TEGRA_IO_PAD_EQOS,
TEGRA_IO_PAD_GPIO,
+ TEGRA_IO_PAD_GP_PWM2,
+ TEGRA_IO_PAD_GP_PWM3,
TEGRA_IO_PAD_HDMI,
TEGRA_IO_PAD_HDMI_DP0,
TEGRA_IO_PAD_HDMI_DP1,
+ TEGRA_IO_PAD_HDMI_DP2,
+ TEGRA_IO_PAD_HDMI_DP3,
TEGRA_IO_PAD_HSIC,
TEGRA_IO_PAD_HV,
TEGRA_IO_PAD_LVDS,
@@ -115,8 +122,14 @@ enum tegra_io_pad {
TEGRA_IO_PAD_PEX_CLK_BIAS,
TEGRA_IO_PAD_PEX_CLK1,
TEGRA_IO_PAD_PEX_CLK2,
+ TEGRA_IO_PAD_PEX_CLK2_BIAS,
TEGRA_IO_PAD_PEX_CLK3,
TEGRA_IO_PAD_PEX_CNTRL,
+ TEGRA_IO_PAD_PEX_CTL2,
+ TEGRA_IO_PAD_PEX_L0_RST_N,
+ TEGRA_IO_PAD_PEX_L1_RST_N,
+ TEGRA_IO_PAD_PEX_L5_RST_N,
+ TEGRA_IO_PAD_PWR_CTL,
TEGRA_IO_PAD_SDMMC1,
TEGRA_IO_PAD_SDMMC1_HV,
TEGRA_IO_PAD_SDMMC2,
@@ -124,32 +137,29 @@ enum tegra_io_pad {
TEGRA_IO_PAD_SDMMC3,
TEGRA_IO_PAD_SDMMC3_HV,
TEGRA_IO_PAD_SDMMC4,
+ TEGRA_IO_PAD_SOC_GPIO10,
+ TEGRA_IO_PAD_SOC_GPIO12,
+ TEGRA_IO_PAD_SOC_GPIO13,
+ TEGRA_IO_PAD_SOC_GPIO53,
TEGRA_IO_PAD_SPI,
TEGRA_IO_PAD_SPI_HV,
TEGRA_IO_PAD_SYS_DDC,
TEGRA_IO_PAD_UART,
+ TEGRA_IO_PAD_UART4,
+ TEGRA_IO_PAD_UART5,
TEGRA_IO_PAD_UFS,
TEGRA_IO_PAD_USB0,
TEGRA_IO_PAD_USB1,
TEGRA_IO_PAD_USB2,
TEGRA_IO_PAD_USB3,
TEGRA_IO_PAD_USB_BIAS,
+ TEGRA_IO_PAD_AO_HV,
};
/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */
#define TEGRA_IO_RAIL_HDMI TEGRA_IO_PAD_HDMI
#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS
-/**
- * enum tegra_io_pad_voltage - voltage level of the I/O pad's source rail
- * @TEGRA_IO_PAD_1800000UV: 1.8 V
- * @TEGRA_IO_PAD_3300000UV: 3.3 V
- */
-enum tegra_io_pad_voltage {
- TEGRA_IO_PAD_1800000UV,
- TEGRA_IO_PAD_3300000UV,
-};
-
#ifdef CONFIG_SOC_TEGRA_PMC
int tegra_powergate_is_powered(unsigned int id);
int tegra_powergate_power_on(unsigned int id);
@@ -162,9 +172,6 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
int tegra_io_pad_power_enable(enum tegra_io_pad id);
int tegra_io_pad_power_disable(enum tegra_io_pad id);
-int tegra_io_pad_set_voltage(enum tegra_io_pad id,
- enum tegra_io_pad_voltage voltage);
-int tegra_io_pad_get_voltage(enum tegra_io_pad id);
/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
int tegra_io_rail_power_on(unsigned int id);
@@ -212,12 +219,6 @@ static inline int tegra_io_pad_power_disable(enum tegra_io_pad id)
return -ENOSYS;
}
-static inline int tegra_io_pad_set_voltage(enum tegra_io_pad id,
- enum tegra_io_pad_voltage voltage)
-{
- return -ENOSYS;
-}
-
static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id)
{
return -ENOSYS;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ea8c93bbb0e0..0cdc3999ecfa 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -23,6 +23,7 @@ struct snd_compr_ops;
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
+ * @dma_buffer_p: runtime dma buffer pointer
* @buffer: pointer to kernel buffer, valid only when not in mmap mode or
* DSP doesn't implement copy
* @buffer_size: size of the above buffer
@@ -37,6 +38,7 @@ struct snd_compr_ops;
struct snd_compr_runtime {
snd_pcm_state_t state;
struct snd_compr_ops *ops;
+ struct snd_dma_buffer *dma_buffer_p;
void *buffer;
u64 buffer_size;
u32 fragment_size;
@@ -175,6 +177,23 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
wake_up(&stream->runtime->sleep);
}
+/**
+ * snd_compr_set_runtime_buffer - Set the Compress runtime buffer
+ * @substream: compress substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to runtime buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
+static inline void snd_compr_set_runtime_buffer(
+ struct snd_compr_stream *substream,
+ struct snd_dma_buffer *bufp)
+{
+ struct snd_compr_runtime *runtime = substream->runtime;
+
+ runtime->dma_buffer_p = bufp;
+}
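
A hedged usage sketch for the helper above: a DSP driver that preallocates its own DMA buffer can publish it to the compress core at open time and clear it on free. It assumes snd_dma_alloc_pages()/snd_dma_free_pages() from <sound/memalloc.h>; the my_* names are placeholders.

#include <sound/compress_driver.h>
#include <sound/memalloc.h>

static int my_compr_open(struct snd_compr_stream *stream, struct device *dev,
			 struct snd_dma_buffer *dmab, size_t size)
{
	int ret;

	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, dmab);
	if (ret < 0)
		return ret;

	/* make the buffer visible to the compress core */
	snd_compr_set_runtime_buffer(stream, dmab);
	return 0;
}

static void my_compr_free(struct snd_compr_stream *stream,
			  struct snd_dma_buffer *dmab)
{
	snd_compr_set_runtime_buffer(stream, NULL);
	snd_dma_free_pages(dmab);
}
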
+
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
new file mode 100644
index 000000000000..7fa48b100936
--- /dev/null
+++ b/include/sound/hda_codec.h
@@ -0,0 +1,535 @@
+/*
+ * Universal Interface for Intel High Definition Audio Codec
+ *
+ * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SOUND_HDA_CODEC_H
+#define __SOUND_HDA_CODEC_H
+
+#include <linux/kref.h>
+#include <linux/mod_devicetable.h>
+#include <sound/info.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/hwdep.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_verbs.h>
+#include <sound/hda_regmap.h>
+
+/*
+ * Structures
+ */
+
+struct hda_bus;
+struct hda_beep;
+struct hda_codec;
+struct hda_pcm;
+struct hda_pcm_stream;
+
+/*
+ * codec bus
+ *
+ * each controller needs to create a hda_bus to assign the accessor.
+ * A hda_bus contains several codecs in the list codec_list.
+ */
+struct hda_bus {
+ struct hdac_bus core;
+
+ struct snd_card *card;
+
+ struct pci_dev *pci;
+ const char *modelname;
+
+ struct mutex prepare_mutex;
+
+ /* assigned PCMs */
+ DECLARE_BITMAP(pcm_dev_bits, SNDRV_PCM_DEVICES);
+
+ /* misc op flags */
+ unsigned int needs_damn_long_delay :1;
+ unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */
+ /* status for codec/controller */
+ unsigned int shutdown :1; /* being unloaded */
+ unsigned int response_reset:1; /* controller was reset */
+ unsigned int in_reset:1; /* during reset operation */
+ unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+
+ int primary_dig_out_type; /* primary digital out PCM type */
+ unsigned int mixer_assigned; /* codec addr for mixer name */
+};
+
+/* from hdac_bus to hda_bus */
+#define to_hda_bus(bus) container_of(bus, struct hda_bus, core)
+
+/*
+ * codec preset
+ *
+ * Known codecs have the patch to build and set up the controls/PCMs
+ * better than the generic parser.
+ */
+typedef int (*hda_codec_patch_t)(struct hda_codec *);
+
+#define HDA_CODEC_ID_SKIP_PROBE 0x00000001
+#define HDA_CODEC_ID_GENERIC_HDMI 0x00000101
+#define HDA_CODEC_ID_GENERIC 0x00000201
+
+#define HDA_CODEC_REV_ENTRY(_vid, _rev, _name, _patch) \
+ { .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
+ .api_version = HDA_DEV_LEGACY, \
+ .driver_data = (unsigned long)(_patch) }
+#define HDA_CODEC_ENTRY(_vid, _name, _patch) \
+ HDA_CODEC_REV_ENTRY(_vid, 0, _name, _patch)
+
+struct hda_codec_driver {
+ struct hdac_driver core;
+ const struct hda_device_id *id;
+};
+
+int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name,
+ struct module *owner);
+#define hda_codec_driver_register(drv) \
+ __hda_codec_driver_register(drv, KBUILD_MODNAME, THIS_MODULE)
+void hda_codec_driver_unregister(struct hda_codec_driver *drv);
+#define module_hda_codec_driver(drv) \
+ module_driver(drv, hda_codec_driver_register, \
+ hda_codec_driver_unregister)
+
+/* ops set by the preset patch */
+struct hda_codec_ops {
+ int (*build_controls)(struct hda_codec *codec);
+ int (*build_pcms)(struct hda_codec *codec);
+ int (*init)(struct hda_codec *codec);
+ void (*free)(struct hda_codec *codec);
+ void (*unsol_event)(struct hda_codec *codec, unsigned int res);
+ void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg,
+ unsigned int power_state);
+#ifdef CONFIG_PM
+ int (*suspend)(struct hda_codec *codec);
+ int (*resume)(struct hda_codec *codec);
+ int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
+#endif
+ void (*reboot_notify)(struct hda_codec *codec);
+ void (*stream_pm)(struct hda_codec *codec, hda_nid_t nid, bool on);
+};
+
+/* PCM callbacks */
+struct hda_pcm_ops {
+ int (*open)(struct hda_pcm_stream *info, struct hda_codec *codec,
+ struct snd_pcm_substream *substream);
+ int (*close)(struct hda_pcm_stream *info, struct hda_codec *codec,
+ struct snd_pcm_substream *substream);
+ int (*prepare)(struct hda_pcm_stream *info, struct hda_codec *codec,
+ unsigned int stream_tag, unsigned int format,
+ struct snd_pcm_substream *substream);
+ int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
+ struct snd_pcm_substream *substream);
+ unsigned int (*get_delay)(struct hda_pcm_stream *info,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream);
+};
+
+/* PCM information for each substream */
+struct hda_pcm_stream {
+	unsigned int substreams;	/* number of substreams, 0 = does not exist */
+ unsigned int channels_min; /* min. number of channels */
+ unsigned int channels_max; /* max. number of channels */
+ hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */
+ u32 rates; /* supported rates */
+ u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */
+ unsigned int maxbps; /* supported max. bit per sample */
+ const struct snd_pcm_chmap_elem *chmap; /* chmap to override */
+ struct hda_pcm_ops ops;
+};
+
+/* PCM types */
+enum {
+ HDA_PCM_TYPE_AUDIO,
+ HDA_PCM_TYPE_SPDIF,
+ HDA_PCM_TYPE_HDMI,
+ HDA_PCM_TYPE_MODEM,
+ HDA_PCM_NTYPES
+};
+
+#define SNDRV_PCM_INVALID_DEVICE (-1)
+/* for PCM creation */
+struct hda_pcm {
+ char *name;
+ struct hda_pcm_stream stream[2];
+ unsigned int pcm_type; /* HDA_PCM_TYPE_XXX */
+ int device; /* device number to assign */
+ struct snd_pcm *pcm; /* assigned PCM instance */
+ bool own_chmap; /* codec driver provides own channel maps */
+ /* private: */
+ struct hda_codec *codec;
+ struct kref kref;
+ struct list_head list;
+};
+
+/* codec information */
+struct hda_codec {
+ struct hdac_device core;
+ struct hda_bus *bus;
+ struct snd_card *card;
+	unsigned int addr;	/* codec addr */
+ u32 probe_id; /* overridden id for probing */
+
+ /* detected preset */
+ const struct hda_device_id *preset;
+ const char *modelname; /* model name for preset */
+
+ /* set by patch */
+ struct hda_codec_ops patch_ops;
+
+ /* PCM to create, set by patch_ops.build_pcms callback */
+ struct list_head pcm_list_head;
+
+ /* codec specific info */
+ void *spec;
+
+ /* beep device */
+ struct hda_beep *beep;
+ unsigned int beep_mode;
+
+ /* widget capabilities cache */
+ u32 *wcaps;
+
+ struct snd_array mixers; /* list of assigned mixer elements */
+ struct snd_array nids; /* list of mapped mixer elements */
+
+ struct list_head conn_list; /* linked-list of connection-list */
+
+ struct mutex spdif_mutex;
+ struct mutex control_mutex;
+ struct snd_array spdif_out;
+ unsigned int spdif_in_enable; /* SPDIF input enable? */
+ const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
+ struct snd_array init_pins; /* initial (BIOS) pin configurations */
+ struct snd_array driver_pins; /* pin configs set by codec parser */
+ struct snd_array cvt_setups; /* audio convert setups */
+
+ struct mutex user_mutex;
+#ifdef CONFIG_SND_HDA_RECONFIG
+ struct snd_array init_verbs; /* additional init verbs */
+ struct snd_array hints; /* additional hints */
+ struct snd_array user_pins; /* default pin configs to override */
+#endif
+
+#ifdef CONFIG_SND_HDA_HWDEP
+ struct snd_hwdep *hwdep; /* assigned hwdep device */
+#endif
+
+ /* misc flags */
+ unsigned int in_freeing:1; /* being released */
+ unsigned int registered:1; /* codec was registered */
+ unsigned int display_power_control:1; /* needs display power */
+ unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
+ * status change
+ * (e.g. Realtek codecs)
+ */
+ unsigned int pin_amp_workaround:1; /* pin out-amp takes index
+ * (e.g. Conexant codecs)
+ */
+ unsigned int single_adc_amp:1; /* adc in-amp takes no index
+ * (e.g. CX20549 codec)
+ */
+ unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
+ unsigned int pins_shutup:1; /* pins are shut up */
+ unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
+ unsigned int no_jack_detect:1; /* Machine has no jack-detection */
+ unsigned int inv_eapd:1; /* broken h/w: inverted EAPD control */
+ unsigned int inv_jack_detect:1; /* broken h/w: inverted detection bit */
+ unsigned int pcm_format_first:1; /* PCM format must be set first */
+ unsigned int cached_write:1; /* write only to caches */
+ unsigned int dp_mst:1; /* support DP1.2 Multi-stream transport */
+ unsigned int dump_coef:1; /* dump processing coefs in codec proc file */
+ unsigned int power_save_node:1; /* advanced PM for each widget */
+ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
+ unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+#ifdef CONFIG_PM
+ unsigned long power_on_acct;
+ unsigned long power_off_acct;
+ unsigned long power_jiffies;
+#endif
+
+ /* filter the requested power state per nid */
+ unsigned int (*power_filter)(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int power_state);
+
+ /* codec-specific additional proc output */
+ void (*proc_widget_hook)(struct snd_info_buffer *buffer,
+ struct hda_codec *codec, hda_nid_t nid);
+
+ /* jack detection */
+ struct snd_array jacktbl;
+ unsigned long jackpoll_interval; /* In jiffies. Zero means no poll, rely on unsol events */
+ struct delayed_work jackpoll_work;
+
+ /* jack detection */
+ struct snd_array jacks;
+
+ int depop_delay; /* depop delay in ms, -1 for default delay time */
+
+ /* fix-up list */
+ int fixup_id;
+ const struct hda_fixup *fixup_list;
+ const char *fixup_name;
+
+ /* additional init verbs */
+ struct snd_array verbs;
+};
+
+#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
+#define hda_codec_dev(_dev) (&(_dev)->core.dev)
+
+#define list_for_each_codec(c, bus) \
+ list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus) \
+ list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
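
As a usage note for the helpers above (and for the hda_bus/codec_list relationship described earlier in this file): a controller-side sketch iterating the codecs on a bus. The function name and loop body are illustrative.

#include <sound/hda_codec.h>

static void my_dump_codecs(struct hda_bus *bus)
{
	struct hda_codec *codec;

	list_for_each_codec(codec, bus) {
		/* codec->addr is the codec address, codec->core the hdac_device */
	}
}
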
+
+/* snd_hda_codec_read/write optional flags */
+#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
+
+/*
+ * constructors
+ */
+int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec **codecp);
+int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card,
+ unsigned int codec_addr, struct hda_codec *codec);
+int snd_hda_codec_configure(struct hda_codec *codec);
+int snd_hda_codec_update_widgets(struct hda_codec *codec);
+
+/*
+ * low level functions
+ */
+static inline unsigned int
+snd_hda_codec_read(struct hda_codec *codec, hda_nid_t nid,
+ int flags,
+ unsigned int verb, unsigned int parm)
+{
+ return snd_hdac_codec_read(&codec->core, nid, flags, verb, parm);
+}
+
+static inline int
+snd_hda_codec_write(struct hda_codec *codec, hda_nid_t nid, int flags,
+ unsigned int verb, unsigned int parm)
+{
+ return snd_hdac_codec_write(&codec->core, nid, flags, verb, parm);
+}
+
+#define snd_hda_param_read(codec, nid, param) \
+ snd_hdac_read_parm(&(codec)->core, nid, param)
+#define snd_hda_get_sub_nodes(codec, nid, start_nid) \
+ snd_hdac_get_sub_nodes(&(codec)->core, nid, start_nid)
+int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
+ hda_nid_t *conn_list, int max_conns);
+static inline int
+snd_hda_get_num_conns(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_get_connections(codec, nid, NULL, 0);
+}
+
+#define snd_hda_get_raw_connections(codec, nid, list, max_conns) \
+ snd_hdac_get_connections(&(codec)->core, nid, list, max_conns)
+#define snd_hda_get_num_raw_conns(codec, nid) \
+	snd_hdac_get_connections(&(codec)->core, nid, NULL, 0)
+
+int snd_hda_get_conn_list(struct hda_codec *codec, hda_nid_t nid,
+ const hda_nid_t **listp);
+int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
+ const hda_nid_t *list);
+int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
+ hda_nid_t nid, int recursive);
+unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
+ u8 *dev_list, int max_devices);
+int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id);
+
+struct hda_verb {
+ hda_nid_t nid;
+ u32 verb;
+ u32 param;
+};
+
+void snd_hda_sequence_write(struct hda_codec *codec,
+ const struct hda_verb *seq);
+
+/* unsolicited event */
+static inline void
+snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
+{
+ snd_hdac_bus_queue_event(&bus->core, res, res_ex);
+}
+
+/* cached write */
+static inline int
+snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
+ int flags, unsigned int verb, unsigned int parm)
+{
+ return snd_hdac_regmap_write(&codec->core, nid, verb, parm);
+}
+
+/* the struct for codec->pin_configs */
+struct hda_pincfg {
+ hda_nid_t nid;
+ unsigned char ctrl; /* original pin control value */
+ unsigned char target; /* target pin control value */
+ unsigned int cfg; /* default configuration */
+};
+
+unsigned int snd_hda_codec_get_pincfg(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_codec_set_pincfg(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int cfg);
+int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list,
+ hda_nid_t nid, unsigned int cfg); /* for hwdep */
+void snd_hda_shutup_pins(struct hda_codec *codec);
+
+/* SPDIF controls */
+struct hda_spdif_out {
+	hda_nid_t nid;		/* converter NID the values relate to */
+ unsigned int status; /* IEC958 status bits */
+ unsigned short ctls; /* SPDIF control bits */
+};
+struct hda_spdif_out *snd_hda_spdif_out_of_nid(struct hda_codec *codec,
+ hda_nid_t nid);
+void snd_hda_spdif_ctls_unassign(struct hda_codec *codec, int idx);
+void snd_hda_spdif_ctls_assign(struct hda_codec *codec, int idx, hda_nid_t nid);
+
+/*
+ * Mixer
+ */
+int snd_hda_codec_build_controls(struct hda_codec *codec);
+
+/*
+ * PCM
+ */
+int snd_hda_codec_parse_pcms(struct hda_codec *codec);
+int snd_hda_codec_build_pcms(struct hda_codec *codec);
+
+__printf(2, 3)
+struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
+ const char *fmt, ...);
+
+static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
+{
+ kref_get(&pcm->kref);
+}
+void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
+
+int snd_hda_codec_prepare(struct hda_codec *codec,
+ struct hda_pcm_stream *hinfo,
+ unsigned int stream,
+ unsigned int format,
+ struct snd_pcm_substream *substream);
+void snd_hda_codec_cleanup(struct hda_codec *codec,
+ struct hda_pcm_stream *hinfo,
+ struct snd_pcm_substream *substream);
+
+void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
+ u32 stream_tag,
+ int channel_id, int format);
+void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
+ int do_now);
+#define snd_hda_codec_cleanup_stream(codec, nid) \
+ __snd_hda_codec_cleanup_stream(codec, nid, 0)
+
+#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \
+ snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp)
+#define snd_hda_is_supported_format(codec, nid, fmt) \
+ snd_hdac_is_supported_format(&(codec)->core, nid, fmt)
+
+extern const struct snd_pcm_chmap_elem snd_pcm_2_1_chmaps[];
+
+int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
+ struct hda_pcm *cpcm);
+
+/*
+ * Misc
+ */
+void snd_hda_get_codec_name(struct hda_codec *codec, char *name, int namelen);
+void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
+ unsigned int power_state);
+
+int snd_hda_lock_devices(struct hda_bus *bus);
+void snd_hda_unlock_devices(struct hda_bus *bus);
+void snd_hda_bus_reset(struct hda_bus *bus);
+void snd_hda_bus_reset_codecs(struct hda_bus *bus);
+
+int snd_hda_codec_set_name(struct hda_codec *codec, const char *name);
+
+/*
+ * power management
+ */
+extern const struct dev_pm_ops hda_codec_driver_pm;
+
+static inline
+int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid)
+{
+#ifdef CONFIG_PM
+ if (codec->patch_ops.check_power_status)
+ return codec->patch_ops.check_power_status(codec, nid);
+#endif
+ return 0;
+}
+
+/*
+ * power saving
+ */
+#define snd_hda_power_up(codec) snd_hdac_power_up(&(codec)->core)
+#define snd_hda_power_up_pm(codec) snd_hdac_power_up_pm(&(codec)->core)
+#define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core)
+#define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core)
+#ifdef CONFIG_PM
+void snd_hda_set_power_save(struct hda_bus *bus, int delay);
+void snd_hda_update_power_acct(struct hda_codec *codec);
+#else
+static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {}
+#endif
+
+#ifdef CONFIG_SND_HDA_PATCH_LOADER
+/*
+ * patch firmware
+ */
+int snd_hda_load_patch(struct hda_bus *bus, size_t size, const void *buf);
+#endif
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+int snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
+ unsigned int size,
+ struct snd_dma_buffer *bufp);
+void snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start);
+void snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
+ struct snd_dma_buffer *dmab);
+#else
+static inline int
+snd_hda_codec_load_dsp_prepare(struct hda_codec *codec, unsigned int format,
+ unsigned int size,
+ struct snd_dma_buffer *bufp)
+{
+ return -ENOSYS;
+}
+static inline void
+snd_hda_codec_load_dsp_trigger(struct hda_codec *codec, bool start) {}
+static inline void
+snd_hda_codec_load_dsp_cleanup(struct hda_codec *codec,
+ struct snd_dma_buffer *dmab) {}
+#endif
+
+#endif /* __SOUND_HDA_CODEC_H */
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 78626cde7081..2ec31b358950 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -5,10 +5,15 @@
#define __SOUND_HDA_COMPONENT_H
#include <drm/drm_audio_component.h>
+#include <sound/hdaudio.h>
+
+/* virtual idx for controller */
+#define HDA_CODEC_IDX_CONTROLLER HDA_MAX_CODECS
#ifdef CONFIG_SND_HDA_COMPONENT
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx,
+ bool enable);
int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
int dev_id, int rate);
int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
@@ -25,9 +30,9 @@ static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+static inline void snd_hdac_display_power(struct hdac_bus *bus,
+ unsigned int idx, bool enable)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
hda_nid_t nid, int dev_id, int rate)
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6f1e1f3b3063..b4fa1c775251 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -79,7 +79,6 @@ struct hdac_device {
/* misc flags */
atomic_t in_pm; /* suspend/resume being performed */
- bool link_power_control:1;
/* sysfs */
struct hdac_widget_tree *widgets;
@@ -99,6 +98,12 @@ enum {
HDA_DEV_ASOC,
};
+enum {
+ SND_SKL_PCI_BIND_AUTO, /* automatic selection based on pci class */
+ SND_SKL_PCI_BIND_LEGACY,/* bind only with legacy driver */
+ SND_SKL_PCI_BIND_ASOC /* bind only with ASoC driver */
+};
+
/* direction */
enum {
HDA_INPUT, HDA_OUTPUT
@@ -237,8 +242,6 @@ struct hdac_bus_ops {
/* get a response from the last command */
int (*get_response)(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
- /* control the link power */
- int (*link_power)(struct hdac_bus *bus, bool enable);
};
/*
@@ -363,7 +366,8 @@ struct hdac_bus {
/* DRM component interface */
struct drm_audio_component *audio_component;
- int drm_power_refcount;
+ long display_power_status;
+ bool display_power_active;
/* parameters required for enhanced capabilities */
int num_streams;
@@ -389,6 +393,7 @@ void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
void snd_hdac_bus_remove_device(struct hdac_bus *bus,
struct hdac_device *codec);
+void snd_hdac_bus_process_unsol_events(struct work_struct *work);
static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
{
@@ -404,7 +409,6 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus);
-int snd_hdac_link_power(struct hdac_device *codec, bool enable);
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_stop_chip(struct hdac_bus *bus);
@@ -412,6 +416,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 67561b997915..af3fa577fa06 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -47,10 +47,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
+#define SNDRV_DMA_TYPE_DEV_UC		5	/* continuous non-cached */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 2dd37cada7c0..888a833d3b00 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
- (i->min + 1 == i->max && i->openmax));
+ (i->min + 1 == i->max && (i->openmin || i->openmax)));
}
static inline int snd_interval_value(const struct snd_interval *i)
{
+ if (i->openmin && !i->openmax)
+ return i->max;
return i->min;
}
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 6665cb29e1a2..3b5a061132b6 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -171,6 +171,7 @@ int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
int count);
+int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 8bc5e2d8b13c..6d69ed2bd7b1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -51,29 +51,35 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
- dai_link->cpu_dai_name)
+ dai_link->cpu_dai_name, NULL)
#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
- dai_link->codec_dai_name)
+ dai_link->codec_dai_name, dai_link->codecs)
int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
struct asoc_simple_dai *simple_dai,
- const char *name);
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc);
int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai);
void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
#define asoc_simple_card_parse_cpu(node, dai_link, \
list_name, cells_name, is_single_link) \
- asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_dai(node, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \
- &dai_link->codec_dai_name, list_name, cells_name, NULL)
+ asoc_simple_card_parse_dai(node, dai_link->codecs, \
+ &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name, \
+ list_name, cells_name, NULL)
#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \
+ asoc_simple_card_parse_dai(node, dai_link->platform, \
+ &dai_link->platform_of_node, \
NULL, list_name, cells_name, NULL)
int asoc_simple_card_parse_dai(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name,
const char *list_name,
@@ -81,12 +87,15 @@ int asoc_simple_card_parse_dai(struct device_node *node,
int *is_single_links);
#define asoc_simple_card_parse_graph_cpu(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name)
#define asoc_simple_card_parse_graph_codec(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->codec_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, dai_link->codecs, \
+ &dai_link->codec_of_node, \
&dai_link->codec_dai_name)
int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name);
@@ -107,12 +116,12 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card);
void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
struct snd_pcm_hw_params *params);
-void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+void asoc_simple_card_parse_convert(struct device *dev,
+ struct device_node *np, char *prefix,
struct asoc_simple_card_data *data);
int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
- char *prefix,
- int optional);
+ char *prefix);
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index bb1d24b703fb..bb5e1e4ce8bf 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,5 +24,12 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
+
+/*
+ * generic table used for HDA codec-based platforms, possibly with
+ * additional ACPI-enumerated codecs
+ */
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[];
#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index e45b2330d16a..266e64e3c24c 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -38,6 +38,20 @@ struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
/**
+ * snd_soc_acpi_mach_params: interface for machine driver configuration
+ *
+ * @acpi_ipc_irq_index: used for BYT-CR detection
+ * @platform: string used for HDAudio codec support
+ * @codec_mask: used for HDAudio support
+ * @dmic_num: number of digital microphones attached to the platform
+ */
+struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+ const char *platform;
+ u32 codec_mask;
+ u32 dmic_num;
+};
+
+/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
* A platform supported by legacy and Sound Open Firmware (SOF) would expose
@@ -68,6 +82,7 @@ struct snd_soc_acpi_mach {
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
void *pdata;
+ struct snd_soc_acpi_mach_params mach_params;
const char *sof_fw_filename;
const char *sof_tplg_filename;
const char *asoc_plat_name;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af9ef16cc34d..bd8163f151cb 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -406,11 +406,6 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
@@ -589,9 +584,6 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
struct pinctrl *pinctrl; /* attached pinctrl */
- const struct snd_soc_pcm_stream *params; /* params for dai links */
- unsigned int num_params; /* number of params for dai links */
- unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 9bb92f187af8..4be3a2b7c106 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,6 +103,16 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
+#define for_each_dpcm_fe(be, stream, dpcm) \
+ list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, dpcm) \
+ list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
+ list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
+ list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 41cec42fb456..8ec1de856ee7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -372,6 +372,11 @@
#define SND_SOC_COMP_ORDER_LATE 1
#define SND_SOC_COMP_ORDER_LAST 2
+#define for_each_comp_order(order) \
+ for (order = SND_SOC_COMP_ORDER_FIRST; \
+ order <= SND_SOC_COMP_ORDER_LAST; \
+ order++)
+
/*
* Bias levels
*
@@ -548,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
}
#endif
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
+#ifdef CONFIG_SND_SOC_AC97_BUS
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
struct platform_device *pdev);
@@ -859,6 +864,11 @@ struct snd_soc_component {
#endif
};
+#define for_each_component_dais(component, dai)\
+ list_for_each_entry(dai, &(component)->dai_list, list)
+#define for_each_component_dais_safe(component, dai, _dai)\
+ list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list)
+
struct snd_soc_rtdcom_list {
struct snd_soc_component *component;
struct list_head list; /* rtd::component_list */
@@ -915,6 +925,8 @@ struct snd_soc_dai_link {
*/
const char *platform_name;
struct device_node *platform_of_node;
+ struct snd_soc_dai_link_component *platform;
+
int id; /* optional ID for machine driver link identification */
const struct snd_soc_pcm_stream *params;
@@ -976,6 +988,10 @@ struct snd_soc_dai_link {
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
+#define for_each_link_codecs(link, i, codec) \
+ for ((i) = 0; \
+ ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \
+ (i)++)
struct snd_soc_codec_conf {
/*
@@ -1054,7 +1070,6 @@ struct snd_soc_card {
struct snd_soc_dai_link *dai_link; /* predefined links only */
int num_links; /* predefined links only */
struct list_head dai_link_list; /* all links */
- int num_dai_links;
struct list_head rtd_list;
int num_rtd;
@@ -1092,6 +1107,7 @@ struct snd_soc_card {
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
+ struct list_head list;
struct list_head widgets;
struct list_head paths;
@@ -1114,6 +1130,23 @@ struct snd_soc_card {
void *drvdata;
};
+#define for_each_card_prelinks(card, i, link) \
+ for ((i) = 0; \
+ ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \
+ (i)++)
+
+#define for_each_card_links(card, link) \
+ list_for_each_entry(dai_link, &(card)->dai_link_list, list)
+#define for_each_card_links_safe(card, link, _link) \
+ list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list)
+
+#define for_each_card_rtds(card, rtd) \
+ list_for_each_entry(rtd, &(card)->rtd_list, list)
+#define for_each_card_rtds_safe(card, rtd, _rtd) \
+ list_for_each_entry_safe(rtd, _rtd, &(card)->rtd_list, list)
+
+#define for_each_card_components(card, component) \
+ list_for_each_entry(component, &(card)->component_dev_list, card_list)
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
@@ -1124,6 +1157,8 @@ struct snd_soc_pcm_runtime {
enum snd_soc_pcm_subclass pcm_subclass;
struct snd_pcm_ops ops;
+ unsigned int params_select; /* currently selected param for dai link */
+
/* Dynamic PCM BE runtime data */
struct snd_soc_dpcm_runtime dpcm[2];
int fe_compr;
@@ -1152,6 +1187,13 @@ struct snd_soc_pcm_runtime {
unsigned int dev_registered:1;
unsigned int pop_wait:1;
};
+#define for_each_rtd_codec_dai(rtd, i, dai)\
+ for ((i) = 0; \
+ ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+ (i)++)
+#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
+ for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+
/* mixer control */
struct soc_mixer_control {
@@ -1359,6 +1401,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->dapm_list);
INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
+ INIT_LIST_HEAD(&card->list);
}
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
@@ -1434,10 +1477,20 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *rx_mask,
unsigned int *slots,
unsigned int *slot_width);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+void snd_soc_of_parse_node_prefix(struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname);
+static inline
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname)
+{
+ snd_soc_of_parse_node_prefix(card->dev->of_node,
+ codec_conf, of_node, propname);
+}
+
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index f2e6abea8490..24c398f4a68f 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -25,6 +25,7 @@ struct sock;
#define ISCSIT_TCP_BACKLOG 256
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_IQN_LEN 224
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -270,9 +271,9 @@ struct iscsi_conn_ops {
};
struct iscsi_sess_ops {
- char InitiatorName[224];
+ char InitiatorName[ISCSI_IQN_LEN];
char InitiatorAlias[256];
- char TargetName[224];
+ char TargetName[ISCSI_IQN_LEN];
char TargetAlias[256];
char TargetAddress[256];
u16 TargetPortalGroupTag; /* [0..65535] */
@@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps {
};
struct iscsi_tiqn {
-#define ISCSI_IQN_LEN 224
unsigned char tiqn[ISCSI_IQN_LEN];
enum tiqn_state_table tiqn_state;
int tiqn_access_count;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 4d75a2c426ca..ff6a47209313 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -33,7 +33,7 @@ struct iscsi_sess_err_stats {
u32 cxn_timeout_errors;
u32 pdu_format_errors;
u32 last_sess_failure_type;
- char last_sess_fail_rem_name[224];
+ char last_sess_fail_rem_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI login failure types (sub oids) */
@@ -56,7 +56,7 @@ struct iscsi_login_stats {
u32 last_fail_type;
int last_intr_fail_ip_family;
struct sockaddr_storage last_intr_fail_sockaddr;
- char last_intr_fail_name[224];
+ char last_intr_fail_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI logout stats */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee7852ca4..69b7b955902c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -46,6 +46,10 @@
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
+#define INQUIRY_VENDOR_LEN 8
+#define INQUIRY_MODEL_LEN 16
+#define INQUIRY_REVISION_LEN 4
+
/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
@@ -87,6 +91,8 @@
#define DA_EMULATE_3PC 1
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
+/* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */
+#define DA_EMULATE_PR 1
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
/* Force SPC-3 PR Activate Persistence across Target Power Loss */
@@ -134,11 +140,9 @@ enum se_cmd_flags_table {
SCF_SENT_CHECK_CONDITION = 0x00000800,
SCF_OVERFLOW_BIT = 0x00001000,
SCF_UNDERFLOW_BIT = 0x00002000,
- SCF_SEND_DELAYED_TAS = 0x00004000,
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_COMPARE_AND_WRITE = 0x00080000,
- SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
@@ -315,9 +319,13 @@ struct t10_vpd {
};
struct t10_wwn {
- char vendor[8];
- char model[16];
- char revision[4];
+ /*
+ * SCSI left aligned strings may not be null terminated. +1 to ensure a
+ * null terminator is always present.
+ */
+ char vendor[INQUIRY_VENDOR_LEN + 1];
+ char model[INQUIRY_MODEL_LEN + 1];
+ char revision[INQUIRY_REVISION_LEN + 1];
char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
struct se_device *t10_dev;
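
A brief sketch of why the +1 matters: raw INQUIRY strings are fixed-width and space-padded, so callers copy exactly the field width and terminate explicitly. The helper below is illustrative; buf is assumed to point at the raw 8-byte vendor identification field.

#include <linux/string.h>
#include <target/target_core_base.h>

static void wwn_set_vendor(struct t10_wwn *wwn, const unsigned char *buf)
{
	memcpy(wwn->vendor, buf, INQUIRY_VENDOR_LEN);
	wwn->vendor[INQUIRY_VENDOR_LEN] = '\0';	/* the extra byte added above */
}
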
@@ -475,7 +483,8 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion *compl;
+ struct completion *free_compl;
+ struct completion *abrt_compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -602,6 +611,7 @@ struct se_session {
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
+ struct percpu_ref cmd_count;
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
@@ -665,7 +675,7 @@ struct se_dev_attrib {
int emulate_tpws;
int emulate_caw;
int emulate_3pc;
- int pi_prot_format;
+ int emulate_pr;
enum target_prot_type pi_prot_type;
enum target_prot_type hw_pi_prot_type;
int pi_prot_verify;
@@ -732,7 +742,6 @@ struct se_lun {
struct scsi_port_stats lun_stats;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
- struct completion lun_ref_comp;
struct completion lun_shutdown_comp;
struct percpu_ref lun_ref;
struct list_head lun_dev_link;
@@ -795,7 +804,6 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct workqueue_struct *tmr_wq;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
struct list_head state_list;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index f4147b398431..ee5ddd81cd8d 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -8,7 +8,18 @@
struct target_core_fabric_ops {
struct module *module;
- const char *name;
+ /*
+ * XXX: Special case for iscsi/iSCSI...
+ * If non-null, fabric_alias is used for matching target/$fabric
+ * ConfigFS paths. If null, fabric_name is used for this (see below).
+ */
+ const char *fabric_alias;
+ /*
+ * fabric_name is used for matching target/$fabric ConfigFS paths
+ * without a fabric_alias (see above). It's also used for the ALUA state
+ * path and is stored on disk with PR state.
+ */
+ const char *fabric_name;
size_t node_acl_size;
/*
* Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload.
@@ -23,7 +34,6 @@ struct target_core_fabric_ops {
* XXX: Currently assumes single PAGE_SIZE per scatterlist entry
*/
u32 max_data_sg_nents;
- char *(*get_fabric_name)(void);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
u32 (*tpg_get_default_depth)(struct se_portal_group *);
@@ -101,6 +111,13 @@ struct target_core_fabric_ops {
struct configfs_attribute **tfc_tpg_nacl_attrib_attrs;
struct configfs_attribute **tfc_tpg_nacl_auth_attrs;
struct configfs_attribute **tfc_tpg_nacl_param_attrs;
+
+ /*
+ * Set this member variable to true if the SCSI transport protocol
+ * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
+ * its entirety before a command is aborted.
+ */
+ bool write_pending_must_be_called;
};
int target_register_template(const struct target_core_fabric_ops *fo);
@@ -116,7 +133,7 @@ struct se_session *target_setup_session(struct se_portal_group *,
struct se_session *, void *));
void target_remove_session(struct se_session *);
-void transport_init_session(struct se_session *);
+int transport_init_session(struct se_session *se_sess);
struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
@@ -149,12 +166,12 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
+void target_put_cmd_and_wait(struct se_cmd *cmd);
void target_execute_cmd(struct se_cmd *cmd);
int transport_generic_free_cmd(struct se_cmd *, int);
bool transport_wait_for_tasks(struct se_cmd *);
-int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *,
sense_reason_t, int);
int target_get_sess_cmd(struct se_cmd *, bool);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index d0a341bc4540..33d291888ba9 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -54,6 +54,35 @@ enum afs_fs_operation {
afs_FS_StoreData64 = 65538, /* AFS Store file data */
afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */
afs_FS_GetCapabilities = 65540, /* AFS Get FS server capabilities */
+
+ yfs_FS_FetchData = 130, /* YFS Fetch file data */
+ yfs_FS_FetchACL = 64131, /* YFS Fetch file ACL */
+ yfs_FS_FetchStatus = 64132, /* YFS Fetch file status */
+ yfs_FS_StoreACL = 64134, /* YFS Store file ACL */
+ yfs_FS_StoreStatus = 64135, /* YFS Store file status */
+ yfs_FS_RemoveFile = 64136, /* YFS Remove a file */
+ yfs_FS_CreateFile = 64137, /* YFS Create a file */
+ yfs_FS_Rename = 64138, /* YFS Rename or move a file or directory */
+ yfs_FS_Symlink = 64139, /* YFS Create a symbolic link */
+ yfs_FS_Link = 64140, /* YFS Create a hard link */
+ yfs_FS_MakeDir = 64141, /* YFS Create a directory */
+ yfs_FS_RemoveDir = 64142, /* YFS Remove a directory */
+ yfs_FS_GetVolumeStatus = 64149, /* YFS Get volume status information */
+ yfs_FS_SetVolumeStatus = 64150, /* YFS Set volume status information */
+ yfs_FS_SetLock = 64156, /* YFS Request a file lock */
+ yfs_FS_ExtendLock = 64157, /* YFS Extend a file lock */
+ yfs_FS_ReleaseLock = 64158, /* YFS Release a file lock */
+ yfs_FS_Lookup = 64161, /* YFS lookup file in directory */
+ yfs_FS_FlushCPS = 64165,
+ yfs_FS_FetchOpaqueACL = 64168,
+ yfs_FS_WhoAmI = 64170,
+ yfs_FS_RemoveACL = 64171,
+ yfs_FS_RemoveFile2 = 64173,
+ yfs_FS_StoreOpaqueACL2 = 64174,
+ yfs_FS_InlineBulkStatus = 64536, /* YFS Fetch multiple file statuses with errors */
+ yfs_FS_FetchData64 = 64537, /* YFS Fetch file data */
+ yfs_FS_StoreData64 = 64538, /* YFS Store file data */
+ yfs_FS_UpdateSymlink = 64540,
};
enum afs_vl_operation {
@@ -84,6 +113,44 @@ enum afs_edit_dir_reason {
afs_edit_dir_for_unlink,
};
+enum afs_eproto_cause {
+ afs_eproto_bad_status,
+ afs_eproto_cb_count,
+ afs_eproto_cb_fid_count,
+ afs_eproto_file_type,
+ afs_eproto_ibulkst_cb_count,
+ afs_eproto_ibulkst_count,
+ afs_eproto_motd_len,
+ afs_eproto_offline_msg_len,
+ afs_eproto_volname_len,
+ afs_eproto_yvl_fsendpt4_len,
+ afs_eproto_yvl_fsendpt6_len,
+ afs_eproto_yvl_fsendpt_num,
+ afs_eproto_yvl_fsendpt_type,
+ afs_eproto_yvl_vlendpt4_len,
+ afs_eproto_yvl_vlendpt6_len,
+ afs_eproto_yvl_vlendpt_type,
+};
+
+enum afs_io_error {
+ afs_io_error_cm_reply,
+ afs_io_error_extract,
+ afs_io_error_fs_probe_fail,
+ afs_io_error_vl_lookup_fail,
+ afs_io_error_vl_probe_fail,
+};
+
+enum afs_file_error {
+ afs_file_error_dir_bad_magic,
+ afs_file_error_dir_big,
+ afs_file_error_dir_missing_page,
+ afs_file_error_dir_over_end,
+ afs_file_error_dir_small,
+ afs_file_error_dir_unmarked_ext,
+ afs_file_error_mntpt,
+ afs_file_error_writeback_fail,
+};
+
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -119,7 +186,34 @@ enum afs_edit_dir_reason {
EM(afs_FS_FetchData64, "FS.FetchData64") \
EM(afs_FS_StoreData64, "FS.StoreData64") \
EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \
- E_(afs_FS_GetCapabilities, "FS.GetCapabilities")
+ EM(afs_FS_GetCapabilities, "FS.GetCapabilities") \
+ EM(yfs_FS_FetchACL, "YFS.FetchACL") \
+ EM(yfs_FS_FetchStatus, "YFS.FetchStatus") \
+ EM(yfs_FS_StoreACL, "YFS.StoreACL") \
+ EM(yfs_FS_StoreStatus, "YFS.StoreStatus") \
+ EM(yfs_FS_RemoveFile, "YFS.RemoveFile") \
+ EM(yfs_FS_CreateFile, "YFS.CreateFile") \
+ EM(yfs_FS_Rename, "YFS.Rename") \
+ EM(yfs_FS_Symlink, "YFS.Symlink") \
+ EM(yfs_FS_Link, "YFS.Link") \
+ EM(yfs_FS_MakeDir, "YFS.MakeDir") \
+ EM(yfs_FS_RemoveDir, "YFS.RemoveDir") \
+ EM(yfs_FS_GetVolumeStatus, "YFS.GetVolumeStatus") \
+ EM(yfs_FS_SetVolumeStatus, "YFS.SetVolumeStatus") \
+ EM(yfs_FS_SetLock, "YFS.SetLock") \
+ EM(yfs_FS_ExtendLock, "YFS.ExtendLock") \
+ EM(yfs_FS_ReleaseLock, "YFS.ReleaseLock") \
+ EM(yfs_FS_Lookup, "YFS.Lookup") \
+ EM(yfs_FS_FlushCPS, "YFS.FlushCPS") \
+ EM(yfs_FS_FetchOpaqueACL, "YFS.FetchOpaqueACL") \
+ EM(yfs_FS_WhoAmI, "YFS.WhoAmI") \
+ EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \
+ EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \
+ EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \
+ EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \
+ EM(yfs_FS_FetchData64, "YFS.FetchData64") \
+ EM(yfs_FS_StoreData64, "YFS.StoreData64") \
+ E_(yfs_FS_UpdateSymlink, "YFS.UpdateSymlink")
#define afs_vl_operations \
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
@@ -146,6 +240,40 @@ enum afs_edit_dir_reason {
EM(afs_edit_dir_for_symlink, "Symlnk") \
E_(afs_edit_dir_for_unlink, "Unlink")
+#define afs_eproto_causes \
+ EM(afs_eproto_bad_status, "BadStatus") \
+ EM(afs_eproto_cb_count, "CbCount") \
+ EM(afs_eproto_cb_fid_count, "CbFidCount") \
+ EM(afs_eproto_file_type, "FileType") \
+ EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
+ EM(afs_eproto_ibulkst_count, "IBS.FidCount") \
+ EM(afs_eproto_motd_len, "MotdLen") \
+ EM(afs_eproto_offline_msg_len, "OfflineMsgLen") \
+ EM(afs_eproto_volname_len, "VolNameLen") \
+ EM(afs_eproto_yvl_fsendpt4_len, "YVL.FsEnd4Len") \
+ EM(afs_eproto_yvl_fsendpt6_len, "YVL.FsEnd6Len") \
+ EM(afs_eproto_yvl_fsendpt_num, "YVL.FsEndCount") \
+ EM(afs_eproto_yvl_fsendpt_type, "YVL.FsEndType") \
+ EM(afs_eproto_yvl_vlendpt4_len, "YVL.VlEnd4Len") \
+ EM(afs_eproto_yvl_vlendpt6_len, "YVL.VlEnd6Len") \
+ E_(afs_eproto_yvl_vlendpt_type, "YVL.VlEndType")
+
+#define afs_io_errors \
+ EM(afs_io_error_cm_reply, "CM_REPLY") \
+ EM(afs_io_error_extract, "EXTRACT") \
+ EM(afs_io_error_fs_probe_fail, "FS_PROBE_FAIL") \
+ EM(afs_io_error_vl_lookup_fail, "VL_LOOKUP_FAIL") \
+ E_(afs_io_error_vl_probe_fail, "VL_PROBE_FAIL")
+
+#define afs_file_errors \
+ EM(afs_file_error_dir_bad_magic, "DIR_BAD_MAGIC") \
+ EM(afs_file_error_dir_big, "DIR_BIG") \
+ EM(afs_file_error_dir_missing_page, "DIR_MISSING_PAGE") \
+ EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
+ EM(afs_file_error_dir_small, "DIR_SMALL") \
+ EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
+ EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
+ E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
/*
* Export enum symbols via userspace.
@@ -160,6 +288,9 @@ afs_fs_operations;
afs_vl_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
+afs_eproto_causes;
+afs_io_errors;
+afs_file_errors;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -170,17 +301,16 @@ afs_edit_dir_reasons;
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
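The afs_* lists above are expanded twice: first with EM()/E_() defined as TRACE_DEFINE_ENUM() so the enum names are exported to userspace, then again with the { value, string } definitions just given here, which is what __print_symbolic() consumes. The following is a small, self-contained C sketch of the same redefine-and-reexpand idea, using a made-up list rather than the AFS ones.

#include <stdio.h>

#define demo_colors EM(RED, "red") EM(GREEN, "green") E_(BLUE, "blue")

/* Pass 1: expand the list into enum constants. */
#define EM(a, b) a,
#define E_(a, b) a
enum demo_color { demo_colors };
#undef EM
#undef E_

/* Pass 2: expand the same list into { value, string } pairs. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } demo_map[] = { demo_colors };

int main(void)
{
        for (unsigned int i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
                printf("%d -> %s\n", demo_map[i].val, demo_map[i].name);
        return 0;
}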
-TRACE_EVENT(afs_recv_data,
- TP_PROTO(struct afs_call *call, unsigned count, unsigned offset,
+TRACE_EVENT(afs_receive_data,
+ TP_PROTO(struct afs_call *call, struct iov_iter *iter,
bool want_more, int ret),
- TP_ARGS(call, count, offset, want_more, ret),
+ TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
+ __field(loff_t, remain )
__field(unsigned int, call )
__field(enum afs_call_state, state )
- __field(unsigned int, count )
- __field(unsigned int, offset )
__field(unsigned short, unmarshall )
__field(bool, want_more )
__field(int, ret )
@@ -190,17 +320,18 @@ TRACE_EVENT(afs_recv_data,
__entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
- __entry->count = count;
- __entry->offset = offset;
+ __entry->remain = iov_iter_count(iter);
__entry->want_more = want_more;
__entry->ret = ret;
),
- TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
+ TP_printk("c=%08x r=%llu u=%u w=%u s=%u ret=%d",
__entry->call,
- __entry->state, __entry->unmarshall,
- __entry->offset, __entry->count,
- __entry->want_more, __entry->ret)
+ __entry->remain,
+ __entry->unmarshall,
+ __entry->want_more,
+ __entry->state,
+ __entry->ret)
);
TRACE_EVENT(afs_notify_call,
@@ -301,7 +432,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%08x %06x:%06x:%06x %s",
+ TP_printk("c=%08x %06llx:%06llx:%06x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -555,24 +686,70 @@ TRACE_EVENT(afs_edit_dir,
);
TRACE_EVENT(afs_protocol_error,
- TP_PROTO(struct afs_call *call, int error, const void *where),
+ TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
+
+ TP_ARGS(call, error, cause),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(int, error )
+ __field(enum afs_eproto_cause, cause )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call ? call->debug_id : 0;
+ __entry->error = error;
+ __entry->cause = cause;
+ ),
+
+ TP_printk("c=%08x r=%d %s",
+ __entry->call, __entry->error,
+ __print_symbolic(__entry->cause, afs_eproto_causes))
+ );
+
+TRACE_EVENT(afs_io_error,
+ TP_PROTO(unsigned int call, int error, enum afs_io_error where),
TP_ARGS(call, error, where),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(int, error )
- __field(const void *, where )
+ __field(enum afs_io_error, where )
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
+ __entry->call = call;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("c=%08x r=%d %s",
+ __entry->call, __entry->error,
+ __print_symbolic(__entry->where, afs_io_errors))
+ );
+
+TRACE_EVENT(afs_file_error,
+ TP_PROTO(struct afs_vnode *vnode, int error, enum afs_file_error where),
+
+ TP_ARGS(vnode, error, where),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ __field(int, error )
+ __field(enum afs_file_error, where )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
__entry->error = error;
__entry->where = where;
),
- TP_printk("c=%08x r=%d sp=%pSR",
- __entry->call, __entry->error, __entry->where)
+ TP_printk("%llx:%llx:%x r=%d %s",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->error,
+ __print_symbolic(__entry->where, afs_file_errors))
);
TRACE_EVENT(afs_cm_no_server,
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 2cbd6e42ad83..e4526f85c19d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -221,9 +221,30 @@ DEFINE_EVENT(cache_set, bcache_journal_entry_full,
TP_ARGS(c)
);
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
+TRACE_EVENT(bcache_journal_write,
+ TP_PROTO(struct bio *bio, u32 keys),
+ TP_ARGS(bio, keys),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __field(u32, nr_keys )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio_dev(bio);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ __entry->nr_keys = keys;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u keys %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __entry->nr_keys)
);
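bcache_journal_write is promoted from the shared bcache_bio class to a standalone TRACE_EVENT so it can record the key count next to the bio. A hedged sketch of the call shape, with a hypothetical nr_keys variable:

/* Sketch only: nr_keys stands for the number of keys in this journal entry. */
static void demo_journal_write_trace(struct bio *bio, u32 nr_keys)
{
        trace_bcache_journal_write(bio, nr_keys);
}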
/* Btree */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index b401c4e36394..2887503e4d12 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -92,7 +92,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
#define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_FSID_SIZE)
#define TP_fast_assign_fsid(fs_info) \
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE)
+ memcpy(__entry->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)
#define TP_STRUCT__entry_btrfs(args...) \
TP_STRUCT__entry( \
@@ -316,7 +316,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
),
TP_fast_assign_btrfs(bi->root->fs_info,
- __entry->root_obj = bi->root->objectid;
+ __entry->root_obj = bi->root->root_key.objectid;
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -367,7 +367,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign_btrfs(
bi->root->fs_info,
- __entry->root_obj = bi->root->objectid;
+ __entry->root_obj = bi->root->root_key.objectid;
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -1048,6 +1048,8 @@ TRACE_EVENT(btrfs_trigger_flush,
{ FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
{ FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
{ FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
+ { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
+ { FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS"}, \
{ ALLOC_CHUNK, "ALLOC_CHUNK"}, \
{ COMMIT_TRANS, "COMMIT_TRANS"})
@@ -1477,7 +1479,8 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->rootid = BTRFS_I(inode)->root->objectid;
+ __entry->rootid =
+ BTRFS_I(inode)->root->root_key.objectid;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
@@ -1575,6 +1578,27 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_ARGS(fs_info, rec)
);
+TRACE_EVENT(qgroup_num_dirty_extents,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid,
+ u64 num_dirty_extents),
+
+ TP_ARGS(fs_info, transid, num_dirty_extents),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, transid )
+ __field( u64, num_dirty_extents )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->transid = transid;
+ __entry->num_dirty_extents = num_dirty_extents;
+ ),
+
+ TP_printk_btrfs("transid=%llu num_dirty_extents=%llu",
+ __entry->transid, __entry->num_dirty_extents)
+);
+
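A hedged sketch of emitting the new qgroup counter; the wrapper function and the num_dirty_extents variable are assumptions for illustration, not code from this patch.

/* Sketch only: report how many dirty extent records the transaction touched. */
static void demo_report_dirty_extents(const struct btrfs_fs_info *fs_info,
                                      u64 transid, u64 num_dirty_extents)
{
        if (trace_qgroup_num_dirty_extents_enabled())
                trace_qgroup_num_dirty_extents(fs_info, transid,
                                               num_dirty_extents);
}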
TRACE_EVENT(btrfs_qgroup_account_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid, u64 bytenr,
@@ -1675,7 +1699,7 @@ TRACE_EVENT(qgroup_meta_reserve,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
__entry->diff = diff;
),
@@ -1697,7 +1721,7 @@ TRACE_EVENT(qgroup_meta_convert,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
__entry->diff = diff;
),
@@ -1721,7 +1745,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->objectid;
+ __entry->refroot = root->root_key.objectid;
spin_lock(&root->qgroup_meta_rsv_lock);
__entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
spin_unlock(&root->qgroup_meta_rsv_lock);
@@ -1802,7 +1826,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->objectid;
+ __entry->root_objectid = root->root_key.objectid;
__entry->ino = ino;
__entry->mod = mod;
),
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index d74722c2ac8b..a401ff5e7847 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -53,24 +53,22 @@ DEFINE_EVENT(cgroup_root, cgroup_remount,
DECLARE_EVENT_CLASS(cgroup,
- TP_PROTO(struct cgroup *cgrp),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgrp),
+ TP_ARGS(cgrp, path),
TP_STRUCT__entry(
__field( int, root )
__field( int, id )
__field( int, level )
- __dynamic_array(char, path,
- cgroup_path(cgrp, NULL, 0) + 1)
+ __string( path, path )
),
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgrp->id;
__entry->level = cgrp->level;
- cgroup_path(cgrp, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
+ __assign_str(path, path);
),
TP_printk("root=%d id=%d level=%d path=%s",
@@ -79,45 +77,45 @@ DECLARE_EVENT_CLASS(cgroup,
DEFINE_EVENT(cgroup, cgroup_mkdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rmdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_release,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rename,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DECLARE_EVENT_CLASS(cgroup_migrate,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup),
+ TP_ARGS(dst_cgrp, path, task, threadgroup),
TP_STRUCT__entry(
__field( int, dst_root )
__field( int, dst_id )
__field( int, dst_level )
- __dynamic_array(char, dst_path,
- cgroup_path(dst_cgrp, NULL, 0) + 1)
__field( int, pid )
+ __string( dst_path, path )
__string( comm, task->comm )
),
@@ -125,8 +123,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__entry->dst_root = dst_cgrp->root->hierarchy_id;
__entry->dst_id = dst_cgrp->id;
__entry->dst_level = dst_cgrp->level;
- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
- __get_dynamic_array_len(dst_path));
+ __assign_str(dst_path, path);
__entry->pid = task->pid;
__assign_str(comm, task->comm);
),
@@ -138,16 +135,18 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
#endif /* _TRACE_CGROUP_H */
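The cgroup events now take a pre-formatted path instead of calling cgroup_path() inside TP_fast_assign(), so the caller resolves the path once and hands it in. A hedged sketch of a caller follows; the buffer management shown is illustrative rather than the kernel's actual helper.

/* Sketch only: resolve the path once, then pass it to the tracepoint. */
static void demo_trace_mkdir(struct cgroup *cgrp, char *buf, size_t buflen)
{
        if (trace_cgroup_mkdir_enabled()) {
                cgroup_path(cgrp, buf, buflen);
                trace_cgroup_mkdir(cgrp, buf);
        }
}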
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 0e31eb136c57..d68e9e536814 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -17,6 +17,7 @@ struct mpage_da_data;
struct ext4_map_blocks;
struct extent_status;
struct ext4_fsmap;
+struct partial_cluster;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -225,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
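A hedged sketch of an intended call site for the new event, an ext4 ->commit_metadata style hook; the function body is a placeholder, not the real implementation.

/* Sketch only: emit the event when NFSD asks ext4 to commit an inode's metadata. */
static int demo_commit_metadata(struct inode *inode)
{
        trace_ext4_nfs_commit_metadata(inode);
        /* ...force the journal commit for this inode here... */
        return 0;
}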
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
@@ -2035,21 +2056,23 @@ TRACE_EVENT(ext4_ext_show_extent,
);
TRACE_EVENT(ext4_remove_blocks,
- TP_PROTO(struct inode *inode, struct ext4_extent *ex,
- ext4_lblk_t from, ext4_fsblk_t to,
- long long partial_cluster),
+ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+ ext4_lblk_t from, ext4_fsblk_t to,
+ struct partial_cluster *pc),
- TP_ARGS(inode, ex, from, to, partial_cluster),
+ TP_ARGS(inode, ex, from, to, pc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( ext4_lblk_t, from )
__field( ext4_lblk_t, to )
- __field( long long, partial )
__field( ext4_fsblk_t, ee_pblk )
__field( ext4_lblk_t, ee_lblk )
__field( unsigned short, ee_len )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state)
),
TP_fast_assign(
@@ -2057,14 +2080,16 @@ TRACE_EVENT(ext4_remove_blocks,
__entry->ino = inode->i_ino;
__entry->from = from;
__entry->to = to;
- __entry->partial = partial_cluster;
__entry->ee_pblk = ext4_ext_pblock(ex);
__entry->ee_lblk = le32_to_cpu(ex->ee_block);
__entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
),
TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
- "from %u to %u partial_cluster %lld",
+ "from %u to %u partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->ee_lblk,
@@ -2072,45 +2097,53 @@ TRACE_EVENT(ext4_remove_blocks,
(unsigned short) __entry->ee_len,
(unsigned) __entry->from,
(unsigned) __entry->to,
- (long long) __entry->partial)
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state)
);
TRACE_EVENT(ext4_ext_rm_leaf,
TP_PROTO(struct inode *inode, ext4_lblk_t start,
struct ext4_extent *ex,
- long long partial_cluster),
+ struct partial_cluster *pc),
- TP_ARGS(inode, start, ex, partial_cluster),
+ TP_ARGS(inode, start, ex, pc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( long long, partial )
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, ee_lblk )
__field( ext4_fsblk_t, ee_pblk )
__field( short, ee_len )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->partial = partial_cluster;
__entry->start = start;
__entry->ee_lblk = le32_to_cpu(ex->ee_block);
__entry->ee_pblk = ext4_ext_pblock(ex);
__entry->ee_len = ext4_ext_get_actual_len(ex);
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
),
TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
- "partial_cluster %lld",
+ "partial [pclu %lld lblk %u state %d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->ee_lblk,
(unsigned long long) __entry->ee_pblk,
(unsigned short) __entry->ee_len,
- (long long) __entry->partial)
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state)
);
TRACE_EVENT(ext4_ext_rm_idx,
@@ -2168,9 +2201,9 @@ TRACE_EVENT(ext4_ext_remove_space,
TRACE_EVENT(ext4_ext_remove_space_done,
TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
- int depth, long long partial, __le16 eh_entries),
+ int depth, struct partial_cluster *pc, __le16 eh_entries),
- TP_ARGS(inode, start, end, depth, partial, eh_entries),
+ TP_ARGS(inode, start, end, depth, pc, eh_entries),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -2178,7 +2211,9 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__field( ext4_lblk_t, start )
__field( ext4_lblk_t, end )
__field( int, depth )
- __field( long long, partial )
+ __field( ext4_fsblk_t, pc_pclu )
+ __field( ext4_lblk_t, pc_lblk )
+ __field( int, pc_state )
__field( unsigned short, eh_entries )
),
@@ -2188,18 +2223,23 @@ TRACE_EVENT(ext4_ext_remove_space_done,
__entry->start = start;
__entry->end = end;
__entry->depth = depth;
- __entry->partial = partial;
+ __entry->pc_pclu = pc->pclu;
+ __entry->pc_lblk = pc->lblk;
+ __entry->pc_state = pc->state;
__entry->eh_entries = le16_to_cpu(eh_entries);
),
- TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
+ TP_printk("dev %d,%d ino %lu since %u end %u depth %d "
+ "partial [pclu %lld lblk %u state %d] "
"remaining_entries %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned) __entry->start,
(unsigned) __entry->end,
__entry->depth,
- (long long) __entry->partial,
+ (long long) __entry->pc_pclu,
+ (unsigned int) __entry->pc_lblk,
+ (int) __entry->pc_state,
(unsigned short) __entry->eh_entries)
);
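These three events switch from a bare long long partial_cluster to a struct partial_cluster pointer, and the fields read in TP_fast_assign() (pclu, lblk, state) are what callers must populate. A hedged sketch of the new argument passing; the wrapper itself is invented for illustration.

/* Sketch only: shows the pointer-based argument, not the real ext4 call sites. */
static void demo_trace_remove_blocks(struct inode *inode, struct ext4_extent *ex,
                                     ext4_lblk_t from, ext4_fsblk_t to,
                                     struct partial_cluster *pc)
{
        trace_ext4_remove_blocks(inode, ex, from, to, pc);
}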
@@ -2270,7 +2310,7 @@ TRACE_EVENT(ext4_es_remove_extent,
__entry->lblk, __entry->len)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
+TRACE_EVENT(ext4_es_find_extent_range_enter,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
TP_ARGS(inode, lblk),
@@ -2292,7 +2332,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
(unsigned long) __entry->ino, __entry->lblk)
);
-TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
+TRACE_EVENT(ext4_es_find_extent_range_exit,
TP_PROTO(struct inode *inode, struct extent_status *es),
TP_ARGS(inode, es),
@@ -2512,6 +2552,41 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
+TRACE_EVENT(ext4_es_insert_delayed_block,
+ TP_PROTO(struct inode *inode, struct extent_status *es,
+ bool allocated),
+
+ TP_ARGS(inode, es, allocated),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( ext4_lblk_t, lblk )
+ __field( ext4_lblk_t, len )
+ __field( ext4_fsblk_t, pblk )
+ __field( char, status )
+ __field( bool, allocated )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblk = es->es_lblk;
+ __entry->len = es->es_len;
+ __entry->pblk = ext4_es_pblock(es);
+ __entry->status = ext4_es_status(es);
+ __entry->allocated = allocated;
+ ),
+
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s "
+ "allocated %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ __entry->lblk, __entry->len,
+ __entry->pblk, show_extent_status(__entry->status),
+ __entry->allocated)
+);
+
/* fsmap traces */
DECLARE_EVENT_CLASS(ext4_fsmap_class,
TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 795698925d20..3ec73f17ee2a 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ REQ_OP_WRITE, "WRITE" }, \
{ REQ_OP_FLUSH, "FLUSH" }, \
{ REQ_OP_DISCARD, "DISCARD" }, \
- { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \
{ REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \
{ REQ_OP_ZONE_RESET, "ZONE_RESET" }, \
{ REQ_OP_WRITE_SAME, "WRITE_SAME" }, \
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 68b17c116907..fad7befa612d 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_pid)
__field(unsigned int, fl_flags)
@@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_pid = fl ? fl->fl_pid : 0;
__entry->fl_flags = fl ? fl->fl_flags : 0;
@@ -92,9 +92,9 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->ret = ret;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
__entry->fl_pid, show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_start, __entry->fl_end, __entry->ret)
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_flags)
__field(unsigned char, fl_type)
@@ -137,7 +137,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_flags = fl ? fl->fl_flags : 0;
__entry->fl_type = fl ? fl->fl_type : 0;
@@ -145,9 +145,9 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_break_time, __entry->fl_downgrade_time)
diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h
new file mode 100644
index 000000000000..d7a1d0ffb679
--- /dev/null
+++ b/include/trace/events/hwmon.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hwmon
+
+#if !defined(_TRACE_HWMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HWMON_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(hwmon_attr_class,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __string(attr_name, attr_name)
+ __field(long, val)
+ ),
+
+ TP_fast_assign(
+ __entry->index = index;
+ __assign_str(attr_name, attr_name);
+ __entry->val = val;
+ ),
+
+ TP_printk("index=%d, attr_name=%s, val=%ld",
+ __entry->index, __get_str(attr_name), __entry->val)
+);
+
+DEFINE_EVENT(hwmon_attr_class, hwmon_attr_show,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val)
+);
+
+DEFINE_EVENT(hwmon_attr_class, hwmon_attr_store,
+
+ TP_PROTO(int index, const char *attr_name, long val),
+
+ TP_ARGS(index, attr_name, val)
+);
+
+TRACE_EVENT(hwmon_attr_show_string,
+
+ TP_PROTO(int index, const char *attr_name, const char *s),
+
+ TP_ARGS(index, attr_name, s),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __string(attr_name, attr_name)
+ __string(label, s)
+ ),
+
+ TP_fast_assign(
+ __entry->index = index;
+ __assign_str(attr_name, attr_name);
+ __assign_str(label, s);
+ ),
+
+ TP_printk("index=%d, attr_name=%s, val=%s",
+ __entry->index, __get_str(attr_name), __get_str(label))
+);
+
+#endif /* _TRACE_HWMON_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
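TRACE_EVENT()/DEFINE_EVENT() generate the usual trace_<name>() wrappers, so a hwmon read path could emit these as sketched below; the channel index, attribute names, and label are placeholders.

/* Sketch only: numeric reads use hwmon_attr_show, label reads the string variant. */
static void demo_hwmon_trace(int channel, long val, const char *label)
{
        trace_hwmon_attr_show(channel, "temp1_input", val);
        trace_hwmon_attr_show_string(channel, "temp1_label", label);
}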
diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h
new file mode 100644
index 000000000000..87408faf6e4e
--- /dev/null
+++ b/include/trace/events/iscsi.h
@@ -0,0 +1,107 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iscsi
+
+#if !defined(_TRACE_ISCSI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ISCSI_H
+
+#include <linux/tracepoint.h>
+
+/* max debug message length */
+#define ISCSI_MSG_MAX 256
+
+/*
+ * Declare tracepoint helper function.
+ */
+void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *),
+ struct device *dev, const char *fmt, ...);
+
+/*
+ * Declare event class for iscsi debug messages.
+ */
+DECLARE_EVENT_CLASS(iscsi_log_msg,
+
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev) )
+ __dynamic_array(char, msg, ISCSI_MSG_MAX )
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+
+ TP_printk("%s: %s",__get_str(dname), __get_str(msg)
+ )
+);
+
+/*
+ * Define event to capture iscsi connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_conn,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_session,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi error handling debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_eh,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_tcp,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi sw tcp debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_sw_tcp,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport session debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_session,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+/*
+ * Define event to capture iscsi transport connection debug messages.
+ */
+DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_conn,
+ TP_PROTO(struct device *dev, struct va_format *vaf),
+
+ TP_ARGS(dev, vaf)
+);
+
+#endif /* _TRACE_ISCSI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
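iscsi_dbg_trace() takes one of the trace_iscsi_dbg_* wrappers plus a printf-style format, so the existing debug macros can feed these events without building a struct va_format themselves. A hedged sketch of a caller; the device pointer and message are illustrative.

/* Sketch only: route a formatted debug message to the connection event. */
static void demo_conn_debug(struct device *conn_dev, const char *target)
{
        iscsi_dbg_trace(trace_iscsi_dbg_conn, conn_dev,
                        "logging into %s\n", target);
}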
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
new file mode 100644
index 000000000000..c0e7d24ca256
--- /dev/null
+++ b/include/trace/events/kyber.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kyber
+
+#if !defined(_TRACE_KYBER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KYBER_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+#define DOMAIN_LEN 16
+#define LATENCY_TYPE_LEN 8
+
+TRACE_EVENT(kyber_latency,
+
+ TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ unsigned int percentile, unsigned int numerator,
+ unsigned int denominator, unsigned int samples),
+
+ TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __array( char, type, LATENCY_TYPE_LEN )
+ __field( u8, percentile )
+ __field( u8, numerator )
+ __field( u8, denominator )
+ __field( unsigned int, samples )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strlcpy(__entry->type, type, sizeof(__entry->type));
+ __entry->percentile = percentile;
+ __entry->numerator = numerator;
+ __entry->denominator = denominator;
+ __entry->samples = samples;
+ ),
+
+ TP_printk("%d,%d %s %s p%u %u/%u samples=%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->type, __entry->percentile, __entry->numerator,
+ __entry->denominator, __entry->samples)
+);
+
+TRACE_EVENT(kyber_adjust,
+
+ TP_PROTO(struct request_queue *q, const char *domain,
+ unsigned int depth),
+
+ TP_ARGS(q, domain, depth),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __field( unsigned int, depth )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%d,%d %s %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->depth)
+);
+
+TRACE_EVENT(kyber_throttled,
+
+ TP_PROTO(struct request_queue *q, const char *domain),
+
+ TP_ARGS(q, domain),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ ),
+
+ TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->domain)
+);
+
+#define _TRACE_KYBER_H
+#endif /* _TRACE_KYBER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
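A hedged sketch of emitting the three kyber events; the domain/type strings must fit DOMAIN_LEN and LATENCY_TYPE_LEN, and every numeric value below is a placeholder.

/* Sketch only: placeholder values; not taken from the kyber scheduler itself. */
static void demo_kyber_trace(struct request_queue *q)
{
        trace_kyber_latency(q, "read", "io", 90, 1, 2, 512);
        trace_kyber_adjust(q, "read", 64);
        trace_kyber_throttled(q, "read");
}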
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 711372845945..705b33d1e395 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
- TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
- TP_ARGS(p, dst_nid, nr_pages),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN)
- __field( pid_t, pid)
- __field( int, dst_nid)
- __field( unsigned long, nr_pages)
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->dst_nid = dst_nid;
- __entry->nr_pages = nr_pages;
- ),
-
- TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
- __entry->comm,
- __entry->pid,
- __entry->dst_nid,
- __entry->nr_pages)
-);
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a81cffb76d89..a1675d43777e 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -88,6 +88,7 @@
{1UL << PG_dirty, "dirty" }, \
{1UL << PG_lru, "lru" }, \
{1UL << PG_active, "active" }, \
+ {1UL << PG_workingset, "workingset" }, \
{1UL << PG_slab, "slab" }, \
{1UL << PG_owner_priv_1, "owner_priv_1" }, \
{1UL << PG_arch_1, "arch_1" }, \
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 00aa72ce0e7c..1efd7d9b25fe 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -244,6 +244,65 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
TP_ARGS(skb)
);
+DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret=%d", __entry->ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
#endif /* _TRACE_NET_H */
/* This part must be outside protection */
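Each *_exit event records the handler's return value and is meant to pair with the existing entry events, so a consumer can bracket per-packet handler latency. A hedged sketch follows; demo_process() is a hypothetical stand-in for the real processing step.

/* Sketch only: pair an existing entry event with the new exit event. */
static int demo_rx_handler(struct sk_buff *skb)
{
        int ret;

        trace_netif_receive_skb_entry(skb);
        ret = demo_process(skb);
        trace_netif_receive_skb_exit(ret);
        return ret;
}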
diff --git a/include/trace/events/objagg.h b/include/trace/events/objagg.h
new file mode 100644
index 000000000000..fcec0fc9eb0c
--- /dev/null
+++ b/include/trace/events/objagg.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM objagg
+
+#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_OBJAGG_H
+
+#include <linux/tracepoint.h>
+
+struct objagg;
+struct objagg_obj;
+
+TRACE_EVENT(objagg_create,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_destroy,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_obj_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_get,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_put,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_assign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_unassign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_root_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_root_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+#endif /* __TRACE_OBJAGG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
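A hedged sketch of where the get/put events would sit in an object-aggregation library; the refcount handling shown is invented for illustration only.

/* Sketch only: emit the get event after bumping a hypothetical refcount. */
static void demo_objagg_obj_get(const struct objagg *objagg,
                                const struct objagg_obj *obj,
                                unsigned int *refcount)
{
        (*refcount)++;
        trace_objagg_obj_get(objagg, obj, *refcount);
}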
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 9c4eb33c5a1d..9a0d4ceeb166 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -1,4 +1,4 @@
-#ifdef CONFIG_PREEMPTIRQ_EVENTS
+#ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS
#undef TRACE_SYSTEM
#define TRACE_SYSTEM preemptirq
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(preemptirq_template,
(void *)((unsigned long)(_stext) + __entry->parent_offs))
);
-#ifndef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_EVENT(preemptirq_template, irq_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
@@ -40,9 +40,14 @@ DEFINE_EVENT(preemptirq_template, irq_disable,
DEFINE_EVENT(preemptirq_template, irq_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
+#else
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
#endif
-#ifdef CONFIG_DEBUG_PREEMPT
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
DEFINE_EVENT(preemptirq_template, preempt_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
@@ -50,22 +55,22 @@ DEFINE_EVENT(preemptirq_template, preempt_disable,
DEFINE_EVENT(preemptirq_template, preempt_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
+#else
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
#endif
#endif /* _TRACE_PREEMPTIRQ_H */
#include <trace/define_trace.h>
-#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
+#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
#define trace_irq_enable(...)
#define trace_irq_disable(...)
#define trace_irq_enable_rcuidle(...)
#define trace_irq_disable_rcuidle(...)
-#endif
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
#define trace_preempt_enable_rcuidle(...)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index a8d07feff6a0..f0c4d10e614b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -393,9 +393,8 @@ TRACE_EVENT(rcu_quiescent_state_report,
* Tracepoint for quiescent states detected by force_quiescent_state().
* These trace events include the type of RCU, the grace-period number
* that was blocked by the CPU, the CPU itself, and the type of quiescent
- * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
- * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
- * CPU got a quiescent state via its rcu_qs_ctr.
+ * state, which can be "dti" for dyntick-idle mode or "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long.
*/
TRACE_EVENT(rcu_fqs,
@@ -705,20 +704,20 @@ TRACE_EVENT(rcu_torture_read,
);
/*
- * Tracepoint for _rcu_barrier() execution. The string "s" describes
- * the _rcu_barrier phase:
- * "Begin": _rcu_barrier() started.
- * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
- * "Inc1": _rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
- * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
- * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
+ * Tracepoint for rcu_barrier() execution. The string "s" describes
+ * the rcu_barrier phase:
+ * "Begin": rcu_barrier() started.
+ * "EarlyExit": rcu_barrier() piggybacked, thus early exit.
+ * "Inc1": rcu_barrier() piggyback check counter incremented.
+ * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
+ * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
+ * "OnlineQ": rcu_barrier() found online CPU with callbacks.
+ * "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
* "IRQNQ": An rcu_barrier_callback() callback found no callbacks.
* "CB": An rcu_barrier_callback() invoked a callback, not the last.
* "LastCB": An rcu_barrier_callback() invoked the last callback.
- * "Inc2": _rcu_barrier() piggyback check counter incremented.
+ * "Inc2": rcu_barrier() piggyback check counter incremented.
* The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
* is the count of remaining callbacks, and "done" is the piggybacking count.
*/
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 53df203b8057..399b1aedc927 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -10,6 +10,7 @@
#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RPCRDMA_H
+#include <linux/scatterlist.h>
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
@@ -97,7 +98,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, mr)
__field(unsigned int, pos)
__field(int, nents)
__field(u32, handle)
@@ -109,7 +109,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->mr = mr;
__entry->pos = pos;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
@@ -118,8 +117,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
- __entry->task_id, __entry->client_id, __entry->mr,
+ TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
__entry->pos, __entry->length,
(unsigned long long)__entry->offset, __entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
@@ -127,7 +126,7 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
);
#define DEFINE_RDCH_EVENT(name) \
- DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
unsigned int pos, \
@@ -148,7 +147,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, mr)
__field(int, nents)
__field(u32, handle)
__field(u32, length)
@@ -159,7 +157,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
- __entry->mr = mr;
__entry->nents = mr->mr_nents;
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
@@ -167,8 +164,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
__entry->nsegs = nsegs;
),
- TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
- __entry->task_id, __entry->client_id, __entry->mr,
+ TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id,
__entry->length, (unsigned long long)__entry->offset,
__entry->handle,
__entry->nents < __entry->nsegs ? "more" : "last"
@@ -176,7 +173,7 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
);
#define DEFINE_WRCH_EVENT(name) \
- DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
TP_PROTO( \
const struct rpc_task *task, \
struct rpcrdma_mr *mr, \
@@ -234,6 +231,18 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
), \
TP_ARGS(wc, frwr))
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define xprtrdma_show_direction(x) \
+ __print_symbolic(x, \
+ { DMA_BIDIRECTIONAL, "BIDIR" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
DECLARE_EVENT_CLASS(xprtrdma_mr,
TP_PROTO(
const struct rpcrdma_mr *mr
@@ -246,6 +255,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
__field(u32, handle)
__field(u32, length)
__field(u64, offset)
+ __field(u32, dir)
),
TP_fast_assign(
@@ -253,17 +263,18 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
__entry->handle = mr->mr_handle;
__entry->length = mr->mr_length;
__entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
),
- TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
__entry->mr, __entry->length,
- (unsigned long long)__entry->offset,
- __entry->handle
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir)
)
);
#define DEFINE_MR_EVENT(name) \
- DEFINE_EVENT(xprtrdma_mr, name, \
+ DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
TP_PROTO( \
const struct rpcrdma_mr *mr \
), \
@@ -306,7 +317,7 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event,
** Connection events
**/
-TRACE_EVENT(xprtrdma_conn_upcall,
+TRACE_EVENT(xprtrdma_cm_event,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
struct rdma_cm_event *event
@@ -371,13 +382,15 @@ TRACE_EVENT(xprtrdma_disconnect,
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
-DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
+DEFINE_RXPRT_EVENT(xprtrdma_op_close);
+DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
-TRACE_EVENT(xprtrdma_qp_error,
+TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
const struct ib_event *event
@@ -437,9 +450,9 @@ TRACE_EVENT(xprtrdma_createmrs,
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
-DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
-DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
-DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+DEFINE_RDCH_EVENT(read);
+DEFINE_WRCH_EVENT(write);
+DEFINE_WRCH_EVENT(reply);
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
@@ -509,7 +522,7 @@ TRACE_EVENT(xprtrdma_post_send,
TP_STRUCT__entry(
__field(const void *, req)
__field(int, num_sge)
- __field(bool, signaled)
+ __field(int, signaled)
__field(int, status)
),
@@ -570,7 +583,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
__entry->r_xprt = r_xprt;
__entry->count = count;
__entry->status = status;
- __entry->posted = r_xprt->rx_buf.rb_posted_receives;
+ __entry->posted = r_xprt->rx_ep.rep_receive_count;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
@@ -651,11 +664,146 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_MR_EVENT(xprtrdma_localinv);
-DEFINE_MR_EVENT(xprtrdma_dma_map);
-DEFINE_MR_EVENT(xprtrdma_dma_unmap);
-DEFINE_MR_EVENT(xprtrdma_remoteinv);
-DEFINE_MR_EVENT(xprtrdma_recover_mr);
+TRACE_EVENT(xprtrdma_frwr_alloc,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int rc
+ ),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("mr=%p: rc=%d",
+ __entry->mr, __entry->rc
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_dereg,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int rc
+ ),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(u32, dir)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->dir = mr->mr_dir;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_sgerr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int sg_nents
+ ),
+
+ TP_ARGS(mr, sg_nents),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u64, addr)
+ __field(u32, dir)
+ __field(int, nents)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->addr = mr->mr_sg->dma_address;
+ __entry->dir = mr->mr_dir;
+ __entry->nents = sg_nents;
+ ),
+
+ TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
+ __entry->mr, __entry->addr,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->nents
+ )
+);
+
+TRACE_EVENT(xprtrdma_frwr_maperr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr,
+ int num_mapped
+ ),
+
+ TP_ARGS(mr, num_mapped),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u64, addr)
+ __field(u32, dir)
+ __field(int, num_mapped)
+ __field(int, nents)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->addr = mr->mr_sg->dma_address;
+ __entry->dir = mr->mr_dir;
+ __entry->num_mapped = num_mapped;
+ __entry->nents = mr->mr_nents;
+ ),
+
+ TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
+ __entry->mr, __entry->addr,
+ xprtrdma_show_direction(__entry->dir),
+ __entry->num_mapped, __entry->nents
+ )
+);
+
+DEFINE_MR_EVENT(localinv);
+DEFINE_MR_EVENT(map);
+DEFINE_MR_EVENT(unmap);
+DEFINE_MR_EVENT(remoteinv);
+DEFINE_MR_EVENT(recycle);
+
+TRACE_EVENT(xprtrdma_dma_maperr,
+ TP_PROTO(
+ u64 addr
+ ),
+
+ TP_ARGS(addr),
+
+ TP_STRUCT__entry(
+ __field(u64, addr)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ ),
+
+ TP_printk("dma addr=0x%llx\n", __entry->addr)
+);
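DEFINE_MR_EVENT() now prefixes the event name with xprtrdma_mr_, so DEFINE_MR_EVENT(map) above generates trace_xprtrdma_mr_map() and so on, each taking just the MR. A hedged sketch of the renamed call shape; the wrapper function is illustrative.

/* Sketch only: the renamed wrapper replaces the old trace_xprtrdma_dma_map(). */
static void demo_mr_mapped(const struct rpcrdma_mr *mr)
{
        trace_xprtrdma_mr_map(mr);
}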
/**
** Reply events
@@ -824,7 +972,7 @@ TRACE_EVENT(xprtrdma_decode_seg,
** Allocation/release of rpcrdma_reqs and rpcrdma_reps
**/
-TRACE_EVENT(xprtrdma_allocate,
+TRACE_EVENT(xprtrdma_op_allocate,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
@@ -854,7 +1002,7 @@ TRACE_EVENT(xprtrdma_allocate,
)
);
-TRACE_EVENT(xprtrdma_rpc_done,
+TRACE_EVENT(xprtrdma_op_free,
TP_PROTO(
const struct rpc_task *task,
const struct rpcrdma_req *req
@@ -917,6 +1065,34 @@ TRACE_EVENT(xprtrdma_cb_setup,
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
+TRACE_EVENT(xprtrdma_leaked_rep,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rqst, rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->rep = rep;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
/**
** Server-side RPC/RDMA events
**/
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 196587b8f204..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
rxrpc_peer_new,
rxrpc_peer_processing,
rxrpc_peer_put,
- rxrpc_peer_queued_error,
};
enum rxrpc_conn_trace {
@@ -182,6 +181,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_check_life,
rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
@@ -257,8 +257,7 @@ enum rxrpc_tx_point {
EM(rxrpc_peer_got, "GOT") \
EM(rxrpc_peer_new, "NEW") \
EM(rxrpc_peer_processing, "PRO") \
- EM(rxrpc_peer_put, "PUT") \
- E_(rxrpc_peer_queued_error, "QER")
+ E_(rxrpc_peer_put, "PUT")
#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
@@ -382,6 +381,7 @@ enum rxrpc_tx_point {
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
@@ -933,6 +933,7 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_fast_assign(
__entry->call = call_id;
memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ __entry->where = where;
),
TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 0be866c91f62..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
+ unsigned int state;
+
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
if (preempt)
return TASK_REPORT_MAX;
- return 1 << task_state_index(p);
+ /*
+ * task_state_index() uses fls() and returns a value in the 0-8 range.
+ * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
+ * using it in the left-shift operation to get the correct task->state
+ * mapping.
+ */
+ state = task_state_index(p);
+
+ return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
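The decrement matters because task_state_index() reports TASK_RUNNING as 0 and the first reportable state as 1, while the sched_switch flag table below starts at bit 0x01. A standalone sketch of the new mapping (plain user-space C, not kernel code) makes the correspondence explicit:

```c
/* Illustration only: index 1 (TASK_INTERRUPTIBLE) now maps to 0x01 ("S"),
 * index 2 to 0x02 ("D"), and so on; index 0 stays 0 and prints as "R".
 */
#include <stdio.h>

static unsigned long report_state(unsigned int state_index)
{
	return state_index ? (1UL << (state_index - 1)) : 0;
}

int main(void)
{
	for (unsigned int idx = 0; idx <= 8; idx++)
		printf("index %u -> prev_state bits 0x%02lx\n",
		       idx, report_state(idx));
	return 0;
}
```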
@@ -159,9 +169,14 @@ TRACE_EVENT(sched_switch,
(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
__print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
- { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
- { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
- { 0x40, "P" }, { 0x80, "I" }) :
+ { TASK_INTERRUPTIBLE, "S" },
+ { TASK_UNINTERRUPTIBLE, "D" },
+ { __TASK_STOPPED, "T" },
+ { __TASK_TRACED, "t" },
+ { EXIT_DEAD, "X" },
+ { EXIT_ZOMBIE, "Z" },
+ { TASK_PARKED, "P" },
+ { TASK_DEAD, "I" }) :
"R",
__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h
index 86582923d51c..1db7e4b07c01 100644
--- a/include/trace/events/signal.h
+++ b/include/trace/events/signal.h
@@ -11,8 +11,7 @@
#define TP_STORE_SIGINFO(__entry, info) \
do { \
- if (info == SEND_SIG_NOINFO || \
- info == SEND_SIG_FORCED) { \
+ if (info == SEND_SIG_NOINFO) { \
__entry->errno = 0; \
__entry->code = SI_USER; \
} else if (info == SEND_SIG_PRIV) { \
@@ -50,7 +49,7 @@ enum {
*/
TRACE_EVENT(signal_generate,
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+ TP_PROTO(int sig, struct kernel_siginfo *info, struct task_struct *task,
int group, int result),
TP_ARGS(sig, info, task, group, result),
@@ -96,7 +95,7 @@ TRACE_EVENT(signal_generate,
*/
TRACE_EVENT(signal_deliver,
- TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
+ TP_PROTO(int sig, struct kernel_siginfo *info, struct k_sigaction *ka),
TP_ARGS(sig, info, ka),
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index bbb08a3ef5cc..0d5d0d91f861 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -16,40 +16,6 @@
DECLARE_EVENT_CLASS(rpc_task_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(int, status)
- ),
-
- TP_fast_assign(
- __entry->task_id = task->tk_pid;
- __entry->client_id = task->tk_client->cl_clid;
- __entry->status = task->tk_status;
- ),
-
- TP_printk("task:%u@%u status=%d",
- __entry->task_id, __entry->client_id,
- __entry->status)
-);
-
-DEFINE_EVENT(rpc_task_status, rpc_call_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task)
-);
-
-DEFINE_EVENT(rpc_task_status, rpc_bind_status,
- TP_PROTO(struct rpc_task *task),
-
- TP_ARGS(task)
-);
-
-TRACE_EVENT(rpc_connect_status,
TP_PROTO(const struct rpc_task *task),
TP_ARGS(task),
@@ -70,6 +36,16 @@ TRACE_EVENT(rpc_connect_status,
__entry->task_id, __entry->client_id,
__entry->status)
);
+#define DEFINE_RPC_STATUS_EVENT(name) \
+ DEFINE_EVENT(rpc_task_status, rpc_##name##_status, \
+ TP_PROTO( \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(task))
+
+DEFINE_RPC_STATUS_EVENT(call);
+DEFINE_RPC_STATUS_EVENT(bind);
+DEFINE_RPC_STATUS_EVENT(connect);
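DEFINE_RPC_STATUS_EVENT() is pure boilerplate reduction: each invocation stamps out one DEFINE_EVENT() against the shared rpc_task_status class. For example, DEFINE_RPC_STATUS_EVENT(call) expands to the equivalent of:

```c
/* Hand-written equivalent of DEFINE_RPC_STATUS_EVENT(call). */
DEFINE_EVENT(rpc_task_status, rpc_call_status,
	TP_PROTO(
		const struct rpc_task *task
	),
	TP_ARGS(task));
```

The DEFINE_RPC_RUNNING_EVENT(), DEFINE_RPC_QUEUED_EVENT(), DEFINE_RPC_XPRT_EVENT(), DEFINE_SVC_RQST_EVENT() and DEFINE_SVC_DEFERRED_EVENT() wrappers introduced later in this file follow the same pattern.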
TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task),
@@ -134,30 +110,17 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->action
)
);
+#define DEFINE_RPC_RUNNING_EVENT(name) \
+ DEFINE_EVENT(rpc_task_running, rpc_task_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ const void *action \
+ ), \
+ TP_ARGS(task, action))
-DEFINE_EVENT(rpc_task_running, rpc_task_begin,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
-
-DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
-
-DEFINE_EVENT(rpc_task_running, rpc_task_complete,
-
- TP_PROTO(const struct rpc_task *task, const void *action),
-
- TP_ARGS(task, action)
-
-);
+DEFINE_RPC_RUNNING_EVENT(begin);
+DEFINE_RPC_RUNNING_EVENT(run_action);
+DEFINE_RPC_RUNNING_EVENT(complete);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -195,22 +158,16 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__get_str(q_name)
)
);
+#define DEFINE_RPC_QUEUED_EVENT(name) \
+ DEFINE_EVENT(rpc_task_queued, rpc_task_##name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ const struct rpc_wait_queue *q \
+ ), \
+ TP_ARGS(task, q))
-DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
-
- TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
-
- TP_ARGS(task, q)
-
-);
-
-DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
-
- TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
-
- TP_ARGS(task, q)
-
-);
+DEFINE_RPC_QUEUED_EVENT(sleep);
+DEFINE_RPC_QUEUED_EVENT(wakeup);
TRACE_EVENT(rpc_stats_latency,
@@ -410,7 +367,11 @@ DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
DECLARE_EVENT_CLASS(rpc_xprt_event,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_PROTO(
+ const struct rpc_xprt *xprt,
+ __be32 xid,
+ int status
+ ),
TP_ARGS(xprt, xid, status),
@@ -432,22 +393,19 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__get_str(port), __entry->xid,
__entry->status)
);
+#define DEFINE_RPC_XPRT_EVENT(name) \
+ DEFINE_EVENT(rpc_xprt_event, xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt, \
+ __be32 xid, \
+ int status \
+ ), \
+ TP_ARGS(xprt, xid, status))
-DEFINE_EVENT(rpc_xprt_event, xprt_timer,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_transmit,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
-
-DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst,
- TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
- TP_ARGS(xprt, xid, status));
+DEFINE_RPC_XPRT_EVENT(timer);
+DEFINE_RPC_XPRT_EVENT(lookup_rqst);
+DEFINE_RPC_XPRT_EVENT(transmit);
+DEFINE_RPC_XPRT_EVENT(complete_rqst);
TRACE_EVENT(xprt_ping,
TP_PROTO(const struct rpc_xprt *xprt, int status),
@@ -470,14 +428,14 @@ TRACE_EVENT(xprt_ping,
__get_str(addr), __get_str(port), __entry->status)
);
-TRACE_EVENT(xs_tcp_data_ready,
- TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total),
+TRACE_EVENT(xs_stream_read_data,
+ TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
TP_ARGS(xprt, err, total),
TP_STRUCT__entry(
- __field(int, err)
- __field(unsigned int, total)
+ __field(ssize_t, err)
+ __field(size_t, total)
__string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
"(null)")
__string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
@@ -493,21 +451,11 @@ TRACE_EVENT(xs_tcp_data_ready,
xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
),
- TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr),
+ TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr),
__get_str(port), __entry->err, __entry->total)
);
-#define rpc_show_sock_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \
- { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \
- { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \
- { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \
- { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \
- { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \
- { TCP_RPC_REPLY, "TCP_RPC_REPLY" })
-
-TRACE_EVENT(xs_tcp_data_recv,
+TRACE_EVENT(xs_stream_read_request,
TP_PROTO(struct sock_xprt *xs),
TP_ARGS(xs),
@@ -516,25 +464,22 @@ TRACE_EVENT(xs_tcp_data_recv,
__string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
__string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
__field(u32, xid)
- __field(unsigned long, flags)
__field(unsigned long, copied)
__field(unsigned int, reclen)
- __field(unsigned long, offset)
+ __field(unsigned int, offset)
),
TP_fast_assign(
__assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
- __entry->xid = be32_to_cpu(xs->tcp_xid);
- __entry->flags = xs->tcp_flags;
- __entry->copied = xs->tcp_copied;
- __entry->reclen = xs->tcp_reclen;
- __entry->offset = xs->tcp_offset;
+ __entry->xid = be32_to_cpu(xs->recv.xid);
+ __entry->copied = xs->recv.copied;
+ __entry->reclen = xs->recv.len;
+ __entry->offset = xs->recv.offset;
),
- TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu",
+ TP_printk("peer=[%s]:%s xid=0x%08x copied=%lu reclen=%u offset=%u",
__get_str(addr), __get_str(port), __entry->xid,
- rpc_show_sock_xprt_flags(__entry->flags),
__entry->copied, __entry->reclen, __entry->offset)
);
@@ -582,7 +527,8 @@ TRACE_EVENT(svc_process,
__field(u32, vers)
__field(u32, proc)
__string(service, name)
- __string(addr, rqst->rq_xprt->xpt_remotebuf)
+ __string(addr, rqst->rq_xprt ?
+ rqst->rq_xprt->xpt_remotebuf : "(null)")
),
TP_fast_assign(
@@ -590,7 +536,8 @@ TRACE_EVENT(svc_process,
__entry->vers = rqst->rq_vers;
__entry->proc = rqst->rq_proc;
__assign_str(service, name);
- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
+ __assign_str(addr, rqst->rq_xprt ?
+ rqst->rq_xprt->xpt_remotebuf : "(null)");
),
TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
@@ -600,7 +547,9 @@ TRACE_EVENT(svc_process,
DECLARE_EVENT_CLASS(svc_rqst_event,
- TP_PROTO(struct svc_rqst *rqst),
+ TP_PROTO(
+ const struct svc_rqst *rqst
+ ),
TP_ARGS(rqst),
@@ -620,14 +569,15 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
__get_str(addr), __entry->xid,
show_rqstp_flags(__entry->flags))
);
+#define DEFINE_SVC_RQST_EVENT(name) \
+ DEFINE_EVENT(svc_rqst_event, svc_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
-DEFINE_EVENT(svc_rqst_event, svc_defer,
- TP_PROTO(struct svc_rqst *rqst),
- TP_ARGS(rqst));
-
-DEFINE_EVENT(svc_rqst_event, svc_drop,
- TP_PROTO(struct svc_rqst *rqst),
- TP_ARGS(rqst));
+DEFINE_SVC_RQST_EVENT(defer);
+DEFINE_SVC_RQST_EVENT(drop);
DECLARE_EVENT_CLASS(svc_rqst_status,
@@ -814,7 +764,9 @@ TRACE_EVENT(svc_stats_latency,
);
DECLARE_EVENT_CLASS(svc_deferred_event,
- TP_PROTO(struct svc_deferred_req *dr),
+ TP_PROTO(
+ const struct svc_deferred_req *dr
+ ),
TP_ARGS(dr),
@@ -831,13 +783,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
);
+#define DEFINE_SVC_DEFERRED_EVENT(name) \
+ DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+ TP_PROTO( \
+ const struct svc_deferred_req *dr \
+ ), \
+ TP_ARGS(dr))
+
+DEFINE_SVC_DEFERRED_EVENT(drop);
+DEFINE_SVC_DEFERRED_EVENT(revisit);
-DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
- TP_PROTO(struct svc_deferred_req *dr),
- TP_ARGS(dr));
-DEFINE_EVENT(svc_deferred_event, svc_revisit_deferred,
- TP_PROTO(struct svc_deferred_req *dr),
- TP_ARGS(dr));
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ac55b328d61b..2bc9960a31aa 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -56,6 +56,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STRUCT__entry(
__field(const void *, skbaddr)
__field(const void *, skaddr)
+ __field(int, state)
__field(__u16, sport)
__field(__u16, dport)
__array(__u8, saddr, 4)
@@ -70,6 +71,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__entry->skbaddr = skb;
__entry->skaddr = sk;
+ __entry->state = sk->sk_state;
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
@@ -84,9 +86,10 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
+ TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s\n",
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
- __entry->saddr_v6, __entry->daddr_v6)
+ __entry->saddr_v6, __entry->daddr_v6,
+ show_tcp_state_name(__entry->state))
);
DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm
index 21381449d98a..355c4ac2c0b0 100644
--- a/include/uapi/asm-generic/Kbuild.asm
+++ b/include/uapi/asm-generic/Kbuild.asm
@@ -3,6 +3,7 @@
#
mandatory-y += auxvec.h
mandatory-y += bitsperlong.h
+mandatory-y += bpf_perf_event.h
mandatory-y += byteorder.h
mandatory-y += errno.h
mandatory-y += fcntl.h
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index e4732d3c2998..b0f8e87235bd 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -26,7 +26,9 @@
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
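These encode values store log2 of the huge page size at HUGETLB_FLAG_ENCODE_SHIFT (26), which matches mmap()'s MAP_HUGE_SHIFT, so the new 32MB entry is 25 << shift (2^25 = 32MB). A hedged user-space sketch; the local define below only mirrors the header value to keep the example self-contained:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define EXAMPLE_HUGE_32MB (25U << 26)	/* mirrors HUGETLB_FLAG_ENCODE_32MB */

int main(void)
{
	size_t len = 32UL << 20;	/* one 32MB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | EXAMPLE_HUGE_32MB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* fails unless 32MB huge pages are configured */
		return 1;
	}
	munmap(p, len);
	return 0;
}
```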
diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h
index 040651735662..cdc9f4ca8c27 100644
--- a/include/uapi/asm-generic/ioctls.h
+++ b/include/uapi/asm-generic/ioctls.h
@@ -79,6 +79,8 @@
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 80e2a7227205..cb3d6c267181 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -10,18 +10,7 @@ typedef union sigval {
void __user *sival_ptr;
} sigval_t;
-/*
- * This is the size (including padding) of the part of the
- * struct siginfo that is before the union.
- */
-#ifndef __ARCH_SI_PREAMBLE_SIZE
-#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-
#define SI_MAX_SIZE 128
-#ifndef SI_PAD_SIZE
-#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
-#endif
/*
* The default "si_band" type is "long", as specified by POSIX.
@@ -40,96 +29,108 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-typedef struct siginfo {
- int si_signo;
-#ifndef __ARCH_HAS_SWAPPED_SIGINFO
- int si_errno;
- int si_code;
-#else
- int si_code;
- int si_errno;
-#endif
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __kernel_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- __kernel_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __kernel_uid32_t _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __kernel_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- __ARCH_SI_CLOCK_T _utime;
- __ARCH_SI_CLOCK_T _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
+union __sifields {
+ /* kill() */
+ struct {
+ __kernel_pid_t _pid; /* sender's pid */
+ __kernel_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ __kernel_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ __kernel_pid_t _pid; /* sender's pid */
+ __kernel_uid32_t _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ __kernel_pid_t _pid; /* which child */
+ __kernel_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ __ARCH_SI_CLOCK_T _utime;
+ __ARCH_SI_CLOCK_T _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
+ int _trapno; /* TRAP # which caused the signal */
#endif
#ifdef __ia64__
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see ia64 si_flags */
- unsigned long _isr; /* isr */
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
- union {
- /*
- * used when si_code=BUS_MCEERR_AR or
- * used when si_code=BUS_MCEERR_AO
- */
- short _addr_lsb; /* LSB of the reported address */
- /* used when si_code=SEGV_BNDERR */
- struct {
- char _dummy_bnd[__ADDR_BND_PKEY_PAD];
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- struct {
- char _dummy_pkey[__ADDR_BND_PKEY_PAD];
- __u32 _pkey;
- } _addr_pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
+ union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or
+ * used when si_code=BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ char _dummy_bnd[__ADDR_BND_PKEY_PAD];
+ void __user *_lower;
+ void __user *_upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ char _dummy_pkey[__ADDR_BND_PKEY_PAD];
+ __u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ /* SIGSYS */
+ struct {
+ void __user *_call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+};
- /* SIGSYS */
- struct {
- void __user *_call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+#define __SIGINFO \
+struct { \
+ int si_signo; \
+ int si_errno; \
+ int si_code; \
+ union __sifields _sifields; \
+}
+#else
+#define __SIGINFO \
+struct { \
+ int si_signo; \
+ int si_code; \
+ int si_errno; \
+ union __sifields _sifields; \
+}
+#endif /* __ARCH_HAS_SWAPPED_SIGINFO */
+
+typedef struct siginfo {
+ union {
+ __SIGINFO;
+ int _si_pad[SI_MAX_SIZE/sizeof(int)];
+ };
} __ARCH_SI_ATTRIBUTES siginfo_t;
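The rework replaces the hand-counted SI_PAD_SIZE arithmetic with an anonymous union against a fixed _si_pad[] array, so the overall size is pinned to SI_MAX_SIZE by construction. A minimal standalone model of the pattern (fields simplified, not the real kernel definitions; build with -std=c11):

```c
#include <assert.h>
#include <stdio.h>

#define EXAMPLE_SI_MAX_SIZE 128

union example_sifields {
	struct { int pid, uid; } kill;
	struct { void *addr; } fault;
};

typedef struct example_siginfo {
	union {
		struct {
			int si_signo;
			int si_errno;
			int si_code;
			union example_sifields sifields;
		};
		int pad[EXAMPLE_SI_MAX_SIZE / sizeof(int)];
	};
} example_siginfo_t;

int main(void)
{
	/* The padded union fixes the ABI size without any per-architecture
	 * preamble/pad bookkeeping.
	 */
	static_assert(sizeof(example_siginfo_t) == EXAMPLE_SI_MAX_SIZE,
		      "size pinned by the pad array");
	printf("sizeof = %zu\n", sizeof(example_siginfo_t));
	return 0;
}
```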
/*
@@ -286,6 +287,12 @@ typedef struct siginfo {
#define NSIGSYS 1
/*
+ * SIGEMT si_codes
+ */
+#define EMT_TAGOVF 1 /* tag overflow */
+#define NSIGEMT 1
+
+/*
* sigevent definitions
*
* It seems likely that SIGEV_THREAD will have to be handled from
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index df4bedb9b01c..d90127298f12 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee)
/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
#define __NR3264_fstat 80
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+#endif
/* fs/sync.c */
#define __NR_sync 81
@@ -736,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_kexec_file_load 294
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
#undef __NR_syscalls
-#define __NR_syscalls 294
+#define __NR_syscalls 295
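A hedged user-space sketch of the newly wired number; programs should normally take __NR_kexec_file_load from the installed kernel headers, and the fallback define below mirrors the asm-generic value only to keep the example self-contained:

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kexec_file_load
#define __NR_kexec_file_load 294	/* asm-generic number added above */
#endif

int main(void)
{
	/* Invalid fds just exercise the wiring; a real caller passes kernel
	 * and initrd fds plus a command line, and needs CAP_SYS_BOOT.
	 */
	if (syscall(__NR_kexec_file_load, -1, -1, 0UL, NULL, 0UL) < 0)
		perror("kexec_file_load");
	return 0;
}
```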
/*
* 32 bit systems traditionally used different
@@ -758,8 +762,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat
#define __NR_fstat __NR3264_fstat
+#endif
#define __NR_mmap __NR3264_mmap
#define __NR_fadvise64 __NR3264_fadvise64
#ifdef __NR3264_stat
@@ -774,8 +780,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat
#define __NR_fstat64 __NR3264_fstat
+#endif
#define __NR_mmap2 __NR3264_mmap
#define __NR_fadvise64_64 __NR3264_fadvise64
#ifdef __NR3264_stat
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1ceec56de015..be84e43c1e19 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
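The new DCC fields are packed with the existing AMDGPU_TILING_SET() helper just like the swizzle mode. A hypothetical helper (not from this patch; the 256-byte alignment of the DCC offset and the example values are assumptions):

```c
#include <drm/amdgpu_drm.h>	/* include path may differ with a libdrm install */

static inline __u64 example_pack_dcc_tiling(__u64 swizzle_mode,
					    __u64 dcc_offset_bytes,
					    __u64 dcc_pitch_max)
{
	return AMDGPU_TILING_SET(SWIZZLE_MODE, swizzle_mode) |
	       AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset_bytes / 256) |
	       AMDGPU_TILING_SET(DCC_PITCH_MAX, dcc_pitch_max) |
	       AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, 1);
}
```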
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
@@ -665,6 +671,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
/* Subquery id: Query GFX RLC SRLS firmware version */
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
+ /* Subquery id: Query DMCU firmware version */
+ #define AMDGPU_INFO_FW_DMCU 0x12
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 721ab7e54d96..0b44260a5ee9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -30,11 +30,50 @@
extern "C" {
#endif
+/**
+ * DOC: overview
+ *
+ * In the DRM subsystem, framebuffer pixel formats are described using the
+ * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
+ * fourcc code, a Format Modifier may optionally be provided, in order to
+ * further describe the buffer's format - for example tiling or compression.
+ *
+ * Format Modifiers
+ * ----------------
+ *
+ * Format modifiers are used in conjunction with a fourcc code, forming a
+ * unique fourcc:modifier pair. This format:modifier pair must fully define the
+ * format and data layout of the buffer, and should be the only way to describe
+ * that particular buffer.
+ *
+ * Having multiple fourcc:modifier pairs which describe the same layout should
+ * be avoided, as such aliases run the risk of different drivers exposing
+ * different names for the same data format, forcing userspace to understand
+ * that they are aliases.
+ *
+ * Format modifiers may change any property of the buffer, including the number
+ * of planes and/or the required allocation size. Format modifiers are
+ * vendor-namespaced, and as such the relationship between a fourcc code and a
+ * modifier is specific to the modifier being used. For example, some modifiers
+ * may preserve meaning - such as number of planes - from the fourcc code,
+ * whereas others may not.
+ *
+ * Vendors should document their modifier usage in as much detail as
+ * possible, to ensure maximum compatibility across devices, drivers and
+ * applications.
+ *
+ * The authoritative list of format modifier codes is found in
+ * `include/uapi/drm/drm_fourcc.h`
+ */
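As a hedged illustration of a fourcc:modifier pair, NV12 combined with the new Samsung 16x16 tile modifier (defined further down in this header) names one concrete tiled layout; the pairing and include path here are assumptions for the example, and real users pass the modifier per-plane in struct drm_mode_fb_cmd2:

```c
#include <stdio.h>
#include <drm/drm_fourcc.h>	/* path may differ with a libdrm install */

int main(void)
{
	__u32 format = DRM_FORMAT_NV12;
	__u64 modifier = DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;

	printf("format 0x%08x modifier 0x%016llx\n",
	       format, (unsigned long long)modifier);
	return 0;
}
```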
+
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+/* Reserve 0 for the invalid format specifier */
+#define DRM_FORMAT_INVALID 0
+
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
@@ -112,6 +151,21 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
* 2 plane RGB + A
@@ -300,6 +354,15 @@ extern "C" {
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
/*
+ * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
+ *
+ * This is a simple tiled layout using tiles of 16x16 pixels in a row-major
+ * layout. For YCbCr formats Cb/Cr components are taken in such a way that
+ * they correspond to their 16x16 luma block.
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE fourcc_mod_code(SAMSUNG, 2)
+
+/*
* Qualcomm Compressed Format
*
* Refers to a compressed variant of the base format that is compressed.
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 8d67243952f4..a439c2e67896 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -186,8 +186,9 @@ extern "C" {
/*
* DRM_MODE_REFLECT_<axis>
*
- * Signals that the contents of a drm plane is reflected in the <axis> axis,
+ * Signals that the contents of a drm plane is reflected along the <axis> axis,
* in the same way as mirroring.
+ * See kerneldoc chapter "Plane Composition Properties" for more details.
*
* This define is provided as a convenience, looking up the property id
* using the name->prop id lookup is the preferred method.
@@ -887,6 +888,25 @@ struct drm_mode_revoke_lease {
__u32 lessee_id;
};
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * With the drm subsystem using struct drm_rect to manage rectangular areas,
+ * this exports that rectangle type to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
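A hedged sketch (not from this patch): user-space collects damage as an array of struct drm_mode_rect and attaches it as a blob to the plane's FB_DAMAGE_CLIPS property, typically via drmModeCreatePropertyBlob() and drmModeAtomicAddProperty(); only the struct filling is shown here, and the include path may differ between kernel and libdrm header installs:

```c
#include <drm/drm_mode.h>

static void example_fill_damage(struct drm_mode_rect *clip,
				__s32 x, __s32 y, __s32 w, __s32 h)
{
	clip->x1 = x;		/* inclusive */
	clip->y1 = y;
	clip->x2 = x + w;	/* exclusive */
	clip->y2 = y + h;
}
```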
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 7f5634ce8e88..298b2e197744 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
@@ -529,6 +537,28 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly disadvised.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
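A hedged user-space sketch of querying the new parameter with the existing GETPARAM ioctl (requires an open fd on an i915 DRM node; the header include path may differ with a libdrm install):

```c
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_mmap_gtt_coherent(int drm_fd)
{
	int coherent = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_COHERENT,
		.value = &coherent,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* older kernels report the parameter as unknown */
	return coherent;	/* 1: GGTT writes are immediately coherent */
}
```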
typedef struct drm_i915_getparam {
__s32 param;
/*
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c06d0a5bdd80..91a16b333c69 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -105,14 +105,24 @@ struct drm_msm_gem_new {
__u32 handle; /* out */
};
-#define MSM_INFO_IOVA 0x01
-
-#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+/* Get or set GEM buffer info. The requested value can be passed
+ * directly in 'value', or for data larger than 64b 'value' is a
+ * pointer to userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer. For info returned by pointer,
+ * calling the GEM_INFO ioctl with null 'value' will return the
+ * required buffer size in 'len'
+ */
+#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
+#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
struct drm_msm_gem_info {
__u32 handle; /* in */
- __u32 flags; /* in - combination of MSM_INFO_* flags */
- __u64 offset; /* out, mmap() offset or iova */
+ __u32 info; /* in - one of MSM_INFO_* */
+ __u64 value; /* in or out */
+ __u32 len; /* in or out */
+ __u32 pad;
};
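A hedged sketch of the reworked interface: by-value queries return the result in 'value', while by-pointer queries treat 'value' as a user buffer pointer with 'len' giving its size. The helpers below are illustrative, not part of the patch:

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>	/* include path may differ with a libdrm install */

static int example_get_iova(int drm_fd, __u32 handle, __u64 *iova)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_GET_IOVA,
	};
	int ret = ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req);

	if (!ret)
		*iova = req.value;	/* returned by value */
	return ret;
}

static int example_set_name(int drm_fd, __u32 handle, const char *name)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_SET_NAME,
		.value = (__u64)(uintptr_t)name,	/* passed by pointer */
		.len = (__u32)strlen(name),
	};

	return ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req);
}
```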
#define MSM_PREP_READ 0x01
@@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd {
*/
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
-#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
+ MSM_SUBMIT_BO_WRITE | \
+ MSM_SUBMIT_BO_DUMP)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 7b6627783608..35c7d813c66e 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -36,6 +36,7 @@ extern "C" {
#define DRM_V3D_MMAP_BO 0x03
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
+#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -58,10 +60,15 @@ struct drm_v3d_submit_cl {
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
+ *
+ * This BCL will block on any previous BCL submitted on the
+ * same FD, but not on any RCL or BCLs submitted by other
+ * clients -- that is left up to the submitter to control
+ * using in_sync_bcl if necessary.
*/
__u32 bcl_start;
- /** End address of the BCL (first byte after the BCL) */
+ /** End address of the BCL (first byte after the BCL) */
__u32 bcl_end;
/* Offset of the render command list.
@@ -69,10 +76,15 @@ struct drm_v3d_submit_cl {
* This is the second set of commands executed, which will either
* execute the tiles that have been set up by the BCL, or a fixed set
* of tiles (in the case of RCL-only blits).
+ *
+ * This RCL will block on this submit's BCL, and any previous
+ * RCL submitted on the same FD, but not on any RCL or BCLs
+ * submitted by other clients -- that is left up to the
+ * submitter to control using in_sync_rcl if necessary.
*/
__u32 rcl_start;
- /** End address of the RCL (first byte after the RCL) */
+ /** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
/** An optional sync object to wait on before starting the BCL. */
@@ -169,6 +181,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+ DRM_V3D_PARAM_SUPPORTS_TFU,
};
struct drm_v3d_get_param {
@@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset {
__u32 offset;
};
+struct drm_v3d_submit_tfu {
+ __u32 icfg;
+ __u32 iia;
+ __u32 iis;
+ __u32 ica;
+ __u32 iua;
+ __u32 ioa;
+ __u32 ios;
+ __u32 coef[4];
+ /* First handle is the output BO, following are other inputs.
+ * 0 for unused.
+ */
+ __u32 bo_handles[4];
+ /* sync object to block on before running the TFU job. Each TFU
+ * job will execute in the order submitted to its FD. Synchronization
+ * against rendering jobs requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the TFU job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 9a781f0611df..f06a789f34cd 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,6 +47,13 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
@@ -54,12 +61,12 @@ struct drm_virtgpu_map {
};
struct drm_virtgpu_execbuffer {
- __u32 flags; /* for future use */
+ __u32 flags;
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
- __u32 pad;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
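A hedged sketch of requesting an out-fence: the ioctl is now _IOWR so the kernel can hand back fence_fd when FENCE_FD_OUT is set. The helper is illustrative, not part of the patch:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>	/* include path may differ with a libdrm install */

static int example_submit_with_fence(int drm_fd, void *cmds, __u32 size,
				     int *out_fence_fd)
{
	struct drm_virtgpu_execbuffer eb = {
		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
		.size = size,
		.command = (__u64)(uintptr_t)cmds,
		.fence_fd = -1,
	};
	int ret = ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);

	if (!ret)
		*out_fence_fd = eb.fence_fd;	/* fence fd to wait or poll on */
	return ret;
}
```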
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index ce43d340f010..8387e0af0f76 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -50,6 +50,8 @@ enum {
*
* IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb"
* is valid.
+ * IOCB_FLAG_IOPRIO - Set if the "aio_reqprio" member of the "struct iocb"
+ * is valid.
*/
#define IOCB_FLAG_RESFD (1 << 0)
#define IOCB_FLAG_IOPRIO (1 << 1)
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index bfaec6903b8b..b9ba520f7e4b 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -200,6 +200,15 @@ struct binder_node_debug_info {
__u32 has_weak_ref;
};
+struct binder_node_info_for_ref {
+ __u32 handle;
+ __u32 strong_count;
+ __u32 weak_count;
+ __u32 reserved1;
+ __u32 reserved2;
+ __u32 reserved3;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -208,6 +217,7 @@ struct binder_node_debug_info {
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
+#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binder_ctl.h
new file mode 100644
index 000000000000..65b2efd1a0a5
--- /dev/null
+++ b/include/uapi/linux/android/binder_ctl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 Canonical Ltd.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_CTL_H
+#define _UAPI_LINUX_BINDER_CTL_H
+
+#include <linux/android/binder.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define BINDERFS_MAX_NAME 255
+
+/**
+ * struct binderfs_device - retrieve information about a new binder device
+ * @name: the name to use for the new binderfs binder device
+ * @major: major number allocated for binderfs binder devices
+ * @minor: minor number allocated for the new binderfs binder device
+ *
+ */
+struct binderfs_device {
+ char name[BINDERFS_MAX_NAME + 1];
+ __u8 major;
+ __u8 minor;
+};
+
+/**
+ * Allocate a new binder device.
+ */
+#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
+
+#endif /* _UAPI_LINUX_BINDER_CTL_H */
+
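A hedged user-space sketch of BINDER_CTL_ADD: the ioctl is issued on the binder-control node of a mounted binderfs instance (the /dev/binderfs mount point below is an assumption, as is the header install path):

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder_ctl.h>

int main(void)
{
	struct binderfs_device device;
	int fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open binder-control");	/* binderfs must be mounted here */
		return 1;
	}

	memset(&device, 0, sizeof(device));
	strcpy(device.name, "my-binder");

	if (ioctl(fd, BINDER_CTL_ADD, &device))
		perror("BINDER_CTL_ADD");
	else
		printf("created %s (%u:%u)\n", device.name,
		       (unsigned int)device.major, (unsigned int)device.minor);
	close(fd);
	return 0;
}
```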
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 818ae690ab79..36a7e3f18e69 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -378,6 +378,7 @@ enum {
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARMEB (EM_ARM)
#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_CSKY (EM_CSKY|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_FRV (EM_FRV)
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
@@ -411,6 +412,7 @@ enum {
#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_XTENSA (EM_XTENSA)
#define AUDIT_PERM_EXEC 1
#define AUDIT_PERM_WRITE 2
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index e13eec3dfb2f..082119630b49 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -23,7 +23,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 2
+#define AUTOFS_PROTO_SUBVERSION 4
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
@@ -90,8 +90,10 @@ enum {
/* autofs version 4 and later definitions */
/* Mask for expire behaviour */
-#define AUTOFS_EXP_IMMEDIATE 1
-#define AUTOFS_EXP_LEAVES 2
+#define AUTOFS_EXP_NORMAL 0x00
+#define AUTOFS_EXP_IMMEDIATE 0x01
+#define AUTOFS_EXP_LEAVES 0x02
+#define AUTOFS_EXP_FORCED 0x04
#define AUTOFS_TYPE_ANY 0U
#define AUTOFS_TYPE_INDIRECT 1U
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 8d19e02d752a..5d4f58e059fd 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -30,10 +30,10 @@ struct bkey {
BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \
-static inline __u64 name(const struct bkey *k, unsigned i) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\
-static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
{ \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
@@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
static inline struct bkey *bkey_next(const struct bkey *k)
{
__u64 *d = (void *) k;
+
return (struct bkey *) (d + bkey_u64s(k));
}
-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{
__u64 *d = (void *) k;
+
return (struct bkey *) (d + nr_keys);
}
/* Enough for a key with 6 pointers */
diff --git a/include/uapi/linux/bfs_fs.h b/include/uapi/linux/bfs_fs.h
index 940b04772af8..08f6b4956359 100644
--- a/include/uapi/linux/bfs_fs.h
+++ b/include/uapi/linux/bfs_fs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* include/linux/bfs_fs.h - BFS data structures on disk.
- * Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com>
+ * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com>
*/
#ifndef _LINUX_BFS_FS_H
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index ff5a5db8906a..6fa38d001d84 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -137,8 +137,11 @@ struct blk_zone_range {
* sector specified in the report request structure.
* @BLKRESETZONE: Reset the write pointer of the zones in the specified
* sector range. The sector range must be zone aligned.
+ * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
+#define BLKGETZONESZ _IOR(0x12, 132, __u32)
+#define BLKGETNRZONES _IOR(0x12, 133, __u32)
#endif /* _UAPI_BLKZONED_H */
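A hedged user-space sketch of the two new queries; both take a __u32 by reference and only succeed on zoned block devices:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	__u32 zone_sectors = 0, nr_zones = 0;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKGETZONESZ, &zone_sectors) == 0 &&
	    ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
		printf("%u zones of %u sectors (512B) each\n",
		       nr_zones, zone_sectors);
	else
		perror("BLKGETZONESZ/BLKGETNRZONES");
	close(fd);
	return 0;
}
```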
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 66917a4eba27..91c43884f295 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -103,6 +103,7 @@ enum bpf_cmd {
BPF_BTF_LOAD,
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM,
};
enum bpf_map_type {
@@ -127,8 +128,19 @@ enum bpf_map_type {
BPF_MAP_TYPE_SOCKHASH,
BPF_MAP_TYPE_CGROUP_STORAGE,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_QUEUE,
+ BPF_MAP_TYPE_STACK,
};
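The new queue and stack map types pair with the BPF_MAP_LOOKUP_AND_DELETE_ELEM command added above: updates with a NULL key push, and lookup-and-delete pops. A hedged user-space sketch using the classic libbpf wrappers (bpf_create_map is the older wrapper name; newer libbpf prefers bpf_map_create):

```c
#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 val = 42, out = 0;
	int fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0 /* no key */,
				sizeof(val), 16 /* max entries */, 0);

	if (fd < 0) {
		perror("bpf_create_map");
		return 1;
	}
	if (bpf_map_update_elem(fd, NULL, &val, BPF_ANY) ||	/* push */
	    bpf_map_lookup_and_delete_elem(fd, NULL, &out))	/* pop */
		perror("queue ops");
	else
		printf("popped %u\n", out);
	return 0;
}
```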
+/* Note that tracing related programs such as
+ * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
+ * are not subject to a stable API since kernel internal data
+ * structures can change from release to release and may
+ * therefore break existing tracing BPF programs. Tracing BPF
+ * programs correspond to /a/ specific kernel which is to be
+ * analyzed, and not /a/ specific kernel /and/ all future ones.
+ */
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
@@ -152,6 +164,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
+ BPF_PROG_TYPE_FLOW_DISSECTOR,
};
enum bpf_attach_type {
@@ -172,6 +185,7 @@ enum bpf_attach_type {
BPF_CGROUP_UDP4_SENDMSG,
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
+ BPF_FLOW_DISSECTOR,
__MAX_BPF_ATTACH_TYPE
};
@@ -226,6 +240,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
+ * verifier will allow any alignment whatsoever. On platforms
+ * with strict alignment requirements for loads ands stores (such
+ * as sparc and mips) the verifier validates that all loads and
+ * stores provably follow this requirement. This flag turns that
+ * checking and enforcement off.
+ *
+ * It is mostly used for testing when we want to validate the
+ * context and memory access aspects of the verifier, but because
+ * of an unaligned access the alignment check would trigger before
+ * the one we are interested in.
+ */
+#define BPF_F_ANY_ALIGNMENT (1U << 1)
+
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
@@ -251,9 +279,6 @@ enum bpf_attach_type {
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
-/* flags for BPF_PROG_QUERY */
-#define BPF_F_QUERY_EFFECTIVE (1U << 0)
-
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
@@ -263,6 +288,12 @@ enum bpf_attach_type {
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
+/* Zero-initialize hash function seed. This should only be used for testing. */
+#define BPF_F_ZERO_SEED (1U << 6)
+
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -320,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
- __u32 kern_version; /* checked when prog_type=kprobe */
+ __u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -329,6 +360,13 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -347,8 +385,11 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
__u32 prog_fd;
__u32 retval;
- __u32 data_size_in;
- __u32 data_size_out;
+ __u32 data_size_in; /* input: len of data_in */
+ __u32 data_size_out; /* input/output: len of data_out
+ * returns ENOSPC if data_out
+ * is too small.
+ */
__aligned_u64 data_in;
__aligned_u64 data_out;
__u32 repeat;
@@ -459,6 +500,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is removed to
+ * make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1430,7 +1481,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
+ * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
@@ -1882,9 +1933,9 @@ union bpf_attr {
* is set to metric from route (IPv4/IPv6 only), and ifindex
* is set to the device index of the nexthop from the FIB lookup.
*
- * *plen* argument is the size of the passed in struct.
- * *flags* argument can be a combination of one or more of the
- * following values:
+ * *plen* argument is the size of the passed in struct.
+ * *flags* argument can be a combination of one or more of the
+ * following values:
*
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
@@ -1893,9 +1944,9 @@ union bpf_attr {
* Perform lookup from an egress perspective (default is
* ingress).
*
- * *ctx* is either **struct xdp_md** for XDP programs or
- * **struct sk_buff** tc cls_act programs.
- * Return
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+ * Return
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
@@ -2040,8 +2091,8 @@ union bpf_attr {
* translated to a keycode using the rc keymap, and reported as
* an input key down event. After a period a key up event is
* generated. This period can be extended by calling either
- * **bpf_rc_keydown** () again with the same values, or calling
- * **bpf_rc_repeat** ().
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
* Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
@@ -2124,23 +2175,158 @@ union bpf_attr {
* The *flags* meaning is specific for each map type,
* and has to be 0 for cgroup local storage.
*
- * Depending on the bpf program type, a local storage area
- * can be shared between multiple instances of the bpf program,
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
* running simultaneously.
*
* A user should care about the synchronization by himself.
- * For example, by using the BPF_STX_XADD instruction to alter
+ * For example, by using the **BPF_STX_XADD** instruction to alter
* the shared data.
* Return
- * Pointer to the local storage area.
+ * A pointer to the local storage area.
*
* int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
- * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map
- * It checks the selected sk is matching the incoming
- * request in the skb.
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * It checks that the selected socket matches the incoming
+ * request in the socket buffer.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ * Description
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
+ *
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ * Description
+ * Look for UDP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * The *ctx* should point to the context of the program, such as
+ * the skb or socket (depending on the hook in use). This is used
+ * to determine the base network namespace for the lookup.
+ *
+ * *tuple_size* must be one of:
+ *
+ * **sizeof**\ (*tuple*\ **->ipv4**)
+ * Look for an IPv4 socket.
+ * **sizeof**\ (*tuple*\ **->ipv6**)
+ * Look for an IPv6 socket.
+ *
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
+ *
+ * All values for *flags* are reserved for future usage, and must
+ * be left at zero.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
+ *
+ * int bpf_sk_release(struct bpf_sock *sock)
+ * Description
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
+ * Description
+ * For socket policies, insert *len* bytes into *msg* at offset
+ * *start*.
+ *
+ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
+ * *msg* it may want to insert metadata or options into the *msg*.
+ * This can later be read and used by any of the lower layer BPF
+ * hooks.
+ *
+ * This helper may fail if it is under memory pressure (a malloc
+ * fails); in these cases BPF programs will get an appropriate
+ * error and will need to handle it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * Description
+ * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors under certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if input parameters are
+ * invalid, either because the *start* byte is not a valid part of
+ * the *msg* payload or because the *pop* value is too large.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ * Return
+ * 0
*/
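
The lookup/release contract documented above is easiest to see in a small program. Below is a minimal sketch (not part of this patch or the kernel selftests) of a TC classifier that resolves the owning UDP socket of each IPv4 packet in the current netns and immediately drops the reference again. The helper prototypes are declared by hand here, where a real program would normally pull them in from bpf_helpers.h; the section and function names are placeholders.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/pkt_cls.h>

/* Normally provided by bpf_helpers.h; spelled out here for clarity. */
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
		struct bpf_sock_tuple *tuple, __u32 tuple_size,
		__u64 netns, __u64 flags) = (void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
		(void *) BPF_FUNC_sk_release;

__attribute__((section("classifier"), used))
int lookup_udp_owner(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct iphdr *iph = data + sizeof(struct ethhdr);
	struct bpf_sock_tuple tuple = {};
	struct udphdr *udp;
	struct bpf_sock *sk;

	/* Assumes plain IPv4-over-Ethernet for brevity. */
	if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_UDP)
		return TC_ACT_OK;
	udp = (void *)iph + iph->ihl * 4;
	if ((void *)(udp + 1) > data_end)
		return TC_ACT_OK;

	tuple.ipv4.saddr = iph->saddr;
	tuple.ipv4.daddr = iph->daddr;
	tuple.ipv4.sport = udp->source;
	tuple.ipv4.dport = udp->dest;

	/* BPF_F_CURRENT_NETNS: use the netns associated with the skb. */
	sk = bpf_sk_lookup_udp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* required for every non-NULL result */
	return TC_ACT_OK;
}

Passing **BPF_F_CURRENT_NETNS** as *netns* (defined further down in this patch) selects the namespace associated with the *ctx*, and the **bpf_sk_release**\ () call is mandatory whenever the lookup returns non-**NULL**; the verifier tracks the reference and rejects programs that may leak it.
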
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2226,7 +2412,16 @@ union bpf_attr {
FN(get_current_cgroup_id), \
FN(get_local_storage), \
FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id),
+ FN(skb_ancestor_cgroup_id), \
+ FN(sk_lookup_tcp), \
+ FN(sk_lookup_udp), \
+ FN(sk_release), \
+ FN(map_push_elem), \
+ FN(map_pop_elem), \
+ FN(map_peek_elem), \
+ FN(msg_push_data), \
+ FN(msg_pop_data), \
+ FN(rc_pointer_rel),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2282,6 +2477,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS (-1L)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2299,6 +2497,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
+#define __bpf_md_ptr(type, name) \
+union { \
+ type name; \
+ __u64 :64; \
+} __attribute__((aligned(8)))
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2333,6 +2537,9 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
+ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
+ __u64 tstamp;
+ __u32 wire_len;
};
struct bpf_tunnel_key {
@@ -2395,6 +2602,23 @@ struct bpf_sock {
*/
};
+struct bpf_sock_tuple {
+ union {
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ } ipv4;
+ struct {
+ __be32 saddr[4];
+ __be32 daddr[4];
+ __be16 sport;
+ __be16 dport;
+ } ipv6;
+ };
+};
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
@@ -2431,8 +2655,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
- void *data;
- void *data_end;
+ __bpf_md_ptr(void *, data);
+ __bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2441,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
@@ -2448,8 +2673,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
- void *data;
- void *data_end; /* End of directly accessible data */
+ __bpf_md_ptr(void *, data);
+ /* End of directly accessible data */
+ __bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)
@@ -2490,6 +2716,18 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
+ __u32 btf_id;
+ __u32 func_info_rec_size;
+ __aligned_u64 func_info;
+ __u32 nr_func_info;
+ __u32 nr_line_info;
+ __aligned_u64 line_info;
+ __aligned_u64 jited_line_info;
+ __u32 nr_jited_line_info;
+ __u32 line_info_rec_size;
+ __u32 jited_line_info_rec_size;
+ __u32 nr_prog_tags;
+ __aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -2778,4 +3016,42 @@ enum bpf_task_fd_type {
BPF_FD_TYPE_URETPROBE, /* filename + offset */
};
+struct bpf_flow_keys {
+ __u16 nhoff;
+ __u16 thoff;
+ __u16 addr_proto; /* ETH_P_* of valid addrs */
+ __u8 is_frag;
+ __u8 is_first_frag;
+ __u8 is_encap;
+ __u8 ip_proto;
+ __be16 n_proto;
+ __be16 sport;
+ __be16 dport;
+ union {
+ struct {
+ __be32 ipv4_src;
+ __be32 ipv4_dst;
+ };
+ struct {
+ __u32 ipv6_src[4]; /* in6_addr; network order */
+ __u32 ipv6_dst[4]; /* in6_addr; network order */
+ };
+ };
+};
+
+struct bpf_func_info {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
+
+struct bpf_line_info {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
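+
As a usage note (not part of the patch), the packed *line_col* field splits into a 22-bit line number and a 10-bit column via the two macros above. A loader that has fetched the *line_info* array through **bpf_prog_info** could decode one entry roughly like this; the function name is illustrative, and *strings* is assumed to point at the BTF string section.

#include <stdio.h>
#include <linux/bpf.h>

static void print_line_info(const struct bpf_line_info *li,
			    const char *strings)
{
	/* file_name_off and line_off index into the BTF string section. */
	printf("%s:%u:%u (insn %u)\n",
	       strings + li->file_name_off,
	       BPF_LINE_INFO_LINE_NUM(li->line_col),
	       BPF_LINE_INFO_LINE_COL(li->line_col),
	       li->insn_off);
}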
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 972265f32871..7b7475ef2f17 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -34,13 +34,16 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-31: unused
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
- * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
@@ -51,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
+#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
@@ -64,8 +68,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_MAX 11
-#define NR_BTF_KINDS 12
+#define BTF_KIND_FUNC 12 /* Function */
+#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+#define BTF_KIND_MAX 13
+#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -107,7 +113,29 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
- __u32 offset; /* offset in bits */
+ /* If the type info kind_flag is set, the btf_member offset
+ * contains both member bitfield size and bit offset. The
+ * bitfield size is set for bitfield members. If the type
+ * info kind_flag is not set, the offset contains only bit
+ * offset.
+ */
+ __u32 offset;
+};
+
+/* If the struct/union type info kind_flag is set, the
+ * following two macros are used to access bitfield_size
+ * and bit_offset from btf_member.offset.
+ */
+#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
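+
For illustration only (the walker below is not kernel code and its name is made up), the new kind_flag and the two member macros are typically consumed together when dumping a struct or union type:

#include <stdio.h>
#include <linux/btf.h>

static void dump_members(const struct btf_type *t)
{
	/* For STRUCT/UNION, vlen btf_member entries follow btf_type. */
	const struct btf_member *m = (const struct btf_member *)(t + 1);
	unsigned int kflag = BTF_INFO_KFLAG(t->info);
	unsigned int vlen = BTF_INFO_VLEN(t->info);
	unsigned int i;

	for (i = 0; i < vlen; i++, m++) {
		if (kflag)
			printf("member %u: bit_offset=%u bitfield_size=%u\n",
			       i, BTF_MEMBER_BIT_OFFSET(m->offset),
			       BTF_MEMBER_BITFIELD_SIZE(m->offset));
		else
			printf("member %u: bit_offset=%u\n", i, m->offset);
	}
}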
+
+/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
+ * The exact number of btf_param is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_param {
+ __u32 name_off;
+ __u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5ca1d21fc4a7..e0763bc4158e 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -269,6 +269,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
+#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index aff1356c2bb8..e974f4bb5378 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -458,6 +458,7 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID_V2 (1ULL << 36)
/*
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 097fcd812471..3094af68b6e7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -152,10 +152,13 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
#define CEC_TX_STATUS_LOW_DRIVE (1 << 3)
#define CEC_TX_STATUS_ERROR (1 << 4)
#define CEC_TX_STATUS_MAX_RETRIES (1 << 5)
+#define CEC_TX_STATUS_ABORTED (1 << 6)
+#define CEC_TX_STATUS_TIMEOUT (1 << 7)
#define CEC_RX_STATUS_OK (1 << 0)
#define CEC_RX_STATUS_TIMEOUT (1 << 1)
#define CEC_RX_STATUS_FEATURE_ABORT (1 << 2)
+#define CEC_RX_STATUS_ABORTED (1 << 3)
static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
{
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 19bf0ca6d635..4dc1603919ce 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -29,6 +29,7 @@ enum {
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
CRYPTO_MSG_DELRNG,
+ CRYPTO_MSG_GETSTAT,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -50,6 +51,16 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
+ CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -65,6 +76,71 @@ struct crypto_user_alg {
__u32 cru_flags;
};
+struct crypto_stat_aead {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_akcipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_verify_cnt;
+ __u64 stat_sign_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_cipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_compress {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_compress_cnt;
+ __u64 stat_compress_tlen;
+ __u64 stat_decompress_cnt;
+ __u64 stat_decompress_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_hash {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_hash_cnt;
+ __u64 stat_hash_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_kpp {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_setsecret_cnt;
+ __u64 stat_generate_public_key_cnt;
+ __u64 stat_compute_shared_secret_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_rng {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_generate_cnt;
+ __u64 stat_generate_tlen;
+ __u64 stat_seed_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_larval {
+ char type[CRYPTO_MAX_NAME];
+};
+
struct crypto_report_larval {
char type[CRYPTO_MAX_NAME];
};
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 79407bbd296d..6e52d3660654 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -163,6 +163,11 @@ enum devlink_param_cmode {
DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
};
+enum devlink_param_fw_load_policy_value {
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER,
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH,
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
diff --git a/include/uapi/linux/dns_resolver.h b/include/uapi/linux/dns_resolver.h
new file mode 100644
index 000000000000..129745f9c794
--- /dev/null
+++ b/include/uapi/linux/dns_resolver.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/* DNS resolver interface definitions.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_DNS_RESOLVER_H
+#define _UAPI_LINUX_DNS_RESOLVER_H
+
+#include <linux/types.h>
+
+/*
+ * Type of payload.
+ */
+enum dns_payload_content_type {
+ DNS_PAYLOAD_IS_SERVER_LIST = 0, /* List of servers, requested by srv=1 */
+};
+
+/*
+ * Type of address that might be found in an address record.
+ */
+enum dns_payload_address_type {
+ DNS_ADDRESS_IS_IPV4 = 0, /* 4-byte AF_INET address */
+ DNS_ADDRESS_IS_IPV6 = 1, /* 16-byte AF_INET6 address */
+};
+
+/*
+ * Type of protocol used to access a server.
+ */
+enum dns_payload_protocol_type {
+ DNS_SERVER_PROTOCOL_UNSPECIFIED = 0,
+ DNS_SERVER_PROTOCOL_UDP = 1, /* Use UDP to talk to the server */
+ DNS_SERVER_PROTOCOL_TCP = 2, /* Use TCP to talk to the server */
+};
+
+/*
+ * Source of record included in DNS resolver payload.
+ */
+enum dns_record_source {
+ DNS_RECORD_UNAVAILABLE = 0, /* No source available (empty record) */
+ DNS_RECORD_FROM_CONFIG = 1, /* From local configuration data */
+ DNS_RECORD_FROM_DNS_A = 2, /* From DNS A or AAAA record */
+ DNS_RECORD_FROM_DNS_AFSDB = 3, /* From DNS AFSDB record */
+ DNS_RECORD_FROM_DNS_SRV = 4, /* From DNS SRV record */
+ DNS_RECORD_FROM_NSS = 5, /* From NSS */
+ NR__dns_record_source
+};
+
+/*
+ * Status of record included in DNS resolver payload.
+ */
+enum dns_lookup_status {
+ DNS_LOOKUP_NOT_DONE = 0, /* No lookup has been made */
+ DNS_LOOKUP_GOOD = 1, /* Good records obtained */
+ DNS_LOOKUP_GOOD_WITH_BAD = 2, /* Good records, some decoding errors */
+ DNS_LOOKUP_BAD = 3, /* Couldn't decode results */
+ DNS_LOOKUP_GOT_NOT_FOUND = 4, /* Got a "Not Found" result */
+ DNS_LOOKUP_GOT_LOCAL_FAILURE = 5, /* Local failure during lookup */
+ DNS_LOOKUP_GOT_TEMP_FAILURE = 6, /* Temporary failure during lookup */
+ DNS_LOOKUP_GOT_NS_FAILURE = 7, /* Name server failure */
+ NR__dns_lookup_status
+};
+
+/*
+ * Header at the beginning of binary format payload.
+ */
+struct dns_payload_header {
+ __u8 zero; /* Zero byte: marks this as not being text */
+ __u8 content; /* enum dns_payload_content_type */
+ __u8 version; /* Encoding version */
+} __packed;
+
+/*
+ * Header at the beginning of a V1 server list. This is followed directly by
+ * the server records. Each server record begins with a struct of type
+ * dns_server_list_v1_server.
+ */
+struct dns_server_list_v1_header {
+ struct dns_payload_header hdr;
+ __u8 source; /* enum dns_record_source */
+ __u8 status; /* enum dns_lookup_status */
+ __u8 nr_servers; /* Number of server records following this */
+} __packed;
+
+/*
+ * Header at the beginning of each V1 server record. This is followed by the
+ * characters of the name with no NUL-terminator, followed by the address
+ * records for that server. Each address record begins with a struct of type
+ * struct dns_server_list_v1_address.
+ */
+struct dns_server_list_v1_server {
+ __u16 name_len; /* Length of name (LE) */
+ __u16 priority; /* Priority (as SRV record) (LE) */
+ __u16 weight; /* Weight (as SRV record) (LE) */
+ __u16 port; /* UDP/TCP port number (LE) */
+ __u8 source; /* enum dns_record_source */
+ __u8 status; /* enum dns_lookup_status */
+ __u8 protocol; /* enum dns_payload_protocol_type */
+ __u8 nr_addrs;
+} __packed;
+
+/*
+ * Header at the beginning of each V1 address record. This is followed by the
+ * bytes of the address, 4 for IPV4 and 16 for IPV6.
+ */
+struct dns_server_list_v1_address {
+ __u8 address_type; /* enum dns_payload_address_type */
+} __packed;
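+
A short sketch of how userspace might walk this layout (none of this is in the patch; the function and parameter names are invented, and it assumes a little-endian host so the LE fields can be read directly):

#include <stdio.h>
#include <string.h>
#include <linux/dns_resolver.h>

static void walk_v1_payload(const unsigned char *p, size_t len)
{
	struct dns_server_list_v1_header hdr;
	size_t i, j, alen;

	if (len < sizeof(hdr))
		return;
	memcpy(&hdr, p, sizeof(hdr));
	if (hdr.hdr.zero != 0 ||
	    hdr.hdr.content != DNS_PAYLOAD_IS_SERVER_LIST ||
	    hdr.hdr.version != 1)
		return;
	p += sizeof(hdr);
	len -= sizeof(hdr);

	for (i = 0; i < hdr.nr_servers; i++) {
		struct dns_server_list_v1_server srv;

		if (len < sizeof(srv))
			return;
		memcpy(&srv, p, sizeof(srv));
		p += sizeof(srv);
		len -= sizeof(srv);

		/* The name follows the record header, not NUL-terminated. */
		if (len < srv.name_len)
			return;
		printf("server: %.*s (%u addrs)\n",
		       (int)srv.name_len, p, srv.nr_addrs);
		p += srv.name_len;
		len -= srv.name_len;

		for (j = 0; j < srv.nr_addrs; j++) {
			struct dns_server_list_v1_address a;

			if (len < sizeof(a))
				return;
			memcpy(&a, p, sizeof(a));
			p += sizeof(a);
			len -= sizeof(a);
			/* Followed by 4 bytes (IPv4) or 16 bytes (IPv6). */
			alen = a.address_type == DNS_ADDRESS_IS_IPV6 ? 16 : 4;
			if (len < alen)
				return;
			p += alen;
			len -= alen;
		}
	}
}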
+
+#endif /* _UAPI_LINUX_DNS_RESOLVER_H */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 31aa10178335..0c3000faedba 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -34,6 +34,7 @@
#define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
+#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
@@ -41,7 +42,9 @@
#define EM_TILEPRO 188 /* Tilera TILEPro */
#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
#define EM_TILEGX 191 /* Tilera TILE-Gx */
+#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
+#define EM_CSKY 252 /* C-SKY */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
/*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c5358e0ae7c5..e4d6ddd93567 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -420,10 +420,12 @@ typedef struct elf64_shdr {
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
+#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index dc69391d2bba..17be76aeb468 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -91,10 +91,6 @@
* %ETHTOOL_GSET to get the current values before making specific
* changes and then applying them with %ETHTOOL_SSET.
*
- * Drivers that implement set_settings() should validate all fields
- * other than @cmd that are not described as read-only or deprecated,
- * and must ignore all fields described as read-only.
- *
* Deprecated fields should be ignored by both users and drivers.
*/
struct ethtool_cmd {
@@ -886,7 +882,7 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
-/* How rings are layed out when accessing virtual functions or
+/* How rings are laid out when accessing virtual functions or
* offloaded queues is device specific. To allow users to do flow
* steering and specify these queues the ring cookie is partitioned
* into a 32bit queue index with an 8 bit virtual function id.
@@ -895,7 +891,7 @@ struct ethtool_rx_flow_spec {
* devices start supporting PCIe w/ARI. However at the moment I
* do not know of any devices that support this so I do not reserve
* space for this at this time. If a future patch consumes the next
- * byte it should be aware of this possiblity.
+ * byte it should be aware of this possibility.
*/
#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
@@ -1800,14 +1796,9 @@ enum ethtool_reset_flags {
* rejected.
*
* Deprecated %ethtool_cmd fields transceiver, maxtxpkt and maxrxpkt
- * are not available in %ethtool_link_settings. Until all drivers are
- * converted to ignore them or to the new %ethtool_link_settings API,
- * for both queries and changes, users should always try
- * %ETHTOOL_GLINKSETTINGS first, and if it fails with -ENOTSUPP stick
- * only to %ETHTOOL_GSET and %ETHTOOL_SSET consistently. If it
- * succeeds, then users should stick to %ETHTOOL_GLINKSETTINGS and
- * %ETHTOOL_SLINKSETTINGS (which would support drivers implementing
- * either %ethtool_cmd or %ethtool_link_settings).
+ * are not available in %ethtool_link_settings. These fields are
+ * always set to zero in the %ETHTOOL_GSET reply, and %ETHTOOL_SSET
+ * will fail if any of them is set to a non-zero value.
*
* Users should assume that all fields not marked read-only are
* writable and subject to validation by the driver. They should use
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index 74247917de04..909c98fcace2 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -10,11 +10,13 @@
#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
+#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
+#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
#define FAN_ONDIR 0x40000000 /* event occurred against dir */
@@ -27,10 +29,12 @@
#define FAN_CLOEXEC 0x00000001
#define FAN_NONBLOCK 0x00000002
-/* These are NOT bitwise flags. Both bits are used togther. */
+/* These are NOT bitwise flags. Both bits are used together. */
#define FAN_CLASS_NOTIF 0x00000000
#define FAN_CLASS_CONTENT 0x00000004
#define FAN_CLASS_PRE_CONTENT 0x00000008
+
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
@@ -38,6 +42,10 @@
#define FAN_UNLIMITED_MARKS 0x00000020
#define FAN_ENABLE_AUDIT 0x00000040
+/* Flags to determine fanotify event format */
+#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */
+
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_ALL_CLASS_BITS | FAN_UNLIMITED_QUEUE |\
FAN_UNLIMITED_MARKS)
@@ -47,11 +55,18 @@
#define FAN_MARK_REMOVE 0x00000002
#define FAN_MARK_DONT_FOLLOW 0x00000004
#define FAN_MARK_ONLYDIR 0x00000008
-#define FAN_MARK_MOUNT 0x00000010
+/* FAN_MARK_MOUNT is 0x00000010 */
#define FAN_MARK_IGNORED_MASK 0x00000020
#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
#define FAN_MARK_FLUSH 0x00000080
+/* FAN_MARK_FILESYSTEM is 0x00000100 */
+/* These are NOT bitwise flags. Both bits can be used together. */
+#define FAN_MARK_INODE 0x00000000
+#define FAN_MARK_MOUNT 0x00000010
+#define FAN_MARK_FILESYSTEM 0x00000100
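+
For example, combining the new filesystem mark type with the new exec event could look roughly like this (a hedged sketch, not from the kernel tree; it assumes a libc whose <sys/fanotify.h> already carries these new constants, and the watched path is arbitrary):

#include <fcntl.h>
#include <unistd.h>
#include <sys/fanotify.h>

/* Watch every mount of the filesystem containing "/" for execs. */
int watch_exec_on_fs(void)
{
	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
			  FAN_OPEN_EXEC, AT_FDCWD, "/") < 0) {
		close(fd);
		return -1;
	}
	return fd;
}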
+
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
FAN_MARK_REMOVE |\
FAN_MARK_DONT_FOLLOW |\
@@ -61,11 +76,7 @@
FAN_MARK_IGNORED_SURV_MODIFY |\
FAN_MARK_FLUSH)
-/*
- * All of the events - we build the list by hand so that we can add flags in
- * the future and not break backward compatibility. Apps will get only the
- * events that they originally wanted. Be sure to add new events here!
- */
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_EVENTS (FAN_ACCESS |\
FAN_MODIFY |\
FAN_CLOSE |\
@@ -74,9 +85,11 @@
/*
* All events which require a permission response from userspace
*/
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\
FAN_ACCESS_PERM)
+/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\
FAN_ALL_PERM_EVENTS |\
FAN_Q_OVERFLOW)
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 1db453e4b550..1acd2b179aef 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -47,11 +47,11 @@
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
/**
- * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
+ * struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
- * @type: Discriminates the fw_cdev_event_ types
+ * @type: Discriminates the fw_cdev_event_* types
*
- * This struct may be used to access generic members of all fw_cdev_event_
+ * This struct may be used to access generic members of all fw_cdev_event_*
* types regardless of the specific type.
*
* Data passed in the @closure field for a request will be returned in the
@@ -123,7 +123,13 @@ struct fw_cdev_event_response {
/**
* struct fw_cdev_event_request - Old version of &fw_cdev_event_request2
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @data: Incoming data, if any
*
* This event is sent instead of &fw_cdev_event_request2 if the kernel or
* the client implements ABI version <= 3. &fw_cdev_event_request lacks
@@ -353,7 +359,7 @@ struct fw_cdev_event_phy_packet {
};
/**
- * union fw_cdev_event - Convenience union of fw_cdev_event_ types
+ * union fw_cdev_event - Convenience union of fw_cdev_event_* types
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
@@ -735,7 +741,7 @@ struct fw_cdev_set_iso_channels {
* @header: Header and payload in case of a transmit context.
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
- * Use the FW_CDEV_ISO_ macros to fill in @control.
+ * Use the FW_CDEV_ISO_* macros to fill in @control.
* The @header array is empty in case of receive contexts.
*
* Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT:
@@ -842,7 +848,7 @@ struct fw_cdev_queue_iso {
* the %FW_CDEV_ISO_SYNC bit set
* @tags: Tag filter bit mask. Only valid for isochronous reception.
* Determines the tag values for which packets will be accepted.
- * Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags.
+ * Use FW_CDEV_ISO_CONTEXT_MATCH_* macros to set @tags.
* @handle: Isochronous context handle within which to transmit or receive
*/
struct fw_cdev_start_iso {
@@ -1009,8 +1015,8 @@ struct fw_cdev_send_stream_packet {
* on the same card as this device. After transmission, an
* %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated.
*
- * The payload @data[] shall be specified in host byte order. Usually,
- * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets
+ * The payload @data\[\] shall be specified in host byte order. Usually,
+ * @data\[1\] needs to be the bitwise inverse of @data\[0\]. VersaPHY packets
* are an exception to this rule.
*
* The ioctl is only permitted on device files which represent a local node.
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 73e01918f996..53a22e8e0408 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -14,6 +14,11 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
+#if !defined(__KERNEL__)
+#include <linux/mount.h>
+#endif
+
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
* the file limit at runtime and only root can increase the per-process
@@ -101,57 +106,6 @@ struct inodes_stat_t {
#define NR_FILE 8192 /* this can well be larger on a larger system */
-
-/*
- * These are the fs-independent mount-flags: up to 32 flags are supported
- */
-#define MS_RDONLY 1 /* Mount read-only */
-#define MS_NOSUID 2 /* Ignore suid and sgid bits */
-#define MS_NODEV 4 /* Disallow access to device special files */
-#define MS_NOEXEC 8 /* Disallow program execution */
-#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
-#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
-#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
-#define MS_NOATIME 1024 /* Do not update access times. */
-#define MS_NODIRATIME 2048 /* Do not update directory access times */
-#define MS_BIND 4096
-#define MS_MOVE 8192
-#define MS_REC 16384
-#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
- MS_VERBOSE is deprecated. */
-#define MS_SILENT 32768
-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define MS_UNBINDABLE (1<<17) /* change to unbindable */
-#define MS_PRIVATE (1<<18) /* change to private */
-#define MS_SLAVE (1<<19) /* change to slave */
-#define MS_SHARED (1<<20) /* change to shared */
-#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
-#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define MS_I_VERSION (1<<23) /* Update inode I_version field */
-#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
-#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
-
-/* These sb flags are internal to the kernel */
-#define MS_SUBMOUNT (1<<26)
-#define MS_NOREMOTELOCK (1<<27)
-#define MS_NOSEC (1<<28)
-#define MS_BORN (1<<29)
-#define MS_ACTIVE (1<<30)
-#define MS_NOUSER (1<<31)
-
-/*
- * Superblock flags that can be altered by MS_REMOUNT
- */
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
- MS_LAZYTIME)
-
-/*
- * Old magic mount flag and mask
- */
-#define MS_MGC_VAL 0xC0ED0000
-#define MS_MGC_MSK 0xffff0000
-
/*
* Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
*/
@@ -279,8 +233,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
#define FS_ENCRYPTION_MODE_AES_128_CBC 5
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 92fa24c24c92..b4967d48bfda 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -116,6 +116,12 @@
*
* 7.27
* - add FUSE_ABORT_ERROR
+ *
+ * 7.28
+ * - add FUSE_COPY_FILE_RANGE
+ * - add FOPEN_CACHE_DIR
+ * - add FUSE_MAX_PAGES, add max_pages to init_out
+ * - add FUSE_CACHE_SYMLINKS
*/
#ifndef _LINUX_FUSE_H
@@ -151,7 +157,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 27
+#define FUSE_KERNEL_MINOR_VERSION 28
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -219,10 +225,12 @@ struct fuse_file_lock {
* FOPEN_DIRECT_IO: bypass page cache for this open file
* FOPEN_KEEP_CACHE: don't invalidate the data cache on open
* FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_CACHE_DIR: allow caching this directory
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
+#define FOPEN_CACHE_DIR (1 << 3)
/**
* INIT request/reply flags
@@ -249,6 +257,8 @@ struct fuse_file_lock {
* FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc
* FUSE_POSIX_ACL: filesystem supports posix acls
* FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
+ * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
+ * FUSE_CACHE_SYMLINKS: cache READLINK responses
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -272,6 +282,8 @@ struct fuse_file_lock {
#define FUSE_HANDLE_KILLPRIV (1 << 19)
#define FUSE_POSIX_ACL (1 << 20)
#define FUSE_ABORT_ERROR (1 << 21)
+#define FUSE_MAX_PAGES (1 << 22)
+#define FUSE_CACHE_SYMLINKS (1 << 23)
/**
* CUSE INIT request/reply flags
@@ -337,53 +349,54 @@ struct fuse_file_lock {
#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0)
enum fuse_opcode {
- FUSE_LOOKUP = 1,
- FUSE_FORGET = 2, /* no reply */
- FUSE_GETATTR = 3,
- FUSE_SETATTR = 4,
- FUSE_READLINK = 5,
- FUSE_SYMLINK = 6,
- FUSE_MKNOD = 8,
- FUSE_MKDIR = 9,
- FUSE_UNLINK = 10,
- FUSE_RMDIR = 11,
- FUSE_RENAME = 12,
- FUSE_LINK = 13,
- FUSE_OPEN = 14,
- FUSE_READ = 15,
- FUSE_WRITE = 16,
- FUSE_STATFS = 17,
- FUSE_RELEASE = 18,
- FUSE_FSYNC = 20,
- FUSE_SETXATTR = 21,
- FUSE_GETXATTR = 22,
- FUSE_LISTXATTR = 23,
- FUSE_REMOVEXATTR = 24,
- FUSE_FLUSH = 25,
- FUSE_INIT = 26,
- FUSE_OPENDIR = 27,
- FUSE_READDIR = 28,
- FUSE_RELEASEDIR = 29,
- FUSE_FSYNCDIR = 30,
- FUSE_GETLK = 31,
- FUSE_SETLK = 32,
- FUSE_SETLKW = 33,
- FUSE_ACCESS = 34,
- FUSE_CREATE = 35,
- FUSE_INTERRUPT = 36,
- FUSE_BMAP = 37,
- FUSE_DESTROY = 38,
- FUSE_IOCTL = 39,
- FUSE_POLL = 40,
- FUSE_NOTIFY_REPLY = 41,
- FUSE_BATCH_FORGET = 42,
- FUSE_FALLOCATE = 43,
- FUSE_READDIRPLUS = 44,
- FUSE_RENAME2 = 45,
- FUSE_LSEEK = 46,
+ FUSE_LOOKUP = 1,
+ FUSE_FORGET = 2, /* no reply */
+ FUSE_GETATTR = 3,
+ FUSE_SETATTR = 4,
+ FUSE_READLINK = 5,
+ FUSE_SYMLINK = 6,
+ FUSE_MKNOD = 8,
+ FUSE_MKDIR = 9,
+ FUSE_UNLINK = 10,
+ FUSE_RMDIR = 11,
+ FUSE_RENAME = 12,
+ FUSE_LINK = 13,
+ FUSE_OPEN = 14,
+ FUSE_READ = 15,
+ FUSE_WRITE = 16,
+ FUSE_STATFS = 17,
+ FUSE_RELEASE = 18,
+ FUSE_FSYNC = 20,
+ FUSE_SETXATTR = 21,
+ FUSE_GETXATTR = 22,
+ FUSE_LISTXATTR = 23,
+ FUSE_REMOVEXATTR = 24,
+ FUSE_FLUSH = 25,
+ FUSE_INIT = 26,
+ FUSE_OPENDIR = 27,
+ FUSE_READDIR = 28,
+ FUSE_RELEASEDIR = 29,
+ FUSE_FSYNCDIR = 30,
+ FUSE_GETLK = 31,
+ FUSE_SETLK = 32,
+ FUSE_SETLKW = 33,
+ FUSE_ACCESS = 34,
+ FUSE_CREATE = 35,
+ FUSE_INTERRUPT = 36,
+ FUSE_BMAP = 37,
+ FUSE_DESTROY = 38,
+ FUSE_IOCTL = 39,
+ FUSE_POLL = 40,
+ FUSE_NOTIFY_REPLY = 41,
+ FUSE_BATCH_FORGET = 42,
+ FUSE_FALLOCATE = 43,
+ FUSE_READDIRPLUS = 44,
+ FUSE_RENAME2 = 45,
+ FUSE_LSEEK = 46,
+ FUSE_COPY_FILE_RANGE = 47,
/* CUSE specific operations */
- CUSE_INIT = 4096,
+ CUSE_INIT = 4096,
};
enum fuse_notify_code {
@@ -610,7 +623,9 @@ struct fuse_init_out {
uint16_t congestion_threshold;
uint32_t max_write;
uint32_t time_gran;
- uint32_t unused[9];
+ uint16_t max_pages;
+ uint16_t padding;
+ uint32_t unused[8];
};
#define CUSE_INIT_INFO_MAX 4096
@@ -792,4 +807,14 @@ struct fuse_lseek_out {
uint64_t offset;
};
+struct fuse_copy_file_range_in {
+ uint64_t fh_in;
+ uint64_t off_in;
+ uint64_t nodeid_out;
+ uint64_t fh_out;
+ uint64_t off_out;
+ uint64_t len;
+ uint64_t flags;
+};
+
#endif /* _LINUX_FUSE_H */
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 24a861c0d29d..065408e16a80 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -12,6 +12,7 @@ enum {
TCA_STATS_APP,
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
+ TCA_STATS_BASIC_HW,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 1bf6e6df084b..4ebfe0ac6c5b 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -65,7 +65,7 @@ struct gpioline_info {
/**
* struct gpiohandle_request - Information about a GPIO handle request
- * @lineoffsets: an array desired lines, specified by offset index for the
+ * @lineoffsets: an array of desired lines, specified by offset index for the
* associated GPIO device
* @flags: desired flags for the desired GPIO lines, such as
* GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index eea5d02c58de..74a8609fcb4d 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -33,6 +33,8 @@ enum hash_algo {
HASH_ALGO_TGR_160,
HASH_ALGO_TGR_192,
HASH_ALGO_SM3_256,
+ HASH_ALGO_STREEBOG_256,
+ HASH_ALGO_STREEBOG_512,
HASH_ALGO__LAST
};
diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h
index ebaf5701c9db..dfcf3ce0097f 100644
--- a/include/uapi/linux/if_addr.h
+++ b/include/uapi/linux/if_addr.h
@@ -34,6 +34,7 @@ enum {
IFA_MULTICAST,
IFA_FLAGS,
IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */
+ IFA_TARGET_NETNSID,
__IFA_MAX,
};
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index 4605527ca41b..c3cc5a9e5eaf 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -114,18 +114,18 @@
/* ARP ioctl request. */
struct arpreq {
- struct sockaddr arp_pa; /* protocol address */
- struct sockaddr arp_ha; /* hardware address */
- int arp_flags; /* flags */
- struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
- char arp_dev[16];
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ char arp_dev[IFNAMSIZ];
};
struct arpreq_old {
- struct sockaddr arp_pa; /* protocol address */
- struct sockaddr arp_ha; /* hardware address */
- int arp_flags; /* flags */
- struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
+ struct sockaddr arp_pa; /* protocol address */
+ struct sockaddr arp_ha; /* hardware address */
+ int arp_flags; /* flags */
+ struct sockaddr arp_netmask; /* netmask (only for proxy arps) */
};
/* ARP Flag values. */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index e41eda3c71f1..773e476a8e54 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -292,4 +292,25 @@ struct br_mcast_stats {
__u64 mcast_bytes[BR_MCAST_DIR_SIZE];
__u64 mcast_packets[BR_MCAST_DIR_SIZE];
};
+
+/* bridge boolean options
+ * BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
+ *
+ * IMPORTANT: if adding a new option do not forget to handle
+ * it in br_boolopt_toggle/get and bridge sysfs
+ */
+enum br_boolopt_id {
+ BR_BOOLOPT_NO_LL_LEARN,
+ BR_BOOLOPT_MAX
+};
+
+/* struct br_boolopt_multi - change multiple bridge boolean options
+ *
+ * @optval: new option values (bit per option)
+ * @optmask: options to change (bit per option)
+ */
+struct br_boolopt_multi {
+ __u32 optval;
+ __u32 optmask;
+};
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_fddi.h b/include/uapi/linux/if_fddi.h
index 75eed8b62823..7239aa9c0766 100644
--- a/include/uapi/linux/if_fddi.h
+++ b/include/uapi/linux/if_fddi.h
@@ -6,9 +6,10 @@
*
* Global definitions for the ANSI FDDI interface.
*
- * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004
+ * Version: @(#)if_fddi.h 1.0.3 Oct 6 2018
*
- * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ * Author: Lawrence V. Stefani, <stefani@yahoo.com>
+ * Maintainer: Maciej W. Rozycki, <macro@linux-mips.org>
*
* if_fddi.h is based on previous if_ether.h and if_tr.h work by
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -45,7 +46,21 @@
#define FDDI_K_OUI_LEN 3 /* Octets in OUI in 802.2 SNAP
header */
-/* Define FDDI Frame Control (FC) Byte values */
+/* Define FDDI Frame Control (FC) Byte masks */
+#define FDDI_FC_K_CLASS_MASK 0x80 /* class bit */
+#define FDDI_FC_K_CLASS_SYNC 0x80
+#define FDDI_FC_K_CLASS_ASYNC 0x00
+#define FDDI_FC_K_ALEN_MASK 0x40 /* address length bit */
+#define FDDI_FC_K_ALEN_48 0x40
+#define FDDI_FC_K_ALEN_16 0x00
+#define FDDI_FC_K_FORMAT_MASK 0x30 /* format bits */
+#define FDDI_FC_K_FORMAT_FUTURE 0x30
+#define FDDI_FC_K_FORMAT_IMPLEMENTOR 0x20
+#define FDDI_FC_K_FORMAT_LLC 0x10
+#define FDDI_FC_K_FORMAT_MANAGEMENT 0x00
+#define FDDI_FC_K_CONTROL_MASK 0x0f /* control bits */
+
+/* Define FDDI Frame Control (FC) Byte specific values */
#define FDDI_FC_K_VOID 0x00
#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
#define FDDI_FC_K_RESTRICTED_TOKEN 0xC0
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 43391e2d1153..d6533828123a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -161,6 +161,7 @@ enum {
IFLA_EVENT,
IFLA_NEW_NETNSID,
IFLA_IF_NETNSID,
+ IFLA_TARGET_NETNSID = IFLA_IF_NETNSID, /* new alias */
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
@@ -286,6 +287,8 @@ enum {
IFLA_BR_MCAST_STATS_ENABLED,
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
+ IFLA_BR_VLAN_STATS_PER_PORT,
+ IFLA_BR_MULTI_BOOLOPT,
__IFLA_BR_MAX,
};
@@ -531,6 +534,7 @@ enum {
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -540,6 +544,14 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+enum ifla_vxlan_df {
+ VXLAN_DF_UNSET = 0,
+ VXLAN_DF_SET,
+ VXLAN_DF_INHERIT,
+ __VXLAN_DF_END,
+ VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -554,10 +566,20 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
+ IFLA_GENEVE_TTL_INHERIT,
+ IFLA_GENEVE_DF,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+enum ifla_geneve_df {
+ GENEVE_DF_UNSET = 0,
+ GENEVE_DF_SET,
+ GENEVE_DF_INHERIT,
+ __GENEVE_DF_END,
+ GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 67b61d91d89b..467b654bd4c7 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -57,6 +57,7 @@ struct sockaddr_ll {
#define PACKET_QDISC_BYPASS 20
#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_DATA 22
+#define PACKET_IGNORE_OUTGOING 23
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index ee432cd3018c..23a6753b37df 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -59,6 +59,7 @@
#define TUNGETVNETBE _IOR('T', 223, int)
#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
#define TUNSETFILTEREBPF _IOR('T', 225, int)
+#define TUNSETCARRIER _IOW('T', 226, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 1b3d148c4560..7d9105533c7b 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -160,4 +160,24 @@ enum {
};
#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
+
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+#define TUNNEL_OAM __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
+#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
+
+#define TUNNEL_OPTIONS_PRESENT \
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+
#endif /* _UAPI_IF_TUNNEL_H_ */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 48e8a225b985..f6052e70bf40 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_MULTICAST(a) IN_CLASSD(a)
-#define IN_MULTICAST_NET 0xF0000000
+#define IN_MULTICAST_NET 0xe0000000
-#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+#define IN_BADCLASS(a)	(((long int) (a)) == 0xffffffff)
+#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
+
+#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_CLASSE_NET 0xffffffff
+#define IN_CLASSE_NSHIFT 0
/* Address to accept any incoming messages. */
#define INADDR_ANY ((unsigned long int) 0x00000000)
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index ed291e55f024..71d82fe15b03 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -177,6 +177,7 @@ struct in6_flowlabel_req {
#define IPV6_V6ONLY 26
#define IPV6_JOIN_ANYCAST 27
#define IPV6_LEAVE_ANYCAST 28
+#define IPV6_MULTICAST_ALL 29
/* IPV6_MTU_DISCOVER values */
#define IPV6_PMTUDISC_DONT 0
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 53fbae27b280..ae366b87426a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -708,6 +708,14 @@
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
+/*
+ * 0x0a is reserved and should not be used in input drivers.
+ * It was used by HID as REL_MISC+1 and userspace needs to detect if
+ * the next REL_* event is correct or is just REL_MISC + n.
+ * We define here REL_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define REL_RESERVED 0x0a
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -744,6 +752,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 7b8c9e19bad1..f45ee0f69c0c 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -61,11 +61,21 @@
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */
+#define KEYCTL_PKEY_QUERY 24 /* Query public key parameters */
+#define KEYCTL_PKEY_ENCRYPT 25 /* Encrypt a blob using a public key */
+#define KEYCTL_PKEY_DECRYPT 26 /* Decrypt a blob using a public key */
+#define KEYCTL_PKEY_SIGN 27 /* Create a public key signature */
+#define KEYCTL_PKEY_VERIFY 28 /* Verify a public key signature */
#define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */
/* keyctl structures */
struct keyctl_dh_params {
- __s32 private;
+ union {
+#ifndef __cplusplus
+ __s32 private;
+#endif
+ __s32 priv;
+ };
__s32 prime;
__s32 base;
};
@@ -77,4 +87,29 @@ struct keyctl_kdf_params {
__u32 __spare[8];
};
+#define KEYCTL_SUPPORTS_ENCRYPT 0x01
+#define KEYCTL_SUPPORTS_DECRYPT 0x02
+#define KEYCTL_SUPPORTS_SIGN 0x04
+#define KEYCTL_SUPPORTS_VERIFY 0x08
+
+struct keyctl_pkey_query {
+ __u32 supported_ops; /* Which ops are supported */
+ __u32 key_size; /* Size of the key in bits */
+ __u16 max_data_size; /* Maximum size of raw data to sign in bytes */
+ __u16 max_sig_size; /* Maximum size of signature in bytes */
+ __u16 max_enc_size; /* Maximum size of encrypted blob in bytes */
+ __u16 max_dec_size; /* Maximum size of decrypted blob in bytes */
+ __u32 __spare[10];
+};
+
+struct keyctl_pkey_params {
+ __s32 key_id; /* Serial no. of public key to use */
+ __u32 in_len; /* Input data size */
+ union {
+ __u32 out_len; /* Output buffer size (encrypt/decrypt/sign) */
+ __u32 in2_len; /* 2nd input data size (verify) */
+ };
+ __u32 __spare[7];
+};
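+
A rough usage sketch (not part of this patch): query a key's public-key capabilities before attempting a signature. The raw keyctl(2) argument order used here, key id, a reserved zero, the parameter string, then the result buffer, follows the keyctl_pkey_query(3) convention and should be treated as an assumption; libkeyutils provides a safer wrapper.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

/* Return non-zero if the key can produce signatures. */
static int can_sign(int32_t key)
{
	struct keyctl_pkey_query q;

	if (syscall(SYS_keyctl, KEYCTL_PKEY_QUERY, key, 0,
		    "enc=pkcs1 hash=sha256", &q) < 0)
		return 0;
	printf("key size %u bits, max sig %u bytes\n",
	       q.key_size, q.max_sig_size);
	return !!(q.supported_ops & KEYCTL_SUPPORTS_SIGN);
}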
+
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 01674b56e14f..e622fd1fbd46 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -82,6 +82,14 @@ struct kfd_ioctl_set_cu_mask_args {
__u64 cu_mask_ptr; /* to KFD */
};
+struct kfd_ioctl_get_queue_wave_state_args {
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -247,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
@@ -390,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+struct kfd_ioctl_get_dmabuf_info_args {
+ __u64 size; /* from KFD */
+ __u64 metadata_ptr; /* to KFD */
+ __u32 metadata_size; /* to KFD (space allocated by user)
+ * from KFD (actual metadata size)
+ */
+ __u32 gpu_id; /* from KFD */
+ __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+ __u64 va_addr; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -475,7 +501,16 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_SET_CU_MASK \
AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
+ AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+
+#define AMDKFD_IOC_GET_DMABUF_INFO \
+ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF \
+ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1B
+#define AMDKFD_COMMAND_END 0x1E
#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3cf632839337..6d4ea4b6c922 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -420,13 +420,19 @@ struct kvm_run {
struct kvm_coalesced_mmio_zone {
__u64 addr;
__u32 size;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
};
struct kvm_coalesced_mmio {
__u64 phys_addr;
__u32 len;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
__u8 data[8];
};
@@ -486,6 +492,17 @@ struct kvm_dirty_log {
};
};
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
@@ -719,6 +736,7 @@ struct kvm_ppc_one_seg_page_size {
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -751,6 +769,15 @@ struct kvm_ppc_resize_hpt {
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
+ * On arm64, machine type can be used to request the physical
+ * address size for the VM. Bits[7-0] are reserved for the guest
+ * PA size shift (i.e, log2(PA_Size)). For backward compatibility,
+ * value 0 implies the default IPA size, 40bits.
+ */
+#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
+#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
+ ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
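+
Putting the two new pieces together, a VMM might size the guest IPA space like this (illustrative only; error handling is trimmed and the function name is an assumption):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create an arm64 VM with the largest IPA space the host supports,
 * falling back to the default 40-bit IPA (type 0) when the capability
 * is absent. */
static int create_vm_max_ipa(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int ipa, type = 0;

	if (kvm < 0)
		return -1;
	ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
	if (ipa > 0)
		type = KVM_VM_TYPE_ARM_IPA_SIZE(ipa);
	return ioctl(kvm, KVM_CREATE_VM, type);
}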
+/*
* ioctls for /dev/kvm fds:
*/
#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
@@ -951,6 +978,16 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_TLBFLUSH 155
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
+#define KVM_CAP_EXCEPTION_PAYLOAD 164
+#define KVM_CAP_ARM_VM_IPA_SIZE 165
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_HYPERV_CPUID 167
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1397,6 +1434,12 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
+
+/* Available with KVM_CAP_HYPERV_CPUID */
+#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 1a6fee974116..f8c00045d537 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -29,6 +29,7 @@
#define HPFS_SUPER_MAGIC 0xf995e849
#define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6
+#define XFS_SUPER_MAGIC 0x58465342 /* "XFSB" */
#define PSTOREFS_MAGIC 0x6165676C
#define EFIVARFS_MAGIC 0xde5e81e4
#define HOSTFS_SUPER_MAGIC 0x00c0ffee
@@ -72,6 +73,7 @@
#define DAXFS_MAGIC 0x64646178
#define BINFMTFS_MAGIC 0x42494e4d
#define DEVPTS_SUPER_MAGIC 0x1cd1
+#define BINDERFS_SUPER_MAGIC 0x6c6f6f70
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#define PIPEFS_MAGIC 0x50495045
#define PROC_SUPER_MAGIC 0x9fa0
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 36f76e777ef9..e5d0c5c611b5 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -369,6 +369,14 @@ struct media_v2_topology {
#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum)
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
+#define MEDIA_IOC_REQUEST_ALLOC _IOR ('|', 0x05, int)
+
+/*
+ * These ioctls are called on the request file descriptor as returned
+ * by MEDIA_IOC_REQUEST_ALLOC.
+ */
+#define MEDIA_REQUEST_IOC_QUEUE _IO('|', 0x80)
+#define MEDIA_REQUEST_IOC_REINIT _IO('|', 0x81)
#ifndef __KERNEL__
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 015a4c0bbb47..7a8a26751c23 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -25,7 +25,9 @@
#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index bfd5938fede6..d0f515d53299 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -28,7 +28,9 @@
#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
index 45f369dc0a42..00c08120f3ba 100644
--- a/include/uapi/linux/mmc/ioctl.h
+++ b/include/uapi/linux/mmc/ioctl.h
@@ -5,7 +5,10 @@
#include <linux/types.h>
struct mmc_ioc_cmd {
- /* Implies direction of data. true = write, false = read */
+ /*
+ * Direction of data: nonzero = write, zero = read.
+ * Bit 31 selects 'Reliable Write' for RPMB.
+ */
int write_flag;
/* Application-specific command. true = precede with CMD55 */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
new file mode 100644
index 000000000000..3f9ec42510b0
--- /dev/null
+++ b/include/uapi/linux/mount.h
@@ -0,0 +1,58 @@
+#ifndef _UAPI_LINUX_MOUNT_H
+#define _UAPI_LINUX_MOUNT_H
+
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ *
+ * Usage of these is restricted within the kernel to core mount(2) code and
+ * callers of sys_mount() only. Filesystems should be using the SB_*
+ * equivalent instead.
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define MS_NODIRATIME 2048 /* Do not update directory access times */
+#define MS_BIND 4096
+#define MS_MOVE 8192
+#define MS_REC 16384
+#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
+ MS_VERBOSE is deprecated. */
+#define MS_SILENT 32768
+#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+#define MS_PRIVATE (1<<18) /* change to private */
+#define MS_SLAVE (1<<19) /* change to slave */
+#define MS_SHARED (1<<20) /* change to shared */
+#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define MS_SUBMOUNT (1<<26)
+#define MS_NOREMOTELOCK (1<<27)
+#define MS_NOSEC (1<<28)
+#define MS_BORN (1<<29)
+#define MS_ACTIVE (1<<30)
+#define MS_NOUSER (1<<31)
+
+/*
+ * Superblock flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
+ MS_LAZYTIME)
+
+/*
+ * Old magic mount flag and mask
+ */
+#define MS_MGC_VAL 0xC0ED0000
+#define MS_MGC_MSK 0xffff0000
+
+#endif /* _UAPI_LINUX_MOUNT_H */
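
A sketch of a common combination of the flags above: a read-only bind mount, which takes a bind step followed by a remount for MS_RDONLY to take effect.

#include <sys/mount.h>

int bind_mount_read_only(const char *src, const char *dst)
{
	if (mount(src, dst, NULL, MS_BIND, NULL) < 0)
		return -1;
	/* Flags are only applied to the bind mount via a remount. */
	return mount(NULL, dst, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
}
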
diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h
index fde753735aba..a5773899f4d9 100644
--- a/include/uapi/linux/msdos_fs.h
+++ b/include/uapi/linux/msdos_fs.h
@@ -58,9 +58,6 @@
#define MSDOS_DOT ". " /* ".", padded to MSDOS_NAME chars */
#define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */
-#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \
- MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x))
-
/* start of data cluster's entry (number of reserved clusters) */
#define FAT_START_ENT 2
@@ -68,8 +65,6 @@
#define MAX_FAT12 0xFF4
#define MAX_FAT16 0xFFF4
#define MAX_FAT32 0x0FFFFFF6
-#define MAX_FAT(s) (MSDOS_SB(s)->fat_bits == 32 ? MAX_FAT32 : \
- MSDOS_SB(s)->fat_bits == 16 ? MAX_FAT16 : MAX_FAT12)
/* bad cluster mark */
#define BAD_FAT12 0xFF7
@@ -135,7 +130,7 @@ struct fat_boot_sector {
for mount state. */
__u8 signature; /* extended boot signature */
__u8 vol_id[4]; /* volume ID */
- __u8 vol_label[11]; /* volume label */
+ __u8 vol_label[MSDOS_NAME]; /* volume label */
__u8 fs_type[8]; /* file system type */
/* other fields are not added here */
} fat16;
@@ -158,7 +153,7 @@ struct fat_boot_sector {
for mount state. */
__u8 signature; /* extended boot signature */
__u8 vol_id[4]; /* volume ID */
- __u8 vol_label[11]; /* volume label */
+ __u8 vol_label[MSDOS_NAME]; /* volume label */
__u8 fs_type[8]; /* file system type */
/* other fields are not added here */
} fat32;
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 4c292ecbb748..a3f87c54fdb3 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -23,6 +23,15 @@
* optionally the preferred NCSI_ATTR_CHANNEL_ID.
* @NCSI_CMD_CLEAR_INTERFACE: clear any preferred package/channel combination.
* Requires NCSI_ATTR_IFINDEX.
+ * @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
+ * and NCSI_ATTR_CHANNEL_ID.
+ * @NCSI_CMD_SET_PACKAGE_MASK: set a whitelist of allowed packages.
+ * Requires NCSI_ATTR_IFINDEX and NCSI_ATTR_PACKAGE_MASK.
+ * @NCSI_CMD_SET_CHANNEL_MASK: set a whitelist of allowed channels.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID, and
+ * NCSI_ATTR_CHANNEL_MASK. If NCSI_ATTR_CHANNEL_ID is present it sets
+ * the primary channel.
* @NCSI_CMD_MAX: highest command number
*/
enum ncsi_nl_commands {
@@ -30,6 +39,9 @@ enum ncsi_nl_commands {
NCSI_CMD_PKG_INFO,
NCSI_CMD_SET_INTERFACE,
NCSI_CMD_CLEAR_INTERFACE,
+ NCSI_CMD_SEND_CMD,
+ NCSI_CMD_SET_PACKAGE_MASK,
+ NCSI_CMD_SET_CHANNEL_MASK,
__NCSI_CMD_AFTER_LAST,
NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -43,6 +55,11 @@ enum ncsi_nl_commands {
* @NCSI_ATTR_PACKAGE_LIST: nested array of NCSI_PKG_ATTR attributes
* @NCSI_ATTR_PACKAGE_ID: package ID
* @NCSI_ATTR_CHANNEL_ID: channel ID
+ * @NCSI_ATTR_DATA: command payload
+ * @NCSI_ATTR_MULTI_FLAG: flag to signal that multi-mode should be enabled with
+ * NCSI_CMD_SET_PACKAGE_MASK or NCSI_CMD_SET_CHANNEL_MASK.
+ * @NCSI_ATTR_PACKAGE_MASK: 32-bit mask of allowed packages.
+ * @NCSI_ATTR_CHANNEL_MASK: 32-bit mask of allowed channels.
* @NCSI_ATTR_MAX: highest attribute number
*/
enum ncsi_nl_attrs {
@@ -51,6 +68,10 @@ enum ncsi_nl_attrs {
NCSI_ATTR_PACKAGE_LIST,
NCSI_ATTR_PACKAGE_ID,
NCSI_ATTR_CHANNEL_ID,
+ NCSI_ATTR_DATA,
+ NCSI_ATTR_MULTI_FLAG,
+ NCSI_ATTR_PACKAGE_MASK,
+ NCSI_ATTR_CHANNEL_MASK,
__NCSI_ATTR_AFTER_LAST,
NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7e27070b9440..f57c9e434d2d 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -128,37 +128,31 @@ enum {
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_ARS_CAP] = "ars_cap",
- [ND_CMD_ARS_START] = "ars_start",
- [ND_CMD_ARS_STATUS] = "ars_status",
- [ND_CMD_CLEAR_ERROR] = "clear_error",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_ARS_CAP: return "ars_cap";
+ case ND_CMD_ARS_START: return "ars_start";
+ case ND_CMD_ARS_STATUS: return "ars_status";
+ case ND_CMD_CLEAR_ERROR: return "clear_error";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
static inline const char *nvdimm_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_SMART] = "smart",
- [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
- [ND_CMD_DIMM_FLAGS] = "flags",
- [ND_CMD_GET_CONFIG_SIZE] = "get_size",
- [ND_CMD_GET_CONFIG_DATA] = "get_data",
- [ND_CMD_SET_CONFIG_DATA] = "set_data",
- [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
- [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
- [ND_CMD_VENDOR] = "vendor",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_SMART: return "smart";
+ case ND_CMD_SMART_THRESHOLD: return "smart_thresh";
+ case ND_CMD_DIMM_FLAGS: return "flags";
+ case ND_CMD_GET_CONFIG_SIZE: return "get_size";
+ case ND_CMD_GET_CONFIG_DATA: return "get_data";
+ case ND_CMD_SET_CONFIG_DATA: return "set_data";
+ case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size";
+ case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log";
+ case ND_CMD_VENDOR: return "vendor";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
#define ND_IOCTL 'N'
@@ -208,10 +202,6 @@ enum nd_driver_flags {
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
-enum {
- ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
-};
-
enum ars_masks {
ARS_STATUS_MASK = 0x0000FFFF,
ARS_EXT_STATUS_SHIFT = 16,
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 904db6148476..cd144e3099a3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -28,6 +28,7 @@ enum {
NDA_MASTER,
NDA_LINK_NETNSID,
NDA_SRC_VNI,
+ NDA_PROTOCOL, /* Originator of entry */
__NDA_MAX
};
@@ -43,6 +44,7 @@ enum {
#define NTF_PROXY 0x08 /* == ATF_PUBL */
#define NTF_EXT_LEARNED 0x10
#define NTF_OFFLOADED 0x20
+#define NTF_STICKY 0x40
#define NTF_ROUTER 0x80
/*
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
index 0187c74d8889..9f9956809565 100644
--- a/include/uapi/linux/net_namespace.h
+++ b/include/uapi/linux/net_namespace.h
@@ -16,6 +16,8 @@ enum {
NETNSA_NSID,
NETNSA_PID,
NETNSA_FD,
+ NETNSA_TARGET_NSID,
+ NETNSA_CURRENT_NSID,
__NETNSA_MAX,
};
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c17ec4d..e5b39721c6e4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@ enum txtime_flags {
};
struct sock_txtime {
- clockid_t clockid; /* reference clockid */
- __u32 flags; /* as defined by enum txtime_flags */
+ __kernel_clockid_t clockid;/* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
};
#endif /* _NET_TIMESTAMPING_H */
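
A sketch of configuring the (now __kernel_clockid_t based) struct sock_txtime via SO_TXTIME, using CLOCK_TAI as the reference clock; the fallback define is only needed if the libc headers predate SO_TXTIME.

#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

#ifndef SO_TXTIME
#define SO_TXTIME 61	/* value from asm-generic/socket.h */
#endif

int enable_txtime(int sock)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,
		.flags   = SOF_TXTIME_REPORT_ERRORS,
	};

	return setsockopt(sock, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}
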
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index cca10e767cd8..ca9e63d6e0e4 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -34,10 +34,6 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
-/* Generic cache responses from hook functions.
- <= 0x2000 is used for protocol-flags. */
-#define NFC_UNKNOWN 0x4000
-#define NFC_ALTERED 0x8000
/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
#define NF_VERDICT_BITS 16
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 60236f694143..ea69ca21ff23 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -13,8 +13,9 @@
#include <linux/types.h>
-/* The protocol version */
-#define IPSET_PROTOCOL 6
+/* The protocol versions */
+#define IPSET_PROTOCOL 7
+#define IPSET_PROTOCOL_MIN 6
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
@@ -38,17 +39,19 @@ enum ipset_cmd {
IPSET_CMD_TEST, /* 11: Test an element in a set */
IPSET_CMD_HEADER, /* 12: Get set header data only */
IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_CMD_GET_BYNAME, /* 14: Get set index by name */
+ IPSET_CMD_GET_BYINDEX, /* 15: Get set name by index */
IPSET_MSG_MAX, /* Netlink message commands */
/* Commands in userspace: */
- IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
- IPSET_CMD_HELP, /* 15: Get help */
- IPSET_CMD_VERSION, /* 16: Get program version */
- IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 16: Enter restore mode */
+ IPSET_CMD_HELP, /* 17: Get help */
+ IPSET_CMD_VERSION, /* 18: Get program version */
+ IPSET_CMD_QUIT, /* 19: Quit from interactive mode */
IPSET_CMD_MAX,
- IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 20: Commit buffered commands */
};
/* Attributes at command level */
@@ -66,6 +69,7 @@ enum {
IPSET_ATTR_LINENO, /* 9: Restore lineno */
IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ IPSET_ATTR_INDEX, /* 11: Kernel index of set */
__IPSET_ATTR_CMD_MAX,
};
#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
@@ -223,6 +227,7 @@ enum ipset_adt {
/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
* and IPSET_INVALID_ID if you want to increase the max number of sets.
+ * Also, IPSET_ATTR_INDEX must be changed.
*/
typedef __u16 ip_set_id_t;
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index e23290ffdc77..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -826,12 +826,14 @@ enum nft_meta_keys {
* @NFT_RT_NEXTHOP4: routing nexthop for IPv4
* @NFT_RT_NEXTHOP6: routing nexthop for IPv6
* @NFT_RT_TCPMSS: fetch current path tcp mss
+ * @NFT_RT_XFRM: boolean, skb->dst->xfrm != NULL
*/
enum nft_rt_keys {
NFT_RT_CLASSID,
NFT_RT_NEXTHOP4,
NFT_RT_NEXTHOP6,
NFT_RT_TCPMSS,
+ NFT_RT_XFRM,
__NFT_RT_MAX
};
#define NFT_RT_MAX (__NFT_RT_MAX - 1)
@@ -1175,6 +1177,21 @@ enum nft_quota_attributes {
#define NFTA_QUOTA_MAX (__NFTA_QUOTA_MAX - 1)
/**
+ * enum nft_secmark_attributes - nf_tables secmark object netlink attributes
+ *
+ * @NFTA_SECMARK_CTX: security context (NLA_STRING)
+ */
+enum nft_secmark_attributes {
+ NFTA_SECMARK_UNSPEC,
+ NFTA_SECMARK_CTX,
+ __NFTA_SECMARK_MAX,
+};
+#define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1)
+
+/* Max security context length */
+#define NFT_SECMARK_CTX_MAXLEN 256
+
+/**
* enum nft_reject_types - nf_tables reject expression reject types
*
* @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
@@ -1430,7 +1447,8 @@ enum nft_ct_timeout_timeout_attributes {
#define NFT_OBJECT_CONNLIMIT 5
#define NFT_OBJECT_TUNNEL 6
#define NFT_OBJECT_CT_TIMEOUT 7
-#define __NFT_OBJECT_MAX 8
+#define NFT_OBJECT_SECMARK 8
+#define __NFT_OBJECT_MAX 9
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1493,9 +1511,16 @@ enum nft_flowtable_hook_attributes {
};
#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+/**
+ * enum nft_osf_attributes - nftables osf expression netlink attributes
+ *
+ * @NFTA_OSF_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_OSF_TTL: Value of the TTL osf option (NLA_U8)
+ */
enum nft_osf_attributes {
NFTA_OSF_UNSPEC,
NFTA_OSF_DREG,
+ NFTA_OSF_TTL,
__NFTA_OSF_MAX,
};
#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
@@ -1512,6 +1537,35 @@ enum nft_devices_attributes {
};
#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1)
+/*
+ * enum nft_xfrm_attributes - nf_tables xfrm expr netlink attributes
+ *
+ * @NFTA_XFRM_DREG: destination register (NLA_U32)
+ * @NFTA_XFRM_KEY: enum nft_xfrm_keys (NLA_U32)
+ * @NFTA_XFRM_DIR: direction (NLA_U8)
+ * @NFTA_XFRM_SPNUM: index in secpath array (NLA_U32)
+ */
+enum nft_xfrm_attributes {
+ NFTA_XFRM_UNSPEC,
+ NFTA_XFRM_DREG,
+ NFTA_XFRM_KEY,
+ NFTA_XFRM_DIR,
+ NFTA_XFRM_SPNUM,
+ __NFTA_XFRM_MAX
+};
+#define NFTA_XFRM_MAX (__NFTA_XFRM_MAX - 1)
+
+enum nft_xfrm_keys {
+ NFT_XFRM_KEY_UNSPEC,
+ NFT_XFRM_KEY_DADDR_IP4,
+ NFT_XFRM_KEY_DADDR_IP6,
+ NFT_XFRM_KEY_SADDR_IP4,
+ NFT_XFRM_KEY_SADDR_IP6,
+ NFT_XFRM_KEY_REQID,
+ NFT_XFRM_KEY_SPI,
+ __NFT_XFRM_KEY_MAX,
+};
+#define NFT_XFRM_KEY_MAX (__NFT_XFRM_KEY_MAX - 1)
/**
* enum nft_trace_attributes - nf_tables trace netlink attributes
@@ -1581,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
index e96dfa1b34f7..b74e370d6133 100644
--- a/include/uapi/linux/netfilter/xt_cgroup.h
+++ b/include/uapi/linux/netfilter/xt_cgroup.h
@@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
void *priv __attribute__((aligned(8)));
};
+#define XT_CGROUP_PATH_MAX 512
+
+struct xt_cgroup_info_v2 {
+ __u8 has_path;
+ __u8 has_classid;
+ __u8 invert_path;
+ __u8 invert_classid;
+ union {
+ char path[XT_CGROUP_PATH_MAX];
+ __u32 classid;
+ };
+
+ /* kernel internal data */
+ void *priv __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_CGROUP_H */
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 61f1c7dfd033..3c77f54560f2 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -15,16 +15,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_DN_SRC 0x0001
-/* Dest IP address. */
-#define NFC_DN_DST 0x0002
-/* Input device. */
-#define NFC_DN_IF_IN 0x0004
-/* Output device. */
-#define NFC_DN_IF_OUT 0x0008
-
/* kernel define is in netfilter_defs.h */
#define NF_DN_NUMHOOKS 7
#endif /* ! __KERNEL__ */
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index c3b060775e13..155e77d6a42d 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -13,34 +13,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP_DST 0x0002
-/* Input device. */
-#define NFC_IP_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP_TOS 0x0010
-/* Protocol. */
-#define NFC_IP_PROTO 0x0020
-/* IP options. */
-#define NFC_IP_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP_FRAG 0x0080
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP_PROTO_UNKNOWN 0x2000
-
/* IP Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP_PRE_ROUTING 0
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index dc624fd24d25..80aa9b0799af 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -16,35 +16,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP6_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP6_DST 0x0002
-/* Input device. */
-#define NFC_IP6_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP6_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP6_TOS 0x0010
-/* Protocol. */
-#define NFC_IP6_PROTO 0x0020
-/* IP options. */
-#define NFC_IP6_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP6_FRAG 0x0080
-
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP6_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP6_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP6_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP6_PROTO_UNKNOWN 0x2000
-
/* IP6 Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP6_PRE_ROUTING 0
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 776bc92e9118..0a4d73317759 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -155,6 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
+#define NETLINK_GET_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
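
A sketch of opting a netlink socket into the new strict checking mode; the fallback define covers libcs that do not yet expose SOL_NETLINK.

#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int enable_strict_checking(int nl_sock)
{
	int one = 1;

	return setsockopt(nl_sock, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
			  &one, sizeof(one));
}
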
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 7acc16f34942..31ae5c7f10e3 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1033,6 +1033,38 @@
* %NL80211_ATTR_CHANNEL_WIDTH,%NL80211_ATTR_NSS attributes with its
* address(specified in %NL80211_ATTR_MAC).
*
+ * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
+ * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
+ *
+ * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s)
+ * with the given parameters, which are encapsulated in the nested
+ * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address
+ * randomization may be enabled and configured by specifying the
+ * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
+ * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
+ * the netlink extended ack message.
+ *
+ * To cancel a measurement, close the socket that requested it.
+ *
+ * Measurement results are reported to the socket that requested the
+ * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they
+ * become available, so applications must ensure a large enough socket
+ * buffer size.
+ *
+ * Depending on driver support it may or may not be possible to start
+ * multiple concurrent measurements.
+ * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the
+ * result notification from the driver to the requesting socket.
+ * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that
+ * the measurement completed, using the measurement cookie
+ * (%NL80211_ATTR_COOKIE).
+ *
+ * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was
+ * detected and reported by a neighboring device on the channel
+ * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
+ * determining the width and type.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1245,6 +1277,14 @@ enum nl80211_commands {
NL80211_CMD_CONTROL_PORT_FRAME,
+ NL80211_CMD_GET_FTM_RESPONDER_STATS,
+
+ NL80211_CMD_PEER_MEASUREMENT_START,
+ NL80211_CMD_PEER_MEASUREMENT_RESULT,
+ NL80211_CMD_PEER_MEASUREMENT_COMPLETE,
+
+ NL80211_CMD_NOTIFY_RADAR,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1701,7 +1741,7 @@ enum nl80211_commands {
* the values passed in @NL80211_ATTR_SCAN_SSIDS (eg. if an SSID
* is included in the probe request, but the match attributes
* will never let it go through), -EINVAL may be returned.
- * If ommited, no filtering is done.
+ * If omitted, no filtering is done.
*
* @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
* interface combinations. In each nested item, it contains attributes
@@ -1806,7 +1846,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds, this can be
 * used by drivers which have MLME in firmware and do not have support
- * to report per station tx/rx activity to free up the staion entry from
+ * to report per station tx/rx activity to free up the station entry from
* the list. This needs to be used when the driver advertises the
* capability to timeout the stations.
*
@@ -2167,7 +2207,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in
* the specified band is to be adjusted before doing
- * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparision to figure out
+ * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
* better BSSs. The attribute value is a packed structure
* value as specified by &struct nl80211_bss_select_rssi_adjust.
*
@@ -2241,6 +2281,24 @@ enum nl80211_commands {
* association request when used with NL80211_CMD_NEW_STATION). Can be set
* only if %NL80211_STA_FLAG_WME is set.
*
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing
+ * measurement (FTM) responder functionality, containing parameters as
+ * needed; see &enum nl80211_ftm_responder_attributes
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ * statistics, see &enum nl80211_ftm_responder_stats.
+ *
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ * if the attribute is not given no timeout is requested. Note that 0 is an
+ * invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ * data, uses nested attributes specified in
+ * &enum nl80211_peer_measurement_attrs.
+ * This is also used for capability advertisement in the wiphy information,
+ * with the appropriate sub-attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2682,6 +2740,14 @@ enum nl80211_attrs {
NL80211_ATTR_HE_CAPABILITY,
+ NL80211_ATTR_FTM_RESPONDER,
+
+ NL80211_ATTR_FTM_RESPONDER_STATS,
+
+ NL80211_ATTR_TIMEOUT,
+
+ NL80211_ATTR_PEER_MEASUREMENTS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3050,8 +3116,15 @@ enum nl80211_sta_bss_param {
* received from the station (u64, usec)
* @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
* @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
- * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data)
- * ACK frame (s8, dBm)
+ * @NL80211_STA_INFO_ACK_SIGNAL_AVG: avg signal strength of ACK frames (s8, dBm)
+ * @NL80211_STA_INFO_RX_MPDUS: total number of received packets (MPDUs)
+ * (u32, from this station)
+ * @NL80211_STA_INFO_FCS_ERROR_COUNT: total number of packets (MPDUs) received
+ * with an FCS error (u32, from this station). This count may not include
+ * some packets with an FCS error due to TA corruption. Hence this counter
+ * might not be fully accurate.
+ * @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a
+ * mesh gate (u8, 0 or 1)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3091,13 +3164,20 @@ enum nl80211_sta_info {
NL80211_STA_INFO_RX_DURATION,
NL80211_STA_INFO_PAD,
NL80211_STA_INFO_ACK_SIGNAL,
- NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG,
+ NL80211_STA_INFO_ACK_SIGNAL_AVG,
+ NL80211_STA_INFO_RX_MPDUS,
+ NL80211_STA_INFO_FCS_ERROR_COUNT,
+ NL80211_STA_INFO_CONNECTED_TO_GATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
NL80211_STA_INFO_MAX = __NL80211_STA_INFO_AFTER_LAST - 1
};
+/* we renamed this - stay compatible */
+#define NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG NL80211_STA_INFO_ACK_SIGNAL_AVG
+
+
/**
* enum nl80211_tid_stats - per TID statistics attributes
* @__NL80211_TID_STATS_INVALID: attribute number 0 is reserved
@@ -3867,6 +3947,11 @@ enum nl80211_mesh_power_mode {
* remove it from the STA's list of peers. You may set this to 0 to disable
* the removal of the STA. Default is 30 minutes.
*
+ * @NL80211_MESHCONF_CONNECTED_TO_GATE: If set to true then this mesh STA
+ * will advertise that it is connected to a gate in the mesh formation
+ * field. If left unset then the mesh formation field will only
+ * advertise such if there is an active root mesh path.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -3899,6 +3984,7 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_POWER_MODE,
NL80211_MESHCONF_AWAKE_WINDOW,
NL80211_MESHCONF_PLINK_TIMEOUT,
+ NL80211_MESHCONF_CONNECTED_TO_GATE,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -4338,7 +4424,7 @@ enum nl80211_txrate_gi {
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
- * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
+ * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -4831,7 +4917,7 @@ enum nl80211_iface_limit_attrs {
* numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
* => allows a STA plus three P2P interfaces
*
- * The list of these four possiblities could completely be contained
+ * The list of these four possibilities could completely be contained
* within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate
* that any of these groups must match.
*
@@ -4861,7 +4947,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existant mesh peer links
+ * state of non existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -5213,9 +5299,8 @@ enum nl80211_feature_flags {
* "radar detected" event.
* @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
* receiving control port frames over nl80211 instead of the netdevice.
- * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: This Driver support data ack
- * rssi if firmware support, this flag is to intimate about ack rssi
- * support to nl80211.
+ * @NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT: This driver/device supports
+ * (average) ACK signal strength reporting.
* @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
* TXQs.
* @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
@@ -5223,6 +5308,13 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
* except for supported rates from the probe request content if requested
* by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
+ * @NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER: Driver supports enabling fine
+ * timing measurement responder role.
+ *
+ * @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0: Driver/device confirm that they are
+ * able to rekey an in-use key correctly. Userspace must not rekey PTK keys
+ * if this flag is not set. Ignoring this can leak clear text packets and/or
+ * freeze the connection.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5255,10 +5347,14 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
NL80211_EXT_FEATURE_DFS_OFFLOAD,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
- NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT,
+ /* we renamed this - stay compatible */
+ NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT,
NL80211_EXT_FEATURE_TXQS,
NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
+ NL80211_EXT_FEATURE_CAN_REPLACE_PTK0,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5343,7 +5439,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is dicovered in the channel,
+ * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -5798,4 +5894,458 @@ enum nl80211_external_auth_action {
NL80211_EXTERNAL_AUTH_ABORT,
};
+/**
+ * enum nl80211_ftm_responder_attributes - fine timing measurement
+ * responder attributes
+ * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid
+ * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled
+ * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10),
+ * i.e. starting with the measurement token
+ * @NL80211_FTM_RESP_ATTR_CIVICLOC: The content of Measurement Report Element
+ * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13),
+ * i.e. starting with the measurement token
+ * @__NL80211_FTM_RESP_ATTR_LAST: Internal
+ * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute.
+ */
+enum nl80211_ftm_responder_attributes {
+ __NL80211_FTM_RESP_ATTR_INVALID,
+
+ NL80211_FTM_RESP_ATTR_ENABLED,
+ NL80211_FTM_RESP_ATTR_LCI,
+ NL80211_FTM_RESP_ATTR_CIVICLOC,
+
+ /* keep last */
+ __NL80211_FTM_RESP_ATTR_LAST,
+ NL80211_FTM_RESP_ATTR_MAX = __NL80211_FTM_RESP_ATTR_LAST - 1,
+};
+
+/*
+ * enum nl80211_ftm_responder_stats - FTM responder statistics
+ *
+ * These attribute types are used with %NL80211_ATTR_FTM_RESPONDER_STATS
+ * when getting FTM responder statistics.
+ *
+ * @__NL80211_FTM_STATS_INVALID: attribute number 0 is reserved
+ * @NL80211_FTM_STATS_SUCCESS_NUM: number of FTM sessions in which all frames
+ * were successfully answered (u32)
+ * @NL80211_FTM_STATS_PARTIAL_NUM: number of FTM sessions in which part of the
+ * frames were successfully answered (u32)
+ * @NL80211_FTM_STATS_FAILED_NUM: number of failed FTM sessions (u32)
+ * @NL80211_FTM_STATS_ASAP_NUM: number of ASAP sessions (u32)
+ * @NL80211_FTM_STATS_NON_ASAP_NUM: number of non-ASAP sessions (u32)
+ * @NL80211_FTM_STATS_TOTAL_DURATION_MSEC: total sessions durations - gives an
+ * indication of how much time the responder was busy (u64, msec)
+ * @NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM: number of unknown FTM triggers -
+ * triggers from initiators that didn't finish successfully the negotiation
+ * phase with the responder (u32)
+ * @NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM: number of FTM reschedule requests
+ * - initiator asks for a new scheduling although it already has scheduled
+ * FTM slot (u32)
+ * @NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM: number of FTM triggers out of
+ * scheduled window (u32)
+ * @NL80211_FTM_STATS_PAD: used for padding, ignore
+ * @__NL80211_FTM_STATS_AFTER_LAST: Internal
+ * @NL80211_FTM_STATS_MAX: highest possible FTM responder stats attribute
+ */
+enum nl80211_ftm_responder_stats {
+ __NL80211_FTM_STATS_INVALID,
+ NL80211_FTM_STATS_SUCCESS_NUM,
+ NL80211_FTM_STATS_PARTIAL_NUM,
+ NL80211_FTM_STATS_FAILED_NUM,
+ NL80211_FTM_STATS_ASAP_NUM,
+ NL80211_FTM_STATS_NON_ASAP_NUM,
+ NL80211_FTM_STATS_TOTAL_DURATION_MSEC,
+ NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM,
+ NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM,
+ NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM,
+ NL80211_FTM_STATS_PAD,
+
+ /* keep last */
+ __NL80211_FTM_STATS_AFTER_LAST,
+ NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_preamble - frame preamble types
+ * @NL80211_PREAMBLE_LEGACY: legacy (HR/DSSS, OFDM, ERP PHY) preamble
+ * @NL80211_PREAMBLE_HT: HT preamble
+ * @NL80211_PREAMBLE_VHT: VHT preamble
+ * @NL80211_PREAMBLE_DMG: DMG preamble
+ */
+enum nl80211_preamble {
+ NL80211_PREAMBLE_LEGACY,
+ NL80211_PREAMBLE_HT,
+ NL80211_PREAMBLE_VHT,
+ NL80211_PREAMBLE_DMG,
+};
+
+/**
+ * enum nl80211_peer_measurement_type - peer measurement types
+ * @NL80211_PMSR_TYPE_INVALID: invalid/unused, needed as we use
+ * these numbers also for attributes
+ *
+ * @NL80211_PMSR_TYPE_FTM: flight time measurement
+ *
+ * @NUM_NL80211_PMSR_TYPES: internal
+ * @NL80211_PMSR_TYPE_MAX: highest type number
+ */
+enum nl80211_peer_measurement_type {
+ NL80211_PMSR_TYPE_INVALID,
+
+ NL80211_PMSR_TYPE_FTM,
+
+ NUM_NL80211_PMSR_TYPES,
+ NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_status - peer measurement status
+ * @NL80211_PMSR_STATUS_SUCCESS: measurement completed successfully
+ * @NL80211_PMSR_STATUS_REFUSED: measurement was locally refused
+ * @NL80211_PMSR_STATUS_TIMEOUT: measurement timed out
+ * @NL80211_PMSR_STATUS_FAILURE: measurement failed, a type-dependent
+ * reason may be available in the response data
+ */
+enum nl80211_peer_measurement_status {
+ NL80211_PMSR_STATUS_SUCCESS,
+ NL80211_PMSR_STATUS_REFUSED,
+ NL80211_PMSR_STATUS_TIMEOUT,
+ NL80211_PMSR_STATUS_FAILURE,
+};
+
+/**
+ * enum nl80211_peer_measurement_req - peer measurement request attributes
+ * @__NL80211_PMSR_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_REQ_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific request data inside. The attributes used are from the
+ * enums named nl80211_peer_measurement_<type>_req.
+ * @NL80211_PMSR_REQ_ATTR_GET_AP_TSF: include AP TSF timestamp, if supported
+ * (flag attribute)
+ *
+ * @NUM_NL80211_PMSR_REQ_ATTRS: internal
+ * @NL80211_PMSR_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_req {
+ __NL80211_PMSR_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_REQ_ATTR_DATA,
+ NL80211_PMSR_REQ_ATTR_GET_AP_TSF,
+
+ /* keep last */
+ NUM_NL80211_PMSR_REQ_ATTRS,
+ NL80211_PMSR_REQ_ATTR_MAX = NUM_NL80211_PMSR_REQ_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_resp - peer measurement response attributes
+ * @__NL80211_PMSR_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_RESP_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific results inside. The attributes used are from the enums
+ * named nl80211_peer_measurement_<type>_resp.
+ * @NL80211_PMSR_RESP_ATTR_STATUS: u32 value with the measurement status
+ * (using values from &enum nl80211_peer_measurement_status.)
+ * @NL80211_PMSR_RESP_ATTR_HOST_TIME: host time (%CLOCK_BOOTTIME) when the
+ * result was measured; this value is not expected to be accurate to
+ * more than 20ms. (u64, nanoseconds)
+ * @NL80211_PMSR_RESP_ATTR_AP_TSF: TSF of the AP that the interface
+ * doing the measurement is connected to when the result was measured.
+ * This shall be accurately reported if supported and requested
+ * (u64, usec)
+ * @NL80211_PMSR_RESP_ATTR_FINAL: If results are sent to the host partially
+ * (e.g. with FTM per-burst data) this flag will be cleared on all but
+ * the last result; if all results are combined it's set on the single
+ * result.
+ * @NL80211_PMSR_RESP_ATTR_PAD: padding for 64-bit attributes, ignore
+ *
+ * @NUM_NL80211_PMSR_RESP_ATTRS: internal
+ * @NL80211_PMSR_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_resp {
+ __NL80211_PMSR_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_RESP_ATTR_DATA,
+ NL80211_PMSR_RESP_ATTR_STATUS,
+ NL80211_PMSR_RESP_ATTR_HOST_TIME,
+ NL80211_PMSR_RESP_ATTR_AP_TSF,
+ NL80211_PMSR_RESP_ATTR_FINAL,
+ NL80211_PMSR_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_RESP_ATTRS,
+ NL80211_PMSR_RESP_ATTR_MAX = NUM_NL80211_PMSR_RESP_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_peer_attrs - peer attributes for measurement
+ * @__NL80211_PMSR_PEER_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_PEER_ATTR_ADDR: peer's MAC address
+ * @NL80211_PMSR_PEER_ATTR_CHAN: channel definition, nested, using top-level
+ * attributes like %NL80211_ATTR_WIPHY_FREQ etc.
+ * @NL80211_PMSR_PEER_ATTR_REQ: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_req inside.
+ * @NL80211_PMSR_PEER_ATTR_RESP: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_resp inside.
+ *
+ * @NUM_NL80211_PMSR_PEER_ATTRS: internal
+ * @NL80211_PMSR_PEER_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_peer_attrs {
+ __NL80211_PMSR_PEER_ATTR_INVALID,
+
+ NL80211_PMSR_PEER_ATTR_ADDR,
+ NL80211_PMSR_PEER_ATTR_CHAN,
+ NL80211_PMSR_PEER_ATTR_REQ,
+ NL80211_PMSR_PEER_ATTR_RESP,
+
+ /* keep last */
+ NUM_NL80211_PMSR_PEER_ATTRS,
+ NL80211_PMSR_PEER_ATTR_MAX = NUM_NL80211_PMSR_PEER_ATTRS - 1,
+};
+
+/**
+ * enum nl80211_peer_measurement_attrs - peer measurement attributes
+ * @__NL80211_PMSR_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_ATTR_MAX_PEERS: u32 attribute used for capability
+ * advertisement only, indicates the maximum number of peers
+ * measurements can be done with in a single request
+ * @NL80211_PMSR_ATTR_REPORT_AP_TSF: flag attribute in capability
+ * indicating that the connected AP's TSF can be reported in
+ * measurement results
+ * @NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR: flag attribute in capability
+ * indicating that MAC address randomization is supported.
+ * @NL80211_PMSR_ATTR_TYPE_CAPA: capabilities reported by the device,
+ * this contains a nesting indexed by measurement type, and
+ * type-specific capabilities inside, which are from the enums
+ * named nl80211_peer_measurement_<type>_capa.
+ * @NL80211_PMSR_ATTR_PEERS: nested attribute, the nesting index is
+ * meaningless, just a list of peers to measure with, with the
+ * sub-attributes taken from
+ * &enum nl80211_peer_measurement_peer_attrs.
+ *
+ * @NUM_NL80211_PMSR_ATTR: internal
+ * @NL80211_PMSR_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_attrs {
+ __NL80211_PMSR_ATTR_INVALID,
+
+ NL80211_PMSR_ATTR_MAX_PEERS,
+ NL80211_PMSR_ATTR_REPORT_AP_TSF,
+ NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR,
+ NL80211_PMSR_ATTR_TYPE_CAPA,
+ NL80211_PMSR_ATTR_PEERS,
+
+ /* keep last */
+ NUM_NL80211_PMSR_ATTR,
+ NL80211_PMSR_ATTR_MAX = NUM_NL80211_PMSR_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_capa - FTM capabilities
+ * @__NL80211_PMSR_FTM_CAPA_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_CAPA_ATTR_ASAP: flag attribute indicating ASAP mode
+ * is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP: flag attribute indicating non-ASAP
+ * mode is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI: flag attribute indicating if LCI
+ * data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC: flag attribute indicating if civic
+ * location data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES: u32 bitmap attribute of bits
+ * from &enum nl80211_preamble.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS: bitmap of values from
+ * &enum nl80211_chan_width indicating the supported channel
+ * bandwidths for FTM. Note that a higher channel bandwidth may be
+ * configured to allow for other measurement types with different
+ * bandwidth requirements in the same measurement.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT: u32 attribute indicating
+ * the maximum bursts exponent that can be used (if not present anything
+ * is valid)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating
+ * the maximum FTMs per burst (if not present anything is valid)
+ *
+ * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_capa {
+ __NL80211_PMSR_FTM_CAPA_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_CAPA_ATTR_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC,
+ NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES,
+ NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_CAPA_ATTR,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX = NUM_NL80211_PMSR_FTM_CAPA_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_req - FTM request attributes
+ * @__NL80211_PMSR_FTM_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_REQ_ATTR_ASAP: ASAP mode requested (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE: preamble type (see
+ * &enum nl80211_preamble), optional for DMG (u32)
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in
+ * 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element"
+ * (u8, 0-15, optional with default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units
+ * of 100ms (u16, optional with default 0)
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016
+ * Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with
+ * default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames
+ * requested per burst
+ * (u8, 0-31, optional with default 0 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES: number of FTMR frame retries
+ * (u8, default 3)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data
+ * (flag)
+ *
+ * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
+ * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_req {
+ __NL80211_PMSR_FTM_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_REQ_ATTR_ASAP,
+ NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_REQ_ATTR,
+ NL80211_PMSR_FTM_REQ_ATTR_MAX = NUM_NL80211_PMSR_FTM_REQ_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_failure_reasons - FTM failure reasons
+ * @NL80211_PMSR_FTM_FAILURE_UNSPECIFIED: unspecified failure, not used
+ * @NL80211_PMSR_FTM_FAILURE_NO_RESPONSE: no response from the FTM responder
+ * @NL80211_PMSR_FTM_FAILURE_REJECTED: FTM responder rejected measurement
+ * @NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL: we already know the peer is
+ * on a different channel, so can't measure (if we didn't know, we'd
+ * try and get no response)
+ * @NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE: peer can't actually do FTM
+ * @NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP: invalid T1/T4 timestamps
+ * received
+ * @NL80211_PMSR_FTM_FAILURE_PEER_BUSY: peer reports busy, you may retry
+ * later (see %NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME)
+ * @NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS: parameters were changed
+ * by the peer and are no longer supported
+ */
+enum nl80211_peer_measurement_ftm_failure_reasons {
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED,
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE,
+ NL80211_PMSR_FTM_FAILURE_REJECTED,
+ NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL,
+ NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE,
+ NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP,
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS,
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_resp - FTM response attributes
+ * @__NL80211_PMSR_FTM_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON: FTM-specific failure reason
+ * (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX: optional, if bursts are reported
+ * as separate results then it will be the burst index 0...(N-1) and
+ * the top level will indicate partial results (u32)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
+ * transmitted (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
+ * that were acknowledged (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
+ * busy peer (u32, seconds)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
+ * used by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION: actual burst duration used by
+ * the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST: actual FTMs per burst used
+ * by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG: average RSSI across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD: RSSI spread across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_TX_RATE: bitrate we used for the response to the
+ * FTM action frame (optional, nested, using &enum nl80211_rate_info
+ * attributes)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RX_RATE: bitrate the responder used for the FTM
+ * action frame (optional, nested, using &enum nl80211_rate_info attrs)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG: average RTT (s64, picoseconds, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE: RTT variance (u64, ps^2, note that
+ * standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD: RTT spread (u64, picoseconds,
+ * optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG: average distance (s64, mm, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE: distance variance (u64, mm^2, note
+ * that standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD: distance spread (u64, mm, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_LCI: LCI data from peer (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 8.
+ * @NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC: civic location data from peer
+ * (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 11.
+ * @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only
+ *
+ * @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal
+ * @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_resp {
+ __NL80211_PMSR_FTM_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES,
+ NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_TX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_LCI,
+ NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
+ NL80211_PMSR_FTM_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_RESP_ATTR,
+ NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index ee556ccc93f4..e1e9888c85e6 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -52,6 +52,7 @@
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index eeb787b1c53c..9de8780ac8d9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
/*
@@ -646,10 +646,12 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
* PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_FORK_EXEC - PERF_RECORD_FORK event (perf internal)
* PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
+#define PERF_RECORD_MISC_FORK_EXEC (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
/*
* These PERF_RECORD_MISC_* flags below are safely reused
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index be382fb0592d..95d0db2a8350 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -483,6 +483,13 @@ enum {
TCA_FLOWER_KEY_ENC_OPTS,
TCA_FLOWER_KEY_ENC_OPTS_MASK,
+ TCA_FLOWER_IN_HW_COUNT,
+
+ TCA_FLOWER_KEY_PORT_SRC_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_SRC_MAX, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */
+
__TCA_FLOWER_MAX,
};
@@ -516,6 +523,8 @@ enum {
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
+#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */
+
/* Match-all classifier */
enum {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 8975fd1a1421..0d18b1d1fbbc 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -291,11 +291,38 @@ enum {
TCA_GRED_DPS,
TCA_GRED_MAX_P,
TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
__TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
struct tc_gred_qopt {
__u32 limit; /* HARD maximal queue length (bytes) */
__u32 qth_min; /* Min average length threshold (bytes) */
@@ -395,9 +422,9 @@ enum {
struct tc_htb_xstats {
__u32 lends;
__u32 borrows;
- __u32 giants; /* too big packets (rate will not be accurate) */
- __u32 tokens;
- __u32 ctokens;
+ __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
+ __s32 tokens;
+ __s32 ctokens;
};
/* HFSC section */
@@ -864,6 +891,8 @@ enum {
TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
__TCA_FQ_MAX
};
@@ -882,6 +911,7 @@ struct tc_fq_qd_stats {
__u32 inactive_flows;
__u32 throttled_flows;
__u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
};
/* Heavy-Hitter Filter */
@@ -1084,4 +1114,50 @@ enum {
CAKE_ATM_MAX
};
+
+/* TAPRIO */
+enum {
+ TC_TAPRIO_CMD_SET_GATES = 0x00,
+ TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
+ TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
+};
+
+enum {
+ TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY_INDEX, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_CMD, /* u8 */
+ TCA_TAPRIO_SCHED_ENTRY_GATE_MASK, /* u32 */
+ TCA_TAPRIO_SCHED_ENTRY_INTERVAL, /* u32 */
+ __TCA_TAPRIO_SCHED_ENTRY_MAX,
+};
+#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)
+
+/* The format for schedule entry list is:
+ * [TCA_TAPRIO_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_SCHED_ENTRY]
+ * [TCA_TAPRIO_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
+ */
+enum {
+ TCA_TAPRIO_SCHED_UNSPEC,
+ TCA_TAPRIO_SCHED_ENTRY,
+ __TCA_TAPRIO_SCHED_MAX,
+};
+
+#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+
+enum {
+ TCA_TAPRIO_ATTR_UNSPEC,
+ TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
+ TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
+ TCA_TAPRIO_ATTR_SCHED_BASE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
+ TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
+ TCA_TAPRIO_PAD,
+ __TCA_TAPRIO_ATTR_MAX,
+};
+
+#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b4875a93363a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -219,4 +220,12 @@ struct prctl_mm_map {
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
+
#endif /* _LINUX_PRCTL_H */
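
A sketch of the two additions above: disabling indirect-branch speculation for the current task and resetting every arm64 pointer-authentication key. Both calls simply fail on hardware or kernels without the corresponding support.

#include <sys/prctl.h>
#include <linux/prctl.h>

int harden_current_task(void)
{
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0) < 0)
		return -1;

	/* arm64 only: ask the kernel to regenerate all PAC keys. */
	return prctl(PR_PAC_RESET_KEYS,
		     PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY |
		     PR_PAC_APDBKEY | PR_PAC_APGAKEY, 0, 0, 0);
}
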
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 3039bf6a742e..d73d83950265 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -84,6 +84,16 @@ struct ptp_sys_offset {
struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};
+struct ptp_sys_offset_extended {
+ unsigned int n_samples; /* Desired number of measurements. */
+ unsigned int rsv[3]; /* Reserved for future use. */
+ /*
+ * Array of [system, phc, system] time stamps. The kernel will provide
+ * 3*n_samples time stamps.
+ */
+ struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
+};
+
struct ptp_sys_offset_precise {
struct ptp_clock_time device;
struct ptp_clock_time sys_realtime;
@@ -136,6 +146,8 @@ struct ptp_pin_desc {
#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
#define PTP_SYS_OFFSET_PRECISE \
_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED \
+ _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
struct ptp_extts_event {
 struct ptp_clock_time t; /* Time event occurred. */
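
A sketch of the new extended ioctl against an illustrative /dev/ptp0; each returned sample is a [system, PHC, system] triplet.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int sample_phc_offsets(struct ptp_sys_offset_extended *ext)
{
	int fd = open("/dev/ptp0", O_RDWR);	/* illustrative device */

	if (fd < 0)
		return -1;
	memset(ext, 0, sizeof(*ext));
	ext->n_samples = 5;
	/* ts[i][0] and ts[i][2] are system time, ts[i][1] is the PHC reading. */
	return ioctl(fd, PTP_SYS_OFFSET_EXTENDED, ext);
}
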
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index dc520e1a4123..8b73cb603c5f 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h> /* For struct in6_addr. */
#define RDS_IB_ABI_VERSION 0x301
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b479db5c71d9..d584073532b8 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -129,6 +129,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_STREAM_SCHEDULER_VALUE 124
#define SCTP_INTERLEAVING_SUPPORTED 125
#define SCTP_SENDMSG_CONNECT 126
+#define SCTP_EVENT 127
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -301,6 +302,7 @@ enum sctp_sinfo_flags {
SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
/* 2 bits here have been used by SCTP_PR_SCTP_MASK */
SCTP_SENDALL = (1 << 6),
+ SCTP_PR_SCTP_ALL = (1 << 7),
SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
};
@@ -567,6 +569,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -629,7 +633,9 @@ union sctp_notification {
*/
enum sctp_sn_type {
- SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_DATA_IO_EVENT = SCTP_SN_TYPE_BASE,
+#define SCTP_DATA_IO_EVENT SCTP_DATA_IO_EVENT
SCTP_ASSOC_CHANGE,
#define SCTP_ASSOC_CHANGE SCTP_ASSOC_CHANGE
SCTP_PEER_ADDR_CHANGE,
@@ -654,6 +660,8 @@ enum sctp_sn_type {
#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
SCTP_STREAM_CHANGE_EVENT,
#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
+ SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX
};
/* Notification error codes used to fill up the error fields in some
@@ -1147,9 +1155,16 @@ struct sctp_add_streams {
uint16_t sas_outstrms;
};
+struct sctp_event {
+ sctp_assoc_t se_assoc_id;
+ uint16_t se_type;
+ uint8_t se_on;
+};
+
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
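
A minimal sketch (not part of the patch) of the new per-association SCTP_EVENT socket option, which enables a single notification type per association using struct sctp_event. The wrapper name and the choice of SCTP_ASSOC_CHANGE are illustrative; the constants and structure come from this header.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/sctp.h>

/* Subscribe to association-change notifications on one association. */
static int enable_assoc_change(int sd, sctp_assoc_t assoc_id)
{
	struct sctp_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.se_assoc_id = assoc_id;
	ev.se_type = SCTP_ASSOC_CHANGE;
	ev.se_on = 1;
	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
}
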
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 9efc0e73d50b..90734aa5aa36 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -15,11 +15,13 @@
#define SECCOMP_SET_MODE_STRICT 0
#define SECCOMP_SET_MODE_FILTER 1
#define SECCOMP_GET_ACTION_AVAIL 2
+#define SECCOMP_GET_NOTIF_SIZES 3
/* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
-#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
-#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
/*
* All BPF programs must return a 32-bit value.
@@ -35,6 +37,7 @@
#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
+#define SECCOMP_RET_USER_NOTIF 0x7fc00000U /* notifies userspace */
#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
@@ -60,4 +63,35 @@ struct seccomp_data {
__u64 args[6];
};
+struct seccomp_notif_sizes {
+ __u16 seccomp_notif;
+ __u16 seccomp_notif_resp;
+ __u16 seccomp_data;
+};
+
+struct seccomp_notif {
+ __u64 id;
+ __u32 pid;
+ __u32 flags;
+ struct seccomp_data data;
+};
+
+struct seccomp_notif_resp {
+ __u64 id;
+ __s64 val;
+ __s32 error;
+ __u32 flags;
+};
+
+#define SECCOMP_IOC_MAGIC '!'
+#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
+#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
+
+/* Flags for seccomp notification fd ioctl. */
+#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
+#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
+ struct seccomp_notif_resp)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
#endif /* _UAPI_LINUX_SECCOMP_H */
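
A minimal sketch (not part of the patch) of the user-notification flow declared above: install a filter with SECCOMP_FILTER_FLAG_NEW_LISTENER, then receive one notification on the returned fd and answer it. It assumes a prebuilt BPF program 'prog' that returns SECCOMP_RET_USER_NOTIF for the syscalls of interest and a process allowed to load filters (CAP_SYS_ADMIN or no_new_privs); error handling is omitted and structure sizes follow this header version.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static void serve_one_notification(struct sock_fprog *prog)
{
	/* The listener fd is the return value of seccomp(2) itself. */
	int nfd = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
			  SECCOMP_FILTER_FLAG_NEW_LISTENER, prog);
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	memset(&req, 0, sizeof(req));
	ioctl(nfd, SECCOMP_IOCTL_NOTIF_RECV, &req);

	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;	/* must echo the id of the request */
	resp.error = -EPERM;	/* deny the intercepted syscall */
	ioctl(nfd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}
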
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 3fdd0dee8b41..93eb3c496ff1 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -132,4 +132,21 @@ struct serial_rs485 {
are a royal PITA .. */
};
+/*
+ * Serial interface for controlling ISO7816 settings on chips with suitable
+ * support. Set with TIOCSISO7816 and get with TIOCGISO7816 if supported by
+ * your platform.
+ */
+struct serial_iso7816 {
+ __u32 flags; /* ISO7816 feature flags */
+#define SER_ISO7816_ENABLED (1 << 0)
+#define SER_ISO7816_T_PARAM (0x0f << 4)
+#define SER_ISO7816_T(t) (((t) & 0x0f) << 4)
+ __u32 tg;
+ __u32 sc_fi;
+ __u32 sc_di;
+ __u32 clk;
+ __u32 reserved[5];
+};
+
#endif /* _UAPI_LINUX_SERIAL_H */
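
A minimal sketch (not part of the patch) filling the new structure and handing it to the TIOCSISO7816 ioctl named in the comment above. The guard time, Fi/Di and clock values are board-specific placeholders, and the ioctl number is assumed to come from <asm/ioctls.h> via <sys/ioctl.h>.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Enable ISO7816 T=0 operation on an already-opened tty fd. */
static int enable_iso7816(int tty_fd)
{
	struct serial_iso7816 iso;

	memset(&iso, 0, sizeof(iso));
	iso.flags = SER_ISO7816_ENABLED | SER_ISO7816_T(0);
	iso.tg = 0;		/* extra guard time */
	iso.sc_fi = 372;	/* clock rate conversion factor Fi */
	iso.sc_di = 1;		/* baud rate adjustment factor Di */
	iso.clk = 3571200;	/* smart card clock, in Hz */
	return ioctl(tty_fd, TIOCSISO7816, &iso);
}
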
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index dce5f9dae121..df4a7534e239 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -281,4 +281,7 @@
/* MediaTek BTIF */
#define PORT_MTK_BTIF 117
+/* RDA UART */
+#define PORT_RDA 118
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index dde1344f047c..6507ad0afc81 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -65,7 +65,9 @@ struct shmid_ds {
#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index ac9e8c96d9bd..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -18,14 +18,17 @@ struct smc_diag_req {
* on the internal clcsock, and more SMC-related socket data
*/
struct smc_diag_msg {
- __u8 diag_family;
- __u8 diag_state;
- __u8 diag_mode;
- __u8 diag_shutdown;
+ __u8 diag_family;
+ __u8 diag_state;
+ union {
+ __u8 diag_mode;
+ __u8 diag_fallback; /* the old name of the field */
+ };
+ __u8 diag_shutdown;
struct inet_diag_sockid id;
- __u32 diag_uid;
- __u64 diag_inode;
+ __u32 diag_uid;
+ __aligned_u64 diag_inode;
};
/* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
};
struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
- __u32 linkid; /* Link identifier */
- __u64 peer_gid; /* Peer GID */
- __u64 my_gid; /* My GID */
- __u64 token; /* Token of DMB */
- __u64 peer_token; /* Token of remote DMBE */
+ __u32 linkid; /* Link identifier */
+ __aligned_u64 peer_gid; /* Peer GID */
+ __aligned_u64 my_gid; /* My GID */
+ __aligned_u64 token; /* Token of DMB */
+ __aligned_u64 peer_token; /* Token of remote DMBE */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index f80135e5feaa..86dc24a96c90 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -243,6 +243,7 @@ enum
LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */
LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */
+ LINUX_MIB_TCPBACKLOGCOALESCE, /* TCPBacklogCoalesce */
LINUX_MIB_TCPOFOQUEUE, /* TCPOFOQueue */
LINUX_MIB_TCPOFODROP, /* TCPOFODrop */
LINUX_MIB_TCPOFOMERGE, /* TCPOFOMerge */
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index d71013fffaf6..87aa2a6d9125 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -153,6 +153,7 @@ enum
KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */
+ KERN_PANIC_PRINT=78, /* ulong: bitmask to print system info on panic */
};
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index b7aa7bb2349f..5e8ca16a9079 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 8
+#define TASKSTATS_VERSION 9
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -164,6 +164,10 @@ struct taskstats {
/* Delay waiting for memory reclaim */
__u64 freepages_count;
__u64 freepages_delay_total;
+
+ /* Delay waiting for thrashing page */
+ __u64 thrashing_count;
+ __u64 thrashing_delay_total;
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index e02d31986ff9..8bb6cc5f3235 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -266,6 +266,7 @@ enum {
TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
TCP_NLA_REORD_SEEN, /* reordering events seen */
+ TCP_NLA_SRTT, /* smoothed RTT in usecs */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/udmabuf.h b/include/uapi/linux/udmabuf.h
new file mode 100644
index 000000000000..46b6532ed855
--- /dev/null
+++ b/include/uapi/linux/udmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_UDMABUF_H
+#define _UAPI_LINUX_UDMABUF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ __u32 memfd;
+ __u32 flags;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_item {
+ __u32 memfd;
+ __u32 __pad;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_list {
+ __u32 flags;
+ __u32 count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _UAPI_LINUX_UDMABUF_H */
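
A minimal sketch (not part of the patch) of the intended flow: back a region with a sealed memfd, then turn it into a dma-buf fd through /dev/udmabuf. That the driver requires an F_SEAL_SHRINK seal and a page-aligned size is stated here as an assumption rather than by the header; error handling is trimmed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

/* Wrap 'size' bytes of anonymous memory in a dma-buf; returns the
 * dma-buf fd or -1. */
static int make_udmabuf(size_t size)
{
	struct udmabuf_create create;
	int memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	int devfd = open("/dev/udmabuf", O_RDWR);

	if (memfd < 0 || devfd < 0)
		return -1;
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;
	return ioctl(devfd, UDMABUF_CREATE, &create);
}
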
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09d00f8c442b..30baccb6c9c4 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -33,6 +33,7 @@ struct udphdr {
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6 */
#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
#define UDP_SEGMENT 103 /* Set GSO segmentation size */
+#define UDP_GRO 104 /* This socket can receive UDP GRO packets */
/* UDP encapsulation types */
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
@@ -40,5 +41,6 @@ struct udphdr {
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC 6
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index 729af2f861a4..fdd4d88a7b95 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*
* This file holds USB constants defined by the USB Device Class
* and USB488 Subclass Definitions for Test and Measurement devices
@@ -40,11 +41,38 @@
#define USBTMC488_REQUEST_GOTO_LOCAL 161
#define USBTMC488_REQUEST_LOCAL_LOCKOUT 162
+struct usbtmc_request {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
+
+struct usbtmc_ctrlrequest {
+ struct usbtmc_request req;
+ void __user *data; /* pointer to user space */
+} __attribute__ ((packed));
+
struct usbtmc_termchar {
__u8 term_char;
__u8 term_char_enabled;
} __attribute__ ((packed));
+/*
+ * usbtmc_message->flags:
+ */
+#define USBTMC_FLAG_ASYNC 0x0001
+#define USBTMC_FLAG_APPEND 0x0002
+#define USBTMC_FLAG_IGNORE_TRAILER 0x0004
+
+struct usbtmc_message {
+ __u32 transfer_size; /* size of bytes to transfer */
+ __u32 transferred; /* size of received/written bytes */
+ __u32 flags; /* bit 0: 0 = synchronous; 1 = asynchronous */
+ void __user *message; /* pointer to header and data in user space */
+} __attribute__ ((packed));
+
/* Request values for USBTMC driver's ioctl entry point */
#define USBTMC_IOC_NR 91
#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
@@ -53,10 +81,15 @@ struct usbtmc_termchar {
#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
+#define USBTMC_IOCTL_CTRL_REQUEST _IOWR(USBTMC_IOC_NR, 8, struct usbtmc_ctrlrequest)
#define USBTMC_IOCTL_GET_TIMEOUT _IOR(USBTMC_IOC_NR, 9, __u32)
#define USBTMC_IOCTL_SET_TIMEOUT _IOW(USBTMC_IOC_NR, 10, __u32)
#define USBTMC_IOCTL_EOM_ENABLE _IOW(USBTMC_IOC_NR, 11, __u8)
#define USBTMC_IOCTL_CONFIG_TERMCHAR _IOW(USBTMC_IOC_NR, 12, struct usbtmc_termchar)
+#define USBTMC_IOCTL_WRITE _IOWR(USBTMC_IOC_NR, 13, struct usbtmc_message)
+#define USBTMC_IOCTL_READ _IOWR(USBTMC_IOC_NR, 14, struct usbtmc_message)
+#define USBTMC_IOCTL_WRITE_RESULT _IOWR(USBTMC_IOC_NR, 15, __u32)
+#define USBTMC_IOCTL_API_VERSION _IOR(USBTMC_IOC_NR, 16, __u32)
#define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char)
#define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char)
@@ -64,6 +97,14 @@ struct usbtmc_termchar {
#define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20)
#define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21)
#define USBTMC488_IOCTL_TRIGGER _IO(USBTMC_IOC_NR, 22)
+#define USBTMC488_IOCTL_WAIT_SRQ _IOW(USBTMC_IOC_NR, 23, __u32)
+
+#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8)
+#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8)
+
+/* Cancel and cleanup asynchronous calls */
+#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35)
+#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36)
/* Driver encoded usb488 capabilities */
#define USBTMC488_CAPABILITY_TRIGGER 1
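
A minimal sketch (not part of the patch) of an asynchronous bulk-OUT transfer using the new struct usbtmc_message ioctls. Waiting on POLLOUT for completion and the 5-second timeout are assumptions about the driver's poll() behaviour; a robust caller would also check POLLERR and use USBTMC_IOCTL_CANCEL_IO on timeout.

#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

static int usbtmc_async_write(int fd, void *buf, __u32 len)
{
	struct usbtmc_message msg;
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	__u32 written = 0;

	memset(&msg, 0, sizeof(msg));
	msg.transfer_size = len;
	msg.flags = USBTMC_FLAG_ASYNC;
	msg.message = buf;
	if (ioctl(fd, USBTMC_IOCTL_WRITE, &msg) < 0)
		return -1;

	poll(&pfd, 1, 5000);			/* wait for the URBs */
	return ioctl(fd, USBTMC_IOCTL_WRITE_RESULT, &written);
}
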
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index ff6cc6cb4227..d854cb19c42c 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -192,14 +192,14 @@ struct uvc_descriptor_header {
/* 3.7.2. Video Control Interface Header Descriptor */
struct uvc_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 bcdUVC;
- __u16 wTotalLength;
- __u32 dwClockFrequency;
- __u8 bInCollection;
- __u8 baInterfaceNr[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 bcdUVC;
+ __le16 wTotalLength;
+ __le32 dwClockFrequency;
+ __u8 bInCollection;
+ __u8 baInterfaceNr[];
} __attribute__((__packed__));
#define UVC_DT_HEADER_SIZE(n) (12+(n))
@@ -209,57 +209,57 @@ struct uvc_header_descriptor {
#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \
struct UVC_HEADER_DESCRIPTOR(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u16 bcdUVC; \
- __u16 wTotalLength; \
- __u32 dwClockFrequency; \
- __u8 bInCollection; \
- __u8 baInterfaceNr[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __le16 bcdUVC; \
+ __le16 wTotalLength; \
+ __le32 dwClockFrequency; \
+ __u8 bInCollection; \
+ __u8 baInterfaceNr[n]; \
} __attribute__ ((packed))
/* 3.7.2.1. Input Terminal Descriptor */
struct uvc_input_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_INPUT_TERMINAL_SIZE 8
/* 3.7.2.2. Output Terminal Descriptor */
struct uvc_output_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 bSourceID;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_TERMINAL_SIZE 9
/* 3.7.2.3. Camera Terminal Descriptor */
struct uvc_camera_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
- __u16 wObjectiveFocalLengthMin;
- __u16 wObjectiveFocalLengthMax;
- __u16 wOcularFocalLength;
- __u8 bControlSize;
- __u8 bmControls[3];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+ __le16 wObjectiveFocalLengthMin;
+ __le16 wObjectiveFocalLengthMax;
+ __le16 wOcularFocalLength;
+ __u8 bControlSize;
+ __u8 bmControls[3];
} __attribute__((__packed__));
#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n))
@@ -293,15 +293,15 @@ struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \
/* 3.7.2.5. Processing Unit Descriptor */
struct uvc_processing_unit_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bUnitID;
- __u8 bSourceID;
- __u16 wMaxMultiplier;
- __u8 bControlSize;
- __u8 bmControls[2];
- __u8 iProcessing;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __le16 wMaxMultiplier;
+ __u8 bControlSize;
+ __u8 bmControls[2];
+ __u8 iProcessing;
} __attribute__((__packed__));
#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
@@ -343,29 +343,29 @@ struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \
/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */
struct uvc_control_endpoint_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 wMaxTransferSize;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 wMaxTransferSize;
} __attribute__((__packed__));
#define UVC_DT_CONTROL_ENDPOINT_SIZE 5
/* 3.9.2.1. Input Header Descriptor */
struct uvc_input_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bmInfo;
- __u8 bTerminalLink;
- __u8 bStillCaptureMethod;
- __u8 bTriggerSupport;
- __u8 bTriggerUsage;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bmInfo;
+ __u8 bTerminalLink;
+ __u8 bStillCaptureMethod;
+ __u8 bTriggerSupport;
+ __u8 bTriggerUsage;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p))
@@ -375,32 +375,32 @@ struct uvc_input_header_descriptor {
#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bmInfo; \
- __u8 bTerminalLink; \
- __u8 bStillCaptureMethod; \
- __u8 bTriggerSupport; \
- __u8 bTriggerUsage; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bmInfo; \
+ __u8 bTerminalLink; \
+ __u8 bStillCaptureMethod; \
+ __u8 bTriggerSupport; \
+ __u8 bTriggerUsage; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.2. Output Header Descriptor */
struct uvc_output_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bTerminalLink;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bTerminalLink;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p))
@@ -410,15 +410,15 @@ struct uvc_output_header_descriptor {
#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bTerminalLink; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bTerminalLink; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.6. Color matching descriptor */
@@ -473,19 +473,19 @@ struct uvc_format_uncompressed {
/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */
struct uvc_frame_uncompressed {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n))
@@ -495,19 +495,19 @@ struct uvc_frame_uncompressed {
#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \
struct UVC_FRAME_UNCOMPRESSED(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */
@@ -529,19 +529,19 @@ struct uvc_format_mjpeg {
/* MJPEG Payload - 3.1.2. MJPEG Video Frame Descriptor */
struct uvc_frame_mjpeg {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n))
@@ -551,19 +551,19 @@ struct uvc_frame_mjpeg {
#define DECLARE_UVC_FRAME_MJPEG(n) \
struct UVC_FRAME_MJPEG(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
#endif /* __LINUX_USB_VIDEO_H */
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 4f7b892377cd..7d21c1634b4d 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -79,24 +79,11 @@
/* Current composing area plus all padding pixels */
#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
-/* Backward compatibility target definitions --- to be removed. */
-#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
-#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
-
/* Selection flags */
#define V4L2_SEL_FLAG_GE (1 << 0)
#define V4L2_SEL_FLAG_LE (1 << 1)
#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
-/* Backward compatibility flag definitions --- to be removed. */
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
-#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
-
struct v4l2_edid {
__u32 pad;
__u32 start_block;
@@ -105,4 +92,19 @@ struct v4l2_edid {
__u8 *edid;
};
+#ifndef __KERNEL__
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+#endif
+
#endif /* __V4L2_COMMON__ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e4ee10ee917d..3dcfc6148f99 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/types.h>
+
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 1aa7b82e8169..02bb7ad6e986 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -200,6 +200,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
+#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -215,6 +216,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
+#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
@@ -301,6 +303,71 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
+#define VFIO_REGION_TYPE_GFX (1)
+#define VFIO_REGION_SUBTYPE_GFX_EDID (1)
+
+/**
+ * struct vfio_region_gfx_edid - EDID region layout.
+ *
+ * Set display link state and EDID blob.
+ *
+ * The EDID blob has monitor information such as brand, name, serial
+ * number, physical size, supported video modes and more.
+ *
+ * This special region allows userspace (typically qemu) to set a virtual
+ * EDID for the virtual monitor, which allows a flexible display
+ * configuration.
+ *
+ * For the edid blob spec look here:
+ * https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
+ *
+ * On linux systems you can find the EDID blob in sysfs:
+ * /sys/class/drm/${card}/${connector}/edid
+ *
+ * You can use the edid-decode utility (comes with xorg-x11-utils) to
+ * decode the EDID blob.
+ *
+ * @edid_offset: location of the edid blob, relative to the
+ * start of the region (readonly).
+ * @edid_max_size: max size of the edid blob (readonly).
+ * @edid_size: actual edid size (read/write).
+ * @link_state: display link state (read/write).
+ * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
+ * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
+ * @max_xres: max display width (0 == no limitation, readonly).
+ * @max_yres: max display height (0 == no limitation, readonly).
+ *
+ * EDID update protocol:
+ * (1) set link-state to down.
+ * (2) update edid blob and size.
+ * (3) set link-state to up.
+ */
+struct vfio_region_gfx_edid {
+ __u32 edid_offset;
+ __u32 edid_max_size;
+ __u32 edid_size;
+ __u32 max_xres;
+ __u32 max_yres;
+ __u32 link_state;
+#define VFIO_DEVICE_GFX_LINK_STATE_UP 1
+#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
+};
+
+/*
+ * 10de vendor sub-type
+ *
+ * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ */
+#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
+
+/*
+ * 1014 vendor sub-type
+ *
+ * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
+ * to do TLB invalidation on a GPU.
+ */
+#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
+
/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
@@ -311,6 +378,33 @@ struct vfio_region_info_cap_type {
*/
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
+/*
+ * Capability with compressed real address (aka SSA - small system address)
+ * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
+ * and by the userspace to associate a NVLink bridge with a GPU.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
+
+struct vfio_region_info_cap_nvlink2_ssatgt {
+ struct vfio_info_cap_header header;
+ __u64 tgt;
+};
+
+/*
+ * Capability with an NVLink link speed. The value is read by
+ * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
+ * property in the device tree. The value is fixed in the hardware
+ * and failing to provide the correct value results in the link
+ * not working, with no indication from the driver as to why.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
+
+struct vfio_region_info_cap_nvlink2_lnkspd {
+ struct vfio_info_cap_header header;
+ __u32 link_speed;
+ __u32 __pad;
+};
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
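
A minimal sketch (not part of the patch) of the three-step EDID update protocol described in the region comment above. It assumes the caller has already located the GFX/EDID region and obtained its file offset 'region_off' via VFIO_DEVICE_GET_REGION_INFO, and that the region is accessed with pread/pwrite; error handling is omitted.

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/vfio.h>

static int set_edid(int device_fd, off_t region_off,
		    const void *edid, __u32 edid_size)
{
	struct vfio_region_gfx_edid hdr;
	__u32 state;

	/* Read the read-only layout fields (edid_offset, edid_max_size). */
	pread(device_fd, &hdr, sizeof(hdr), region_off);
	if (edid_size > hdr.edid_max_size)
		return -1;

	state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;		/* step (1) */
	pwrite(device_fd, &state, sizeof(state),
	       region_off + offsetof(struct vfio_region_gfx_edid, link_state));

	pwrite(device_fd, edid, edid_size,			/* step (2) */
	       region_off + hdr.edid_offset);
	pwrite(device_fd, &edid_size, sizeof(edid_size),
	       region_off + offsetof(struct vfio_region_gfx_edid, edid_size));

	state = VFIO_DEVICE_GFX_LINK_STATE_UP;			/* step (3) */
	pwrite(device_fd, &state, sizeof(state),
	       region_off + offsetof(struct vfio_region_gfx_edid, link_state));
	return 0;
}
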
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b1e22c40c4b6..40d028eed645 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -11,94 +11,9 @@
* device configuration.
*/
+#include <linux/vhost_types.h>
#include <linux/types.h>
-#include <linux/compiler.h>
#include <linux/ioctl.h>
-#include <linux/virtio_config.h>
-#include <linux/virtio_ring.h>
-
-struct vhost_vring_state {
- unsigned int index;
- unsigned int num;
-};
-
-struct vhost_vring_file {
- unsigned int index;
- int fd; /* Pass -1 to unbind from file. */
-
-};
-
-struct vhost_vring_addr {
- unsigned int index;
- /* Option flags. */
- unsigned int flags;
- /* Flag values: */
- /* Whether log address is valid. If set enables logging. */
-#define VHOST_VRING_F_LOG 0
-
- /* Start of array of descriptors (virtually contiguous) */
- __u64 desc_user_addr;
- /* Used structure address. Must be 32 bit aligned */
- __u64 used_user_addr;
- /* Available structure address. Must be 16 bit aligned */
- __u64 avail_user_addr;
- /* Logging support. */
- /* Log writes to used structure, at offset calculated from specified
- * address. Address must be 32 bit aligned. */
- __u64 log_guest_addr;
-};
-
-/* no alignment requirement */
-struct vhost_iotlb_msg {
- __u64 iova;
- __u64 size;
- __u64 uaddr;
-#define VHOST_ACCESS_RO 0x1
-#define VHOST_ACCESS_WO 0x2
-#define VHOST_ACCESS_RW 0x3
- __u8 perm;
-#define VHOST_IOTLB_MISS 1
-#define VHOST_IOTLB_UPDATE 2
-#define VHOST_IOTLB_INVALIDATE 3
-#define VHOST_IOTLB_ACCESS_FAIL 4
- __u8 type;
-};
-
-#define VHOST_IOTLB_MSG 0x1
-#define VHOST_IOTLB_MSG_V2 0x2
-
-struct vhost_msg {
- int type;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_msg_v2 {
- __u32 type;
- __u32 reserved;
- union {
- struct vhost_iotlb_msg iotlb;
- __u8 padding[64];
- };
-};
-
-struct vhost_memory_region {
- __u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
- __u64 userspace_addr;
- __u64 flags_padding; /* No flags are currently specified. */
-};
-
-/* All region addresses and sizes must be 4K aligned. */
-#define VHOST_PAGE_SIZE 0x1000
-
-struct vhost_memory {
- __u32 nregions;
- __u32 padding;
- struct vhost_memory_region regions[0];
-};
/* ioctls */
@@ -176,7 +91,7 @@ struct vhost_memory {
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
/* VHOST_NET specific defines */
@@ -186,31 +101,7 @@ struct vhost_memory {
* device. This can be used to stop the ring (e.g. for migration). */
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
-/* Feature bits */
-/* Log all write descriptors. Can be changed while device is active. */
-#define VHOST_F_LOG_ALL 26
-/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
-#define VHOST_NET_F_VIRTIO_NET_HDR 27
-
-/* VHOST_SCSI specific definitions */
-
-/*
- * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
- *
- * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
- * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
- * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
- * All the targets under vhost_wwpn can be seen and used by guset.
- */
-
-#define VHOST_SCSI_ABI_VERSION 1
-
-struct vhost_scsi_target {
- int abi_version;
- char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
- unsigned short vhost_tpgt;
- unsigned short reserved;
-};
+/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
new file mode 100644
index 000000000000..c907290ff065
--- /dev/null
+++ b/include/uapi/linux/vhost_types.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_VHOST_TYPES_H
+#define _LINUX_VHOST_TYPES_H
+/* Userspace interface for in-kernel virtio accelerators. */
+
+/* vhost is used to reduce the number of system calls involved in virtio.
+ *
+ * Existing virtio net code is used in the guest without modification.
+ *
+ * This header includes interface used by userspace hypervisor for
+ * device configuration.
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+struct vhost_vring_state {
+ unsigned int index;
+ unsigned int num;
+};
+
+struct vhost_vring_file {
+ unsigned int index;
+ int fd; /* Pass -1 to unbind from file. */
+
+};
+
+struct vhost_vring_addr {
+ unsigned int index;
+ /* Option flags. */
+ unsigned int flags;
+ /* Flag values: */
+ /* Whether log address is valid. If set enables logging. */
+#define VHOST_VRING_F_LOG 0
+
+ /* Start of array of descriptors (virtually contiguous) */
+ __u64 desc_user_addr;
+ /* Used structure address. Must be 32 bit aligned */
+ __u64 used_user_addr;
+ /* Available structure address. Must be 16 bit aligned */
+ __u64 avail_user_addr;
+ /* Logging support. */
+ /* Log writes to used structure, at offset calculated from specified
+ * address. Address must be 32 bit aligned. */
+ __u64 log_guest_addr;
+};
+
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+ __u64 iova;
+ __u64 size;
+ __u64 uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ __u8 perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+ __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
+struct vhost_msg_v2 {
+ __u32 type;
+ __u32 reserved;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
+struct vhost_memory_region {
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr;
+ __u64 flags_padding; /* No flags are currently specified. */
+};
+
+/* All region addresses and sizes must be 4K aligned. */
+#define VHOST_PAGE_SIZE 0x1000
+
+struct vhost_memory {
+ __u32 nregions;
+ __u32 padding;
+ struct vhost_memory_region regions[0];
+};
+
+/* VHOST_SCSI specific definitions */
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target.
+ * All the targets under vhost_wwpn can be seen and used by guest.
+ */
+
+#define VHOST_SCSI_ABI_VERSION 1
+
+struct vhost_scsi_target {
+ int abi_version;
+ char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
+ unsigned short vhost_tpgt;
+ unsigned short reserved;
+};
+
+/* Feature bits */
+/* Log all write descriptors. Can be changed while device is active. */
+#define VHOST_F_LOG_ALL 26
+/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
+#define VHOST_NET_F_VIRTIO_NET_HDR 27
+
+#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 5d1a3685bea9..b5671ce2724f 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -145,6 +145,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
+ V4L2_BUF_TYPE_META_OUTPUT = 14,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -225,8 +226,8 @@ enum v4l2_colorspace {
/* For RGB colorspaces such as produces by most webcams. */
V4L2_COLORSPACE_SRGB = 8,
- /* AdobeRGB colorspace */
- V4L2_COLORSPACE_ADOBERGB = 9,
+ /* opRGB colorspace */
+ V4L2_COLORSPACE_OPRGB = 9,
/* BT.2020 colorspace, used for UHDTV. */
V4L2_COLORSPACE_BT2020 = 10,
@@ -258,7 +259,7 @@ enum v4l2_xfer_func {
*
* V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
*
- * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
+ * V4L2_COLORSPACE_OPRGB: V4L2_XFER_FUNC_OPRGB
*
* V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
*
@@ -269,7 +270,7 @@ enum v4l2_xfer_func {
V4L2_XFER_FUNC_DEFAULT = 0,
V4L2_XFER_FUNC_709 = 1,
V4L2_XFER_FUNC_SRGB = 2,
- V4L2_XFER_FUNC_ADOBERGB = 3,
+ V4L2_XFER_FUNC_OPRGB = 3,
V4L2_XFER_FUNC_SMPTE240M = 4,
V4L2_XFER_FUNC_NONE = 5,
V4L2_XFER_FUNC_DCI_P3 = 6,
@@ -281,7 +282,7 @@ enum v4l2_xfer_func {
* This depends on the colorspace.
*/
#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
- ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
+ ((colsp) == V4L2_COLORSPACE_OPRGB ? V4L2_XFER_FUNC_OPRGB : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
@@ -295,7 +296,7 @@ enum v4l2_ycbcr_encoding {
*
* V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
* V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
- * V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
+ * V4L2_COLORSPACE_OPRGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
*
* V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
*
@@ -382,6 +383,17 @@ enum v4l2_quantization {
(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
+/*
+ * Deprecated names for opRGB colorspace (IEC 61966-2-5)
+ *
+ * WARNING: Please don't use these deprecated defines in your code, as
+ * there is a chance we have to remove them in the future.
+ */
+#ifndef __KERNEL__
+#define V4L2_COLORSPACE_ADOBERGB V4L2_COLORSPACE_OPRGB
+#define V4L2_XFER_FUNC_ADOBERGB V4L2_XFER_FUNC_OPRGB
+#endif
+
enum v4l2_priority {
V4L2_PRIORITY_UNSET = 0, /* not initialized */
V4L2_PRIORITY_BACKGROUND = 1,
@@ -458,6 +470,7 @@ struct v4l2_capability {
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
+#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
@@ -635,6 +648,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */
#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */
#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */
+#define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */
#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
@@ -676,6 +690,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
+#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
+#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
@@ -703,6 +719,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
+#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -856,9 +873,17 @@ struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
- __u32 reserved[2];
+ __u32 capabilities;
+ __u32 reserved[1];
};
+/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
+#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
+#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
+#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
+#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
@@ -917,6 +942,7 @@ struct v4l2_plane {
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
+ * @request_fd: fd of the request that this buffer should use
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
@@ -941,7 +967,10 @@ struct v4l2_buffer {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ union {
+ __s32 request_fd;
+ __u32 reserved;
+ };
};
/* Flags for 'flags' field */
@@ -959,6 +988,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_BFRAME 0x00000020
/* Buffer is ready, but the data contained within is corrupted. */
#define V4L2_BUF_FLAG_ERROR 0x00000040
+/* Buffer is added to an unqueued request */
+#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
/* Buffer is prepared for queuing */
@@ -977,6 +1008,8 @@ struct v4l2_buffer {
#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
/* mem2mem encoder/decoder */
#define V4L2_BUF_FLAG_LAST 0x00100000
+/* request_fd is valid */
+#define V4L2_BUF_FLAG_REQUEST_FD 0x00800000
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
@@ -1400,6 +1433,13 @@ struct v4l2_bt_timings {
* InfoFrame).
*/
#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8)
+/*
+ * CEA-861 specific: only valid for video receivers.
+ * If set, then HW can detect the difference between regular FPS and
+ * 1000/1001 FPS. Note: This flag is only valid for HDMI VIC codes with
+ * the V4L2_DV_FL_CAN_REDUCE_FPS flag set.
+ */
+#define V4L2_DV_FL_CAN_DETECT_REDUCED_FPS (1 << 9)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -1599,7 +1639,8 @@ struct v4l2_ext_controls {
};
__u32 count;
__u32 error_idx;
- __u32 reserved[2];
+ __s32 request_fd;
+ __u32 reserved[1];
struct v4l2_ext_control *controls;
};
@@ -1612,6 +1653,7 @@ struct v4l2_ext_controls {
#define V4L2_CTRL_MAX_DIMS (4)
#define V4L2_CTRL_WHICH_CUR_VAL 0
#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000
+#define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000
enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_INTEGER = 1,
@@ -2302,6 +2344,7 @@ struct v4l2_dbg_chip_info {
* return: number of created buffers
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2309,7 +2352,8 @@ struct v4l2_create_buffers {
__u32 count;
__u32 memory;
struct v4l2_format format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 reserved[7];
};
/*
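
A minimal sketch (not part of the patch) probing the new 'capabilities' field of VIDIOC_REQBUFS; a zero count only queries (and frees) buffers, so nothing is allocated. A buffer is then tied to a request by setting V4L2_BUF_FLAG_REQUEST_FD and request_fd before VIDIOC_QBUF, and request-time controls are addressed with V4L2_CTRL_WHICH_REQUEST_VAL. The capture buffer type chosen below is an illustrative assumption.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns non-zero when the device supports the requests API. */
static int supports_requests(int video_fd)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 0;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) < 0)
		return 0;
	return !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
}
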
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 13b8cb563892..a1966cd7b677 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -34,15 +34,23 @@
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */
+#define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
+#define VIRTIO_BALLOON_CMD_ID_STOP 0
+#define VIRTIO_BALLOON_CMD_ID_DONE 1
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
__u32 num_pages;
/* Number of pages we've actually got in balloon. */
__u32 actual;
+ /* Free page report command id, readonly by guest */
+ __u32 free_page_report_cmd_id;
+ /* Stores PAGE_POISON if page poisoning is in use */
+ __u32 poison_val;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 9ebe4d968dd5..0f99d7b49ede 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -38,6 +38,8 @@
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
+#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */
+#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */
/* Legacy feature bits */
#ifndef VIRTIO_BLK_NO_LEGACY
@@ -86,6 +88,39 @@ struct virtio_blk_config {
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
__u16 num_queues;
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
+ /*
+ * The maximum discard sectors (in 512-byte sectors) for
+ * one segment.
+ */
+ __u32 max_discard_sectors;
+ /*
+ * The maximum number of discard segments in a
+ * discard command.
+ */
+ __u32 max_discard_seg;
+ /* Discard commands must be aligned to this number of sectors. */
+ __u32 discard_sector_alignment;
+
+ /* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
+ /*
+ * The maximum number of write zeroes sectors (in 512-byte sectors) in
+ * one segment.
+ */
+ __u32 max_write_zeroes_sectors;
+ /*
+ * The maximum number of segments in a write zeroes
+ * command.
+ */
+ __u32 max_write_zeroes_seg;
+ /*
+ * Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
+ * deallocation of one or more of the sectors.
+ */
+ __u8 write_zeroes_may_unmap;
+
+ __u8 unused1[3];
} __attribute__((packed));
/*
@@ -114,6 +149,12 @@ struct virtio_blk_config {
/* Get device ID command */
#define VIRTIO_BLK_T_GET_ID 8
+/* Discard command */
+#define VIRTIO_BLK_T_DISCARD 11
+
+/* Write zeroes command */
+#define VIRTIO_BLK_T_WRITE_ZEROES 13
+
#ifndef VIRTIO_BLK_NO_LEGACY
/* Barrier before this op. */
#define VIRTIO_BLK_T_BARRIER 0x80000000
@@ -133,6 +174,19 @@ struct virtio_blk_outhdr {
__virtio64 sector;
};
+/* Unmap this range (only valid for write zeroes command) */
+#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001
+
+/* Discard/write zeroes range for each request. */
+struct virtio_blk_discard_write_zeroes {
+ /* discard/write zeroes start sector */
+ __le64 sector;
+ /* number of discard/write zeroes sectors */
+ __le32 num_sectors;
+ /* flags for this range */
+ __le32 flags;
+};
+
#ifndef VIRTIO_BLK_NO_LEGACY
struct virtio_scsi_inhdr {
__virtio32 errors;
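
A minimal sketch (not part of the patch), in a userspace device-model flavour, of filling the range descriptor that follows a VIRTIO_BLK_T_DISCARD or VIRTIO_BLK_T_WRITE_ZEROES header on the virtqueue; the endian conversion via <endian.h> and the helper name are assumptions.

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_blk.h>

static void fill_discard_range(struct virtio_blk_discard_write_zeroes *range,
			       uint64_t sector, uint32_t nr_sectors,
			       bool unmap)
{
	range->sector = htole64(sector);	/* start, in 512-byte sectors */
	range->num_sectors = htole32(nr_sectors);
	range->flags = htole32(unmap ?
			       VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP : 0);
}
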
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 449132c76b1c..1196e1c1d4f6 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -75,6 +75,9 @@
*/
#define VIRTIO_F_IOMMU_PLATFORM 33
+/* This feature indicates support for the packed virtqueue layout. */
+#define VIRTIO_F_RING_PACKED 34
+
/*
* Does the device support Single Root I/O Virtualization?
*/
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index f43c3c6171ff..8e88eba1fa7a 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -41,6 +41,7 @@
#include <linux/types.h>
#define VIRTIO_GPU_F_VIRGL 0
+#define VIRTIO_GPU_F_EDID 1
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
+ VIRTIO_GPU_CMD_GET_EDID,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
+ VIRTIO_GPU_RESP_OK_EDID,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset {
__u8 capset_data[];
};
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 scanout;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 size;
+ __le32 padding;
+ __u8 edid[1024];
+};
+
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 6d5d5faa989b..2414f8af26b3 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -44,6 +44,13 @@
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
@@ -53,6 +60,23 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
@@ -171,4 +195,32 @@ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
+struct vring_packed_desc_event {
+ /* Descriptor Ring Change Event Offset/Wrap Counter. */
+ __le16 off_wrap;
+ /* Descriptor Ring Change Event Flags. */
+ __le16 flags;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ __le64 addr;
+ /* Buffer Length. */
+ __le32 len;
+ /* Buffer ID. */
+ __le16 id;
+ /* The flags depending on descriptor type. */
+ __le16 flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+
+ struct vring_packed_desc *desc;
+
+ struct vring_packed_desc_event *driver;
+
+ struct vring_packed_desc_event *device;
+};
+
#endif /* _UAPI_LINUX_VIRTIO_RING_H */
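
Because VRING_PACKED_DESC_F_AVAIL/USED are bit positions rather than masks, a device treats a packed descriptor as newly available when its avail bit matches the driver's wrap counter while its used bit does not. A minimal sketch (not part of the patch) of that check, assuming the structures added above and <endian.h> for byte order:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_ring.h>

static bool packed_desc_is_avail(const struct vring_packed_desc *desc,
				 bool wrap_counter)
{
	uint16_t flags = le16toh(desc->flags);
	bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	return avail == wrap_counter && used != wrap_counter;
}
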
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index 5b04a494d139..aad3b6201fc0 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -285,6 +285,20 @@ struct ubi_attach_req {
__s8 padding[10];
};
+/*
+ * UBI volume flags.
+ *
+ * @UBI_VOL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at
+ * open time. Only valid for static volumes and
+ * should only be used if the volume user has a
+ * way to verify data integrity
+ */
+enum {
+ UBI_VOL_SKIP_CRC_CHECK_FLG = 0x1,
+};
+
+#define UBI_VOL_VALID_FLGS (UBI_VOL_SKIP_CRC_CHECK_FLG)
+
/**
* struct ubi_mkvol_req - volume description data structure used in
* volume creation requests.
@@ -292,7 +306,7 @@ struct ubi_attach_req {
* @alignment: volume alignment
* @bytes: volume size in bytes
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
- * @padding1: reserved for future, not used, has to be zeroed
+ * @flags: volume flags (%UBI_VOL_SKIP_CRC_CHECK_FLG)
* @name_len: volume name length
* @padding2: reserved for future, not used, has to be zeroed
* @name: volume name
@@ -321,7 +335,7 @@ struct ubi_mkvol_req {
__s32 alignment;
__s64 bytes;
__s8 vol_type;
- __s8 padding1;
+ __u8 flags;
__s16 name_len;
__s8 padding2[4];
char name[UBI_MAX_VOLUME_NAME + 1];
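
A minimal sketch (not part of the patch) of creating a static volume with the new skip-CRC flag. '/dev/ubi0', the sizes and the helper name are placeholders; UBI_IOCMKVOL, UBI_VOL_NUM_AUTO and UBI_STATIC_VOLUME are taken from other parts of the same header.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

static int make_nocrc_volume(const char *name, long long bytes)
{
	struct ubi_mkvol_req req;
	int fd = open("/dev/ubi0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.vol_id = UBI_VOL_NUM_AUTO;
	req.alignment = 1;
	req.bytes = bytes;
	req.vol_type = UBI_STATIC_VOLUME;
	req.flags = UBI_VOL_SKIP_CRC_CHECK_FLG;
	req.name_len = strlen(name);
	strncpy(req.name, name, UBI_MAX_VOLUME_NAME);
	return ioctl(fd, UBI_IOCMKVOL, &req);
}
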
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index c6a984c0c881..01ac5853d9ac 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -6,7 +6,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -95,7 +95,7 @@
#define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */
#define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */
#define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */
-/* 1UL << 5 unused */
+#define HFI1_CAP_TID_RDMA (1UL << 5) /* Enable TID RDMA operations */
#define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. use CSR */
#define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/
#define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */
@@ -106,7 +106,7 @@
#define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */
#define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */
#define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */
-/* 1UL << 16 unused */
+#define HFI1_CAP_OPFN (1UL << 16) /* Enable the OPFN protocol */
#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index c1f87735514f..ef3c7ec793a7 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -46,6 +46,12 @@ struct hns_roce_ib_create_cq_resp {
__aligned_u64 cap_flags;
};
+struct hns_roce_ib_create_srq {
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
+ __aligned_u64 que_addr;
+};
+
struct hns_roce_ib_create_qp {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 2c881aaf05c2..64f0e3aacd3f 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -63,6 +63,23 @@ enum {
UVERBS_ATTR_UHW_OUT,
};
+enum uverbs_methods_device {
+ UVERBS_METHOD_INVOKE_WRITE,
+ UVERBS_METHOD_INFO_HANDLES,
+ UVERBS_METHOD_QUERY_PORT,
+};
+
+enum uverbs_attrs_invoke_write_cmd_attr_ids {
+ UVERBS_ATTR_CORE_IN,
+ UVERBS_ATTR_CORE_OUT,
+ UVERBS_ATTR_WRITE_CMD,
+};
+
+enum uverbs_attrs_query_port_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_PORT_PORT_NUM,
+ UVERBS_ATTR_QUERY_PORT_RESP,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -135,6 +152,19 @@ enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
enum uverbs_methods_mr {
UVERBS_METHOD_DM_MR_REG,
+ UVERBS_METHOD_MR_DESTROY,
+ UVERBS_METHOD_ADVISE_MR,
+};
+
+enum uverbs_attrs_mr_destroy_ids {
+ UVERBS_ATTR_DESTROY_MR_HANDLE,
+};
+
+enum uverbs_attrs_advise_mr_cmd_attr_ids {
+ UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
+ UVERBS_ATTR_ADVISE_MR_ADVICE,
+ UVERBS_ATTR_ADVISE_MR_FLAGS,
+ UVERBS_ATTR_ADVISE_MR_SGE_LIST,
};
enum uverbs_attrs_create_counters_cmd_attr_ids {
@@ -157,4 +187,58 @@ enum uverbs_methods_actions_counters_ops {
UVERBS_METHOD_COUNTERS_READ,
};
+enum uverbs_attrs_info_handles_id {
+ UVERBS_ATTR_INFO_OBJECT_ID,
+ UVERBS_ATTR_INFO_TOTAL_HANDLES,
+ UVERBS_ATTR_INFO_HANDLES_LIST,
+};
+
+enum uverbs_methods_pd {
+ UVERBS_METHOD_PD_DESTROY,
+};
+
+enum uverbs_attrs_pd_destroy_ids {
+ UVERBS_ATTR_DESTROY_PD_HANDLE,
+};
+
+enum uverbs_methods_mw {
+ UVERBS_METHOD_MW_DESTROY,
+};
+
+enum uverbs_attrs_mw_destroy_ids {
+ UVERBS_ATTR_DESTROY_MW_HANDLE,
+};
+
+enum uverbs_methods_xrcd {
+ UVERBS_METHOD_XRCD_DESTROY,
+};
+
+enum uverbs_attrs_xrcd_destroy_ids {
+ UVERBS_ATTR_DESTROY_XRCD_HANDLE,
+};
+
+enum uverbs_methods_ah {
+ UVERBS_METHOD_AH_DESTROY,
+};
+
+enum uverbs_attrs_ah_destroy_ids {
+ UVERBS_ATTR_DESTROY_AH_HANDLE,
+};
+
+enum uverbs_methods_rwq_ind_tbl {
+ UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
+};
+
+enum uverbs_attrs_rwq_ind_tbl_destroy_ids {
+ UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
+};
+
+enum uverbs_methods_flow {
+ UVERBS_METHOD_FLOW_DESTROY,
+};
+
+enum uverbs_attrs_flow_destroy_ids {
+ UVERBS_ATTR_DESTROY_FLOW_HANDLE,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 6cdf192070a2..72c7fc75f960 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -35,6 +35,7 @@
#define IB_USER_IOCTL_VERBS_H
#include <linux/types.h>
+#include <rdma/ib_user_verbs.h>
#ifndef RDMA_UAPI_PTR
#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
@@ -157,4 +158,19 @@ enum ib_uverbs_read_counters_flags {
IB_UVERBS_READ_COUNTERS_PREFER_CACHED = 1 << 0,
};
+enum ib_uverbs_advise_mr_advice {
+ IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH,
+ IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
+};
+
+enum ib_uverbs_advise_mr_flag {
+ IB_UVERBS_ADVISE_MR_FLAG_FLUSH = 1 << 0,
+};
+
+struct ib_uverbs_query_port_resp_ex {
+ struct ib_uverbs_query_port_resp legacy_resp;
+ __u16 port_cap_flags2;
+ __u8 reserved[6];
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25a16760de2a..480d9a60b68e 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -46,7 +46,7 @@
#define IB_USER_VERBS_ABI_VERSION 6
#define IB_USER_VERBS_CMD_THRESHOLD 50
-enum {
+enum ib_uverbs_write_cmds {
IB_USER_VERBS_CMD_GET_CONTEXT,
IB_USER_VERBS_CMD_QUERY_DEVICE,
IB_USER_VERBS_CMD_QUERY_PORT,
@@ -164,6 +164,7 @@ struct ib_uverbs_get_context {
struct ib_uverbs_get_context_resp {
__u32 async_fd;
__u32 num_comp_vectors;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device {
@@ -310,6 +311,7 @@ struct ib_uverbs_alloc_pd {
struct ib_uverbs_alloc_pd_resp {
__u32 pd_handle;
+ __u32 driver_data[0];
};
struct ib_uverbs_dealloc_pd {
@@ -325,6 +327,7 @@ struct ib_uverbs_open_xrcd {
struct ib_uverbs_open_xrcd_resp {
__u32 xrcd_handle;
+ __u32 driver_data[0];
};
struct ib_uverbs_close_xrcd {
@@ -345,6 +348,7 @@ struct ib_uverbs_reg_mr_resp {
__u32 mr_handle;
__u32 lkey;
__u32 rkey;
+ __u32 driver_data[0];
};
struct ib_uverbs_rereg_mr {
@@ -356,11 +360,13 @@ struct ib_uverbs_rereg_mr {
__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_rereg_mr_resp {
__u32 lkey;
__u32 rkey;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_dereg_mr {
@@ -372,11 +378,13 @@ struct ib_uverbs_alloc_mw {
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_mw_resp {
__u32 mw_handle;
__u32 rkey;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_dealloc_mw {
@@ -419,6 +427,7 @@ struct ib_uverbs_ex_create_cq {
struct ib_uverbs_create_cq_resp {
__u32 cq_handle;
__u32 cqe;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_create_cq_resp {
@@ -629,6 +638,7 @@ struct ib_uverbs_create_qp_resp {
__u32 max_recv_sge;
__u32 max_inline_data;
__u32 reserved;
+ __u32 driver_data[0];
};
struct ib_uverbs_ex_create_qp_resp {
@@ -733,9 +743,6 @@ struct ib_uverbs_ex_modify_qp {
__u32 reserved;
};
-struct ib_uverbs_modify_qp_resp {
-};
-
struct ib_uverbs_ex_modify_qp_resp {
__u32 comp_mask;
__u32 response_length;
@@ -763,10 +770,28 @@ struct ib_uverbs_sge {
__u32 lkey;
};
+enum ib_uverbs_wr_opcode {
+ IB_UVERBS_WR_RDMA_WRITE = 0,
+ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
+ IB_UVERBS_WR_SEND = 2,
+ IB_UVERBS_WR_SEND_WITH_IMM = 3,
+ IB_UVERBS_WR_RDMA_READ = 4,
+ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
+ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
+ IB_UVERBS_WR_LOCAL_INV = 7,
+ IB_UVERBS_WR_BIND_MW = 8,
+ IB_UVERBS_WR_SEND_WITH_INV = 9,
+ IB_UVERBS_WR_TSO = 10,
+ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
+ /* Review enum ib_wr_opcode before modifying this */
+};
+
struct ib_uverbs_send_wr {
__aligned_u64 wr_id;
__u32 num_sge;
- __u32 opcode;
+ __u32 opcode; /* see enum ib_uverbs_wr_opcode */
__u32 send_flags;
union {
__be32 imm_data;
@@ -845,10 +870,12 @@ struct ib_uverbs_create_ah {
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_ah_resp {
__u32 ah_handle;
+ __u32 driver_data[0];
};
struct ib_uverbs_destroy_ah {
@@ -1157,6 +1184,7 @@ struct ib_uverbs_create_srq_resp {
__u32 max_wr;
__u32 max_sge;
__u32 srqn;
+ __u32 driver_data[0];
};
struct ib_uverbs_modify_srq {
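Editorial note: the opcode field of struct ib_uverbs_send_wr now has named values; a small sketch of filling a plain send request with the enum (argument values are illustrative).

#include <rdma/ib_user_verbs.h>

static void init_plain_send(struct ib_uverbs_send_wr *wr, __u64 wr_id,
			    __u32 num_sge)
{
	wr->wr_id = wr_id;
	wr->num_sge = num_sge;
	wr->opcode = IB_UVERBS_WR_SEND;	/* named value instead of a bare constant */
	wr->send_flags = 0;
}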
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index addbb9c4529e..87b3198f4b5d 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -45,6 +45,10 @@ enum {
MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
};
enum {
@@ -233,6 +237,7 @@ enum mlx5_ib_query_dev_resp_flags {
/* Support 128B CQE compression */
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
+ MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
};
enum mlx5_ib_tunnel_offloads {
@@ -349,9 +354,22 @@ struct mlx5_ib_create_qp_rss {
__u32 flags;
};
+enum mlx5_ib_create_qp_resp_mask {
+ MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
+ MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
+ MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
+ MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
+};
+
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
__u32 reserved;
+ __u32 comp_mask;
+ __u32 tirn;
+ __u32 tisn;
+ __u32 rqn;
+ __u32 sqn;
+ __u32 reserved1;
};
struct mlx5_ib_alloc_mw {
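Editorial note: a short, hedged sketch of how a user-space provider could consume the extended create-QP response above, honoring comp_mask before trusting the new fields.

#include <rdma/mlx5-abi.h>

static void read_qp_numbers(const struct mlx5_ib_create_qp_resp *resp,
			    __u32 *tirn, __u32 *rqn)
{
	/* Only read fields the kernel explicitly marked as valid. */
	if (resp->comp_mask & MLX5_IB_CREATE_QP_RESP_MASK_TIRN)
		*tirn = resp->tirn;
	if (resp->comp_mask & MLX5_IB_CREATE_QP_RESP_MASK_RQN)
		*rqn = resp->rqn;
}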
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 9c51801b9e64..b8d121d457f1 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -125,6 +125,7 @@ enum mlx5_ib_flow_matcher_create_attrs {
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
};
enum mlx5_ib_flow_matcher_destroy_attrs {
@@ -155,6 +156,9 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
};
enum mlx5_ib_destoy_flow_attrs {
@@ -166,4 +170,22 @@ enum mlx5_ib_flow_methods {
MLX5_IB_METHOD_DESTROY_FLOW,
};
+enum mlx5_ib_flow_action_methods {
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+};
+
+enum mlx5_ib_create_flow_action_create_modify_header_attrs {
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+};
+
+enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 8a2fb33f3ed4..4ef62c0e8452 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags {
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
};
+enum mlx5_ib_uapi_flow_table_type {
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+};
+
+enum mlx5_ib_uapi_flow_action_packet_reformat_type {
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 = 0x0,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x1,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x2,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
+};
+
#endif
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index edba6351ac13..2e18b77a817f 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -227,8 +227,9 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_UNSPEC,
RDMA_NLDEV_CMD_GET, /* can dump */
+ RDMA_NLDEV_CMD_SET,
- /* 2 - 4 are free to use */
+ /* 3 - 4 are free to use */
RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
@@ -282,6 +283,9 @@ enum rdma_nldev_attr {
/*
* Device and port capabilities
+ *
+ * When used for port info, the first 32 bits are the CapabilityMask,
+ * followed by the 16-bit CapabilityMask2.
*/
RDMA_NLDEV_ATTR_CAP_FLAGS, /* u64 */
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 24800c6c1f32..06c34d99be85 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -53,7 +53,7 @@ enum {
struct ib_uverbs_attr {
__u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
+ __u16 len; /* only for pointers and IDRs array */
__u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
union {
struct {
@@ -63,7 +63,10 @@ struct ib_uverbs_attr {
__u16 reserved;
} attr_data;
union {
- /* Used by PTR_IN/OUT, ENUM_IN and IDR */
+ /*
+ * ptr to command, inline data, idr/fd or
+ * ptr to __u32 array of IDRs
+ */
__aligned_u64 data;
/* Used by FD_IN and FD_OUT */
__s64 data_s64;
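Editorial note: with len now also covering IDR arrays, a user-space library could describe a list of object handles roughly as below; the attribute id, flags handling and handle array are illustrative.

#include <stdint.h>
#include <rdma/rdma_user_ioctl_cmds.h>

static void fill_idrs_attr(struct ib_uverbs_attr *attr, __u16 attr_id,
			   const __u32 *handles, unsigned int nhandles)
{
	attr->attr_id = attr_id;
	attr->len = nhandles * sizeof(__u32);	/* size of the IDR array */
	attr->flags = 0;			/* simplified for the sketch */
	attr->data = (__aligned_u64)(uintptr_t)handles;	/* ptr to __u32 array of IDRs */
}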
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
new file mode 100644
index 000000000000..17c7abd0803a
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * UFS Transport SGIO v4 BSG Message Support
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef SCSI_BSG_UFS_H
+#define SCSI_BSG_UFS_H
+
+#include <linux/types.h>
+/*
+ * This file is intended to be included by both the kernel and user space
+ */
+
+#define UFS_CDB_SIZE 16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
+#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ __be32 exp_data_transfer_len;
+ __u8 cdb[UFS_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ struct utp_upiu_query tr;
+ /* use utp_upiu_query to host the 4 dwords of uic command */
+ struct utp_upiu_query uc;
+ };
+};
+
+/* request (CDB) structure of the sg_io_v4 */
+struct ufs_bsg_request {
+ __u32 msgcode;
+ struct utp_upiu_req upiu_req;
+};
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct ufs_bsg_reply {
+ /*
+ * The completion result. Result exists in two forms:
+ * if negative, it is an -Exxx system errno value. There will
+ * be no further reply information supplied.
+ * else, it's the 4-byte scsi error result, with driver, host,
+ * msg and status fields. The per-msgcode reply structure
+ * will contain valid data.
+ */
+ __u32 result;
+
+ /* If there was reply_payload, how much was received? */
+ __u32 reply_payload_rcv_len;
+
+ struct utp_upiu_req upiu_rsp;
+};
+#endif /* SCSI_BSG_UFS_H */
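Editorial note: a hedged user-space sketch of driving this interface: wrap a UIC command (4 dwords, as noted in the header above) in a ufs_bsg_request and submit it through an already opened UFS BSG node with SG_IO. The dword contents and the return handling are placeholders.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_ufs.h>

int ufs_bsg_send_uic(int fd, const __u32 uic_dwords[4])
{
	struct ufs_bsg_request req;
	struct ufs_bsg_reply rsp;
	struct sg_io_v4 io;

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));
	memset(&io, 0, sizeof(io));

	req.msgcode = UPIU_TRANSACTION_UIC_CMD;
	/* The uc member hosts the 4 dwords of the UIC command. */
	memcpy(&req.upiu_req.uc, uic_dwords, UIC_CMD_SIZE);

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request_len = sizeof(req);
	io.request = (__u64)(uintptr_t)&req;
	io.max_response_len = sizeof(rsp);
	io.response = (__u64)(uintptr_t)&rsp;

	if (ioctl(fd, SG_IO, &io) < 0)
		return -1;

	return rsp.result;	/* negative errno or SCSI result, see comment above */
}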
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index ed0a120d4f08..404d4b9ffe76 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -752,7 +752,7 @@ struct snd_timer_info {
#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
struct snd_timer_params {
- unsigned int flags; /* flags - SNDRV_MIXER_PSFLG_* */
+ unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
unsigned int ticks; /* requested resolution in ticks */
unsigned int queue_size; /* total size of queue (32-1024) */
unsigned int reserved0; /* reserved, was: failure locations */
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index f0a547d86679..ae12826ed641 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -12,6 +12,7 @@
#define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
+#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -53,12 +54,24 @@ struct snd_firewire_event_motu_notification {
__u32 message; /* MOTU-specific bits. */
};
+struct snd_firewire_tascam_change {
+ unsigned int index;
+ __be32 before;
+ __be32 after;
+};
+
+struct snd_firewire_event_tascam_control {
+ unsigned int type;
+ struct snd_firewire_tascam_change changes[0];
+};
+
union snd_firewire_event {
struct snd_firewire_event_common common;
struct snd_firewire_event_lock_status lock_status;
struct snd_firewire_event_dice_notification dice_notification;
struct snd_firewire_event_efw_response efw_response;
struct snd_firewire_event_digi00x_message digi00x_message;
+ struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
};
@@ -66,6 +79,7 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info)
#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
+#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state)
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
@@ -88,4 +102,10 @@ struct snd_firewire_get_info {
* Returns -EBUSY if the driver is already streaming.
*/
+#define SNDRV_FIREWIRE_TASCAM_STATE_COUNT 64
+
+struct snd_firewire_tascam_state {
+ __be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
+};
+
#endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */
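Editorial note: a hedged user-space sketch of the new TASCAM state ioctl; the hwdep node path is an assumption and the state image is simply printed raw.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/firewire.h>

int dump_tascam_state(void)
{
	struct snd_firewire_tascam_state state;
	int fd = open("/dev/snd/hwC0D0", O_RDONLY);	/* hwdep node, path is illustrative */
	int ret;

	if (fd < 0)
		return -1;

	ret = ioctl(fd, SNDRV_FIREWIRE_IOCTL_TASCAM_STATE, &state);
	close(fd);
	if (ret < 0)
		return -1;

	/* data[] holds big-endian quadlets; shown raw here. */
	printf("state[0] = 0x%08x\n", (unsigned int)state.data[0]);
	return 0;
}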
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f58cafa42f18..f39352cef382 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -10,6 +10,8 @@
#ifndef __HDA_TPLG_INTERFACE_H__
#define __HDA_TPLG_INTERFACE_H__
+#include <linux/types.h>
+
/*
* Default types range from 0~12. type can range from 0 to 0xff
* SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
};
struct skl_dfw_algo_data {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 max;
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 max;
char params[0];
} __packed;
@@ -163,68 +165,68 @@ enum skl_tuple_type {
/* v4 configuration data */
struct skl_dfw_v4_module_pin {
- u16 module_id;
- u16 instance_id;
+ __u16 module_id;
+ __u16 instance_id;
} __packed;
struct skl_dfw_v4_module_fmt {
- u32 channels;
- u32 freq;
- u32 bit_depth;
- u32 valid_bit_depth;
- u32 ch_cfg;
- u32 interleaving_style;
- u32 sample_type;
- u32 ch_map;
+ __u32 channels;
+ __u32 freq;
+ __u32 bit_depth;
+ __u32 valid_bit_depth;
+ __u32 ch_cfg;
+ __u32 interleaving_style;
+ __u32 sample_type;
+ __u32 ch_map;
} __packed;
struct skl_dfw_v4_module_caps {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 caps_size;
- u32 caps[HDA_SST_CFG_MAX];
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 caps_size;
+ __u32 caps[HDA_SST_CFG_MAX];
} __packed;
struct skl_dfw_v4_pipe {
- u8 pipe_id;
- u8 pipe_priority;
- u16 conn_type:4;
- u16 rsvd:4;
- u16 memory_pages:8;
+ __u8 pipe_id;
+ __u8 pipe_priority;
+ __u16 conn_type:4;
+ __u16 rsvd:4;
+ __u16 memory_pages:8;
} __packed;
struct skl_dfw_v4_module {
char uuid[SKL_UUID_STR_SZ];
- u16 module_id;
- u16 instance_id;
- u32 max_mcps;
- u32 mem_pages;
- u32 obs;
- u32 ibs;
- u32 vbus_id;
-
- u32 max_in_queue:8;
- u32 max_out_queue:8;
- u32 time_slot:8;
- u32 core_id:4;
- u32 rsvd1:4;
-
- u32 module_type:8;
- u32 conn_type:4;
- u32 dev_type:4;
- u32 hw_conn_type:4;
- u32 rsvd2:12;
-
- u32 params_fixup:8;
- u32 converter:8;
- u32 input_pin_type:1;
- u32 output_pin_type:1;
- u32 is_dynamic_in_pin:1;
- u32 is_dynamic_out_pin:1;
- u32 is_loadable:1;
- u32 rsvd3:11;
+ __u16 module_id;
+ __u16 instance_id;
+ __u32 max_mcps;
+ __u32 mem_pages;
+ __u32 obs;
+ __u32 ibs;
+ __u32 vbus_id;
+
+ __u32 max_in_queue:8;
+ __u32 max_out_queue:8;
+ __u32 time_slot:8;
+ __u32 core_id:4;
+ __u32 rsvd1:4;
+
+ __u32 module_type:8;
+ __u32 conn_type:4;
+ __u32 dev_type:4;
+ __u32 hw_conn_type:4;
+ __u32 rsvd2:12;
+
+ __u32 params_fixup:8;
+ __u32 converter:8;
+ __u32 input_pin_type:1;
+ __u32 output_pin_type:1;
+ __u32 is_dynamic_in_pin:1;
+ __u32 is_dynamic_out_pin:1;
+ __u32 is_loadable:1;
+ __u32 rsvd3:11;
struct skl_dfw_v4_pipe pipe;
struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index abbad94e14a1..e582e8e7527a 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -246,6 +246,9 @@ struct ipu_image {
struct v4l2_rect rect;
dma_addr_t phys0;
dma_addr_t phys1;
+ /* chroma plane offset overrides */
+ u32 u_offset;
+ u32 v_offset;
};
void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic,
int out_width, int out_height,
enum ipu_color_space in_cs,
enum ipu_color_space out_cs);
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc);
int ipu_ic_task_graphics_init(struct ipu_ic *ic,
enum ipu_color_space in_g_cs,
bool galpha_en, u32 galpha,
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index d8fc96ed11e9..4ba5efe8d086 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -198,6 +198,7 @@
#define WINCONx_BURSTLEN_8WORD (0x1 << 9)
#define WINCONx_BURSTLEN_4WORD (0x2 << 9)
#define WINCONx_ENWIN (1 << 0)
+#define WINCONx_BLEND_MODE_MASK (0xc2)
#define WINCON0_BPPMODE_MASK (0xf << 2)
#define WINCON0_BPPMODE_SHIFT 2
@@ -211,6 +212,7 @@
#define WINCON0_BPPMODE_24BPP_888 (0xb << 2)
#define WINCON1_LOCALSEL_CAMIF (1 << 23)
+#define WINCON1_ALPHA_MUL (1 << 7)
#define WINCON1_BLD_PIX (1 << 6)
#define WINCON1_BPPMODE_MASK (0xf << 2)
#define WINCON1_BPPMODE_SHIFT 2
@@ -437,6 +439,14 @@
#define WPALCON_W0PAL_16BPP_565 (0x6 << 0)
/* Blending equation control */
+#define BLENDEQx(_win) (0x244 + ((_win - 1) * 4))
+#define BLENDEQ_ZERO 0x0
+#define BLENDEQ_ONE 0x1
+#define BLENDEQ_ALPHA_A 0x2
+#define BLENDEQ_ONE_MINUS_ALPHA_A 0x3
+#define BLENDEQ_ALPHA0 0x6
+#define BLENDEQ_B_FUNC_F(_x) (_x << 6)
+#define BLENDEQ_A_FUNC_F(_x) (_x << 0)
#define BLENDCON 0x260
#define BLENDCON_NEW_MASK (1 << 0)
#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
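Editorial note: for illustration, the new blend-equation macros compose into a per-window register value as below; using ALPHA_A for operand A and its complement for operand B is an assumption about the desired blend, not something the header mandates.

#include <linux/types.h>
#include <video/samsung_fimd.h>

/* Source-alpha blending for window 1: the value would be written to BLENDEQx(1). */
static u32 fimd_blendeq_src_alpha(void)
{
	return BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A) |
	       BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
}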
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 0cabe6b09095..7d09e54ae54e 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -20,7 +20,6 @@ struct dloarea {
struct urb_node {
struct list_head entry;
struct dlfb_data *dlfb;
- struct delayed_work release_urb_work;
struct urb *urb;
};
@@ -37,12 +36,9 @@ struct dlfb_data {
struct usb_device *udev;
struct fb_info *info;
struct urb_list urbs;
- struct kref kref;
char *backing_buffer;
int fb_count;
bool virtualized; /* true when physical usb device not present */
- struct delayed_work init_framebuffer_work;
- struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
char *edid; /* null until we read edid from hw or get from sysfs */
@@ -52,11 +48,14 @@ struct dlfb_data {
int base8;
u32 pseudo_palette[256];
int blank_mode; /*one of FB_BLANK_ */
+ struct fb_ops ops;
/* blit-only rendering path metrics, exposed through sysfs */
atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
atomic_t bytes_identical; /* saved effort with backbuffer comparison */
atomic_t bytes_sent; /* to usb, after compression including overhead */
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+ struct fb_var_screeninfo current_mode;
+ struct list_head deferred_free;
};
#define NR_USB_REQUEST_I2C_SUB_IO 0x02
@@ -87,7 +86,7 @@ struct dlfb_data {
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
#define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
/* remove these once align.h patch is taken into kernel */
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
{
}
#endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
diff --git a/include/xen/events.h b/include/xen/events.h
index c3e6bc643a7b..a48897199975 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -89,11 +89,13 @@ unsigned irq_from_evtchn(unsigned int evtchn);
int irq_from_virq(unsigned int cpu, unsigned int virq);
unsigned int evtchn_from_irq(unsigned irq);
+#ifdef CONFIG_XEN_PVHVM
/* Xen HVM evtchn vector callback */
void xen_hvm_callback_vector(void);
#ifdef CONFIG_TRACING
#define trace_xen_hvm_callback_vector xen_hvm_callback_vector
#endif
+#endif
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 648415976ead..50af9ea2ff1e 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -33,7 +33,7 @@
* | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
* | | ("xEn3" with the 0x80 bit of the "E" set).
* 4 +----------------+
- * | version | Version of this structure. Current version is 0. New
+ * | version | Version of this structure. Current version is 1. New
* | | versions are guaranteed to be backwards-compatible.
* 8 +----------------+
* | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
* 32 +----------------+
* | rsdp_paddr | Physical address of the RSDP ACPI data structure.
* 40 +----------------+
+ * | memmap_paddr | Physical address of the (optional) memory map. Only
+ * | | present in version 1 and newer of the structure.
+ * 48 +----------------+
+ * | memmap_entries | Number of entries in the memory map table. Zero
+ * | | if there is no memory map being provided. Only
+ * | | present in version 1 and newer of the structure.
+ * 52 +----------------+
+ * | reserved | Version 1 and newer only.
+ * 56 +----------------+
*
* The layout of each entry in the module structure is the following:
*
@@ -62,14 +71,52 @@
* | reserved |
* 32 +----------------+
*
+ * The layout of each entry in the memory map table is as follows:
+ *
+ * 0 +----------------+
+ * | addr | Base address
+ * 8 +----------------+
+ * | size | Size of mapping in bytes
+ * 16 +----------------+
+ * | type | Type of mapping as defined between the hypervisor
+ * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
+ * 20 +----------------+
+ * | reserved |
+ * 24 +----------------+
+ *
* The address and sizes are always a 64bit little endian unsigned integer.
*
* NB: Xen on x86 will always try to place all the data below the 4GiB
* boundary.
+ *
+ * Version numbers of the hvm_start_info structure have evolved like this:
+ *
+ * Version 0: Initial implementation.
+ *
+ * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
+ * padding) to the end of the hvm_start_info struct. These new
+ * fields can be used to pass a memory map to the guest. The
+ * memory map is optional and so guests that understand version 1
+ * of the structure must check that memmap_entries is non-zero
+ * before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
+ * The values used in the type field of the memory map table entries are
+ * defined below and match the Address Range Types as defined in the "System
+ * Address Map Interfaces" section of the ACPI Specification. Please refer to
+ * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
+ */
+#define XEN_HVM_MEMMAP_TYPE_RAM 1
+#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
+#define XEN_HVM_MEMMAP_TYPE_ACPI 3
+#define XEN_HVM_MEMMAP_TYPE_NVS 4
+#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
+#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
+#define XEN_HVM_MEMMAP_TYPE_PMEM 7
+
+/*
* C representation of the x86/HVM start info layout.
*
* The canonical definition of this layout is above, this is just a way to
@@ -86,6 +133,13 @@ struct hvm_start_info {
uint64_t cmdline_paddr; /* Physical address of the command line. */
uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
/* structure. */
+ /* All following fields only present in version 1 and newer */
+ uint64_t memmap_paddr; /* Physical address of an array of */
+ /* hvm_memmap_table_entry. */
+ uint32_t memmap_entries; /* Number of entries in the memmap table. */
+ /* Value will be zero if there is no memory */
+ /* map being provided. */
+ uint32_t reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
@@ -95,4 +149,11 @@ struct hvm_modlist_entry {
uint64_t reserved;
};
+struct hvm_memmap_table_entry {
+ uint64_t addr; /* Base address of the memory region */
+ uint64_t size; /* Size of the memory region in bytes */
+ uint32_t type; /* Mapping type */
+ uint32_t reserved; /* Must be zero for Version 1. */
+};
+
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
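Editorial note: a sketch of how a version-1 aware guest could walk the optional memory map; phys_to_virt() stands in for whatever early mapping the guest really has at this point of boot, and add_ram_region() is a hypothetical consumer.

#include <xen/interface/hvm/start_info.h>

static void walk_pvh_memmap(const struct hvm_start_info *si)
{
	const struct hvm_memmap_table_entry *e;
	uint32_t i;

	if (si->version < 1 || si->memmap_entries == 0)
		return;				/* no memory map provided */

	e = phys_to_virt(si->memmap_paddr);
	for (i = 0; i < si->memmap_entries; i++)
		if (e[i].type == XEN_HVM_MEMMAP_TYPE_RAM)
			add_ram_region(e[i].addr, e[i].size);	/* hypothetical helper */
}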
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index 4c5751c26f87..447004861f00 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -245,12 +245,6 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_map);
/*
- * Prevent the balloon driver from changing the memory reservation
- * during a driver critical region.
- */
-extern spinlock_t xen_reservation_lock;
-
-/*
* Unmaps the page appearing at a particular GPFN from the specified guest's
* pseudophysical address space.
* arg == addr of xen_remove_from_physmap_t.
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index 80b52b4945e9..a2ab516fcd2c 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
#include <xen/page.h>
+extern bool xen_scrub_pages;
+
static inline void xenmem_reservation_scrub_page(struct page *page)
{
-#ifdef CONFIG_XEN_SCRUB_PAGES
- clear_highpage(page);
-#endif
+ if (xen_scrub_pages)
+ clear_highpage(page);
}
#ifdef CONFIG_XEN_HAVE_PVMMU
diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h
new file mode 100644
index 000000000000..150ef7ec51ec
--- /dev/null
+++ b/include/xen/xen-front-pgdir-shbuf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen frontend/backend page directory based shared buffer
+ * helper module.
+ *
+ * Copyright (C) 2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
+#define __XEN_FRONT_PGDIR_SHBUF_H_
+
+#include <linux/kernel.h>
+
+#include <xen/grant_table.h>
+
+struct xen_front_pgdir_shbuf_ops;
+
+struct xen_front_pgdir_shbuf {
+ /*
+ * Number of references granted for the backend use:
+ *
+ * - for frontend allocated/imported buffers this holds the number
+ * of grant references for the page directory and the pages
+ * of the buffer
+ *
+ * - for the buffer provided by the backend this only holds the number
+ * of grant references for the page directory itself as grant
+ * references for the buffer will be provided by the backend.
+ */
+ int num_grefs;
+ grant_ref_t *grefs;
+ /* Page directory backing storage. */
+ u8 *directory;
+
+ /*
+ * Number of pages for the shared buffer itself (excluding the page
+ * directory).
+ */
+ int num_pages;
+ /*
+ * Backing storage of the shared buffer: these are the pages being
+ * shared.
+ */
+ struct page **pages;
+
+ struct xenbus_device *xb_dev;
+
+ /* These are the ops used internally depending on be_alloc mode. */
+ const struct xen_front_pgdir_shbuf_ops *ops;
+
+ /* Xen map handles for the buffer allocated by the backend. */
+ grant_handle_t *backend_map_handles;
+};
+
+struct xen_front_pgdir_shbuf_cfg {
+ struct xenbus_device *xb_dev;
+
+ /* Number of pages of the buffer backing storage. */
+ int num_pages;
+ /* Pages of the buffer to be shared. */
+ struct page **pages;
+
+ /*
+ * This is allocated outside because there are use-cases when
+ * the buffer structure is allocated as a part of a bigger one.
+ */
+ struct xen_front_pgdir_shbuf *pgdir;
+ /*
+ * Mode of grant reference sharing: if set then backend will share
+ * grant references to the buffer with the frontend.
+ */
+ int be_alloc;
+};
+
+int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
+
+grant_ref_t
+xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
+
+void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
+
+#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
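Editorial note: the typical frontend call sequence, sketched with assumed caller-provided pages and a hypothetical tell_backend() that would publish the directory grant reference (e.g. via XenStore).

#include <xen/xen-front-pgdir-shbuf.h>

static int share_buffer(struct xenbus_device *xb_dev, struct page **pages,
			int num_pages, struct xen_front_pgdir_shbuf *shbuf)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.num_pages = num_pages,
		.pages = pages,
		.pgdir = shbuf,
		.be_alloc = 0,	/* frontend allocates and grants the pages */
	};
	int ret;

	ret = xen_front_pgdir_shbuf_alloc(&cfg);
	if (ret < 0)
		return ret;

	/* Hand the first grant reference of the page directory to the backend. */
	return tell_backend(xen_front_pgdir_shbuf_get_dir_start(shbuf));	/* hypothetical */
}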
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index fd18c974a619..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -5,6 +5,7 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
+#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>
@@ -41,7 +42,7 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
@@ -60,8 +61,54 @@ static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
+static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ bool no_translate, struct page **pages)
+{
+ BUG();
+ return 0;
+}
+#endif
+
struct vm_area_struct;
+#ifdef CONFIG_XEN_AUTO_XLATE
+int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages);
+int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages);
+#else
+/*
+ * These two functions are called from arch/x86/xen/mmu.c and so stubs
+ * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
+ */
+static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
+ int nr, struct page **pages)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/*
* xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
@@ -79,12 +126,25 @@ struct vm_area_struct;
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages);
+static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *gfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+ prot, domid, pages);
+
+ /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
+ * and the later consequence, "wrong memory was mapped in", is very
+ * hard to trace back to its actual cause.
+ */
+ BUG_ON(err_ptr == NULL);
+ return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ false, pages);
+}
/*
* xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
@@ -103,10 +163,18 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
- unsigned long addr, xen_pfn_t *mfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid, struct page **pages);
+static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr, xen_pfn_t *mfn,
+ int nr, int *err_ptr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+ true, pages);
+}
/* xen_remap_domain_gfn_range() - map a range of foreign frames
* @vma: VMA to map the pages into
@@ -120,44 +188,21 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
* Returns the number of successfully mapped frames, or a -ve error
* code.
*/
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t gfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages);
-int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
- int numpgs, struct page **pages);
-
-#ifdef CONFIG_XEN_AUTO_XLATE
-int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned domid,
- struct page **pages);
-int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
- int nr, struct page **pages);
-#else
-/*
- * These two functions are called from arch/x86/xen/mmu.c and so stubs
- * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
- */
-static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t *gfn, int nr,
- int *err_ptr, pgprot_t prot,
- unsigned int domid,
- struct page **pages)
+static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t gfn, int nr,
+ pgprot_t prot, unsigned int domid,
+ struct page **pages)
{
- return -EOPNOTSUPP;
-}
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
-static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
- int nr, struct page **pages)
-{
- return -EOPNOTSUPP;
+ return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+ pages);
}
-#endif
+
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+ int numpgs, struct page **pages);
int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
unsigned long nr_grant_frames);
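Editorial note: a thin, hedged sketch of a privcmd-style caller now that the gfn-array helper dispatches between the auto-translated and PV paths internally; the surrounding setup of vma, gfns and err is assumed.

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/xen-ops.h>

static int map_foreign_gfns(struct vm_area_struct *vma, unsigned long addr,
			    xen_pfn_t *gfns, int nr, int *err,
			    unsigned int domid, struct page **pages)
{
	int mapped;

	mapped = xen_remap_domain_gfn_array(vma, addr, gfns, nr, err,
					    vma->vm_page_prot, domid, pages);
	return mapped < 0 ? mapped : (mapped == nr ? 0 : -EFAULT);
}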
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 1e1d9bd0bd37..0e2156786ad2 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,9 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#include <xen/interface/hvm/start_info.h>
+extern struct hvm_start_info pvh_start_info;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
@@ -39,4 +42,8 @@ extern uint32_t xen_start_flags;
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
+struct bio_vec;
+bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
#endif /* _XEN_XEN_H */