Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/export.h | 8
-rw-r--r--  include/asm-generic/mm_hooks.h | 5
-rw-r--r--  include/drm/ati_pcigart.h | 31
-rw-r--r--  include/drm/bridge/dw_mipi_dsi.h | 9
-rw-r--r--  include/drm/drm_atomic.h | 62
-rw-r--r--  include/drm/drm_atomic_helper.h | 8
-rw-r--r--  include/drm/drm_atomic_state_helper.h | 6
-rw-r--r--  include/drm/drm_bridge.h | 136
-rw-r--r--  include/drm/drm_color_mgmt.h | 25
-rw-r--r--  include/drm/drm_connector.h | 24
-rw-r--r--  include/drm/drm_dp_helper.h | 12
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 32
-rw-r--r--  include/drm/drm_encoder.h | 7
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 2
-rw-r--r--  include/drm/drm_fb_helper.h | 40
-rw-r--r--  include/drm/drm_file.h | 3
-rw-r--r--  include/drm/drm_fourcc.h | 8
-rw-r--r--  include/drm/drm_gem.h | 4
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 8
-rw-r--r--  include/drm/drm_legacy.h | 29
-rw-r--r--  include/drm/drm_mipi_dsi.h | 4
-rw-r--r--  include/drm/drm_of.h | 21
-rw-r--r--  include/drm/drm_panel.h | 58
-rw-r--r--  include/drm/drm_pci.h | 19
-rw-r--r--  include/drm/drm_print.h | 304
-rw-r--r--  include/drm/drm_rect.h | 2
-rw-r--r--  include/drm/drm_scdc_helper.h | 6
-rw-r--r--  include/drm/drm_util.h | 2
-rw-r--r--  include/drm/gpu_scheduler.h | 22
-rw-r--r--  include/drm/i915_pciids.h | 31
-rw-r--r--  include/drm/task_barrier.h | 107
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 10
-rw-r--r--  include/kunit/assert.h | 3
-rw-r--r--  include/kunit/string-stream.h | 51
-rw-r--r--  include/kunit/test.h | 37
-rw-r--r--  include/kunit/try-catch.h | 10
-rw-r--r--  include/linux/attribute_container.h | 7
-rw-r--r--  include/linux/backing-dev.h | 10
-rw-r--r--  include/linux/bitops.h | 1
-rw-r--r--  include/linux/blkdev.h | 7
-rw-r--r--  include/linux/compat.h | 18
-rw-r--r--  include/linux/console.h | 2
-rw-r--r--  include/linux/context_tracking.h | 9
-rw-r--r--  include/linux/cpufreq.h | 32
-rw-r--r--  include/linux/dma-buf.h | 27
-rw-r--r--  include/linux/dma-heap.h | 59
-rw-r--r--  include/linux/eventpoll.h | 9
-rw-r--r--  include/linux/export.h | 33
-rw-r--r--  include/linux/f2fs_fs.h | 5
-rw-r--r--  include/linux/falloc.h | 2
-rw-r--r--  include/linux/fb.h | 4
-rw-r--r--  include/linux/fcntl.h | 16
-rw-r--r--  include/linux/file.h | 2
-rw-r--r--  include/linux/fs.h | 10
-rw-r--r--  include/linux/host1x.h | 28
-rw-r--r--  include/linux/huge_mm.h | 6
-rw-r--r--  include/linux/hyperv.h | 4
-rw-r--r--  include/linux/ide.h | 6
-rw-r--r--  include/linux/io-mapping.h | 5
-rw-r--r--  include/linux/jbd2.h | 1
-rw-r--r--  include/linux/jiffies.h | 20
-rw-r--r--  include/linux/kvm_host.h | 40
-rw-r--r--  include/linux/kvm_types.h | 9
-rw-r--r--  include/linux/leds-bd2802.h | 1
-rw-r--r--  include/linux/leds.h | 6
-rw-r--r--  include/linux/libata.h | 6
-rw-r--r--  include/linux/lockdep.h | 8
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h | 11
-rw-r--r--  include/linux/memblock.h | 7
-rw-r--r--  include/linux/memory.h | 29
-rw-r--r--  include/linux/memory_hotplug.h | 3
-rw-r--r--  include/linux/mfd/db8500-prcmu.h | 18
-rw-r--r--  include/linux/mfd/dbx500-prcmu.h | 30
-rw-r--r--  include/linux/mfd/rohm-bd70528.h | 19
-rw-r--r--  include/linux/mfd/rohm-bd71828.h | 423
-rw-r--r--  include/linux/mfd/rohm-bd718x7.h | 6
-rw-r--r--  include/linux/mfd/rohm-generic.h | 70
-rw-r--r--  include/linux/mfd/rohm-shared.h | 21
-rw-r--r--  include/linux/mfd/syscon.h | 14
-rw-r--r--  include/linux/mfd/wcd934x/registers.h | 531
-rw-r--r--  include/linux/mfd/wcd934x/wcd934x.h | 31
-rw-r--r--  include/linux/mlx4/cq.h | 5
-rw-r--r--  include/linux/mlx5/driver.h | 5
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 24
-rw-r--r--  include/linux/mm.h | 105
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/mmu_notifier.h | 86
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/module.h | 14
-rw-r--r--  include/linux/moduleparam.h | 82
-rw-r--r--  include/linux/mtd/spi-nor.h | 16
-rw-r--r--  include/linux/namei.h | 12
-rw-r--r--  include/linux/page-isolation.h | 4
-rw-r--r--  include/linux/pagemap.h | 28
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/percpu-refcount.h | 26
-rw-r--r--  include/linux/platform_data/bd6107.h | 1
-rw-r--r--  include/linux/platform_data/tc35876x.h | 11
-rw-r--r--  include/linux/power/max17042_battery.h | 48
-rw-r--r--  include/linux/power_supply.h | 10
-rw-r--r--  include/linux/proc_ns.h | 4
-rw-r--r--  include/linux/random.h | 24
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 53
-rw-r--r--  include/linux/soc/mediatek/mtk_sip_svc.h | 25
-rw-r--r--  include/linux/sunrpc/cache.h | 42
-rw-r--r--  include/linux/sunrpc/gss_api.h | 4
-rw-r--r--  include/linux/sunrpc/gss_krb5.h | 2
-rw-r--r--  include/linux/swab.h | 1
-rw-r--r--  include/linux/switchtec.h | 160
-rw-r--r--  include/linux/syscalls.h | 13
-rw-r--r--  include/linux/tcp.h | 2
-rw-r--r--  include/linux/thermal.h | 11
-rw-r--r--  include/linux/transport_class.h | 6
-rw-r--r--  include/linux/units.h | 84
-rw-r--r--  include/linux/zlib.h | 6
-rw-r--r--  include/media/cec-notifier.h | 27
-rw-r--r--  include/media/cec.h | 46
-rw-r--r--  include/media/dvb-usb-ids.h | 2
-rw-r--r--  include/media/v4l2-common.h | 21
-rw-r--r--  include/media/v4l2-device.h | 12
-rw-r--r--  include/media/v4l2-ioctl.h | 55
-rw-r--r--  include/media/v4l2-rect.h | 8
-rw-r--r--  include/media/v4l2-subdev.h | 2
-rw-r--r--  include/net/mptcp.h | 9
-rw-r--r--  include/net/udp.h | 7
-rw-r--r--  include/rdma/ib_cm.h | 34
-rw-r--r--  include/rdma/ib_verbs.h | 42
-rw-r--r--  include/rdma/iba.h | 146
-rw-r--r--  include/rdma/ibta_vol1_c12.h | 213
-rw-r--r--  include/rdma/rdmavt_qp.h | 22
-rw-r--r--  include/rdma/uverbs_named_ioctl.h | 6
-rw-r--r--  include/rdma/uverbs_std_types.h | 13
-rw-r--r--  include/rdma/uverbs_types.h | 34
-rw-r--r--  include/scsi/scsi_device.h | 1
-rw-r--r--  include/scsi/scsi_ioctl.h | 1
-rw-r--r--  include/scsi/sg.h | 30
-rw-r--r--  include/trace/events/ext4.h | 27
-rw-r--r--  include/trace/events/f2fs.h | 103
-rw-r--r--  include/trace/events/io_uring.h | 13
-rw-r--r--  include/trace/events/kmem.h | 4
-rw-r--r--  include/trace/events/rdma_core.h | 394
-rw-r--r--  include/trace/events/v4l2.h | 2
-rw-r--r--  include/trace/events/writeback.h | 37
-rw-r--r--  include/uapi/asm-generic/unistd.h | 7
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 3
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 24
-rw-r--r--  include/uapi/drm/exynos_drm.h | 2
-rw-r--r--  include/uapi/drm/i915_drm.h | 32
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 1
-rw-r--r--  include/uapi/drm/vmwgfx_drm.h | 17
-rw-r--r--  include/uapi/linux/acct.h | 2
-rw-r--r--  include/uapi/linux/capability.h | 1
-rw-r--r--  include/uapi/linux/dma-heap.h | 53
-rw-r--r--  include/uapi/linux/fcntl.h | 2
-rw-r--r--  include/uapi/linux/io_uring.h | 73
-rw-r--r--  include/uapi/linux/openat2.h | 39
-rw-r--r--  include/uapi/linux/pci_regs.h | 1
-rw-r--r--  include/uapi/linux/prctl.h | 4
-rw-r--r--  include/uapi/linux/random.h | 4
-rw-r--r--  include/uapi/linux/swab.h | 10
-rw-r--r--  include/uapi/linux/switchtec_ioctl.h | 17
-rw-r--r--  include/uapi/linux/sysctl.h | 2
-rw-r--r--  include/uapi/linux/taskstats.h | 6
-rw-r--r--  include/uapi/linux/time_types.h | 5
-rw-r--r--  include/uapi/linux/timex.h | 2
-rw-r--r--  include/uapi/linux/videodev2.h | 29
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_cmds.h | 15
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_verbs.h | 12
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_cmds.h | 17
-rw-r--r--  include/uapi/rdma/qedr-abi.h | 18
-rw-r--r--  include/uapi/scsi/scsi_bsg_ufs.h | 3
-rw-r--r--  include/video/mipi_display.h | 24
173 files changed, 4338 insertions, 1073 deletions
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index afddc5442e92..365345f9a9e3 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -27,9 +27,11 @@
.endm
/*
- * note on .section use: @progbits vs %progbits nastiness doesn't matter,
- * since we immediately emit into those sections anyway.
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
*/
+
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
.section ___ksymtab\sec+\name,"a"
@@ -37,7 +39,7 @@
__ksymtab_\name:
__put \val, __kstrtab_\name
.previous
- .section __ksymtab_strings,"a"
+ .section __ksymtab_strings,"aMS",%progbits,1
__kstrtab_\name:
.asciz "\name"
.previous
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 6736ed2f632b..4dbb177d1150 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -22,11 +22,6 @@ static inline void arch_unmap(struct mm_struct *mm,
{
}
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
deleted file mode 100644
index a728a1364e66..000000000000
--- a/include/drm/ati_pcigart.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DRM_ATI_PCIGART_H
-#define DRM_ATI_PCIGART_H
-
-#include <drm/drm_legacy.h>
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-#define DRM_ATI_GART_PCI 1
-#define DRM_ATI_GART_PCIE 2
-#define DRM_ATI_GART_IGP 3
-
-struct drm_ati_pcigart_info {
- int gart_table_location;
- int gart_reg_if;
- void *addr;
- dma_addr_t bus_addr;
- dma_addr_t table_mask;
- struct drm_dma_handle *table_handle;
- struct drm_local_map mapping;
- int table_size;
-};
-
-extern int drm_ati_pcigart_init(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-
-#endif
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 94cc64a342e1..b0e390b3288e 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi;
struct mipi_dsi_device;
struct platform_device;
+struct dw_mipi_dsi_dphy_timing {
+ u16 data_hs2lp;
+ u16 data_lp2hs;
+ u16 clk_hs2lp;
+ u16 clk_lp2hs;
+};
+
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
void (*power_on)(void *priv_data);
@@ -27,6 +34,8 @@ struct dw_mipi_dsi_phy_ops {
const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
unsigned int *lane_mbps);
+ int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing);
};
struct dw_mipi_dsi_host_ops {
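
Illustrative sketch (not part of the patch): how a glue driver might implement the new get_timing hook added to struct dw_mipi_dsi_phy_ops. The foo_* names and timing values are made up; a real driver would derive the timings from lane_mbps.

#include <drm/bridge/dw_mipi_dsi.h>

/* Hypothetical glue driver callback: report fixed D-PHY timing values. */
static int foo_dphy_get_timing(void *priv_data, unsigned int lane_mbps,
			       struct dw_mipi_dsi_dphy_timing *timing)
{
	/* Made-up numbers; real drivers compute these from lane_mbps. */
	timing->clk_lp2hs = 0x40;
	timing->clk_hs2lp = 0x40;
	timing->data_lp2hs = 0x40;
	timing->data_hs2lp = 0x40;

	return 0;
}

static const struct dw_mipi_dsi_phy_ops foo_dphy_ops = {
	/* init, power_on, power_off and get_lane_mbps omitted for brevity */
	.get_timing = foo_dphy_get_timing,
};
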
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 927e1205d7aa..951dfb15c27b 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -35,7 +35,7 @@
* struct drm_crtc_commit - track modeset commits on a CRTC
*
* This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
+ * a per-CRTC basis. Since updating the list should never block, this structure
* is reference counted to allow waiters to safely wait on an event to complete,
* without holding any locks.
*
@@ -60,8 +60,8 @@
* wait for flip_done <----
* clean up atomic state
*
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
+ * The important bit to know is that &cleanup_done is the terminal event, but the
+ * ordering between &flip_done and &hw_done is entirely up to the specific driver
* and modeset state change.
*
* For an implementation of how to use this look at
@@ -92,6 +92,9 @@ struct drm_crtc_commit {
* commit is sent to userspace, or when an out-fence is singalled. Note
* that for most hardware, in most cases this happens after @hw_done is
* signalled.
+ *
+ * Completion of this stage is signalled implicitly by calling
+ * drm_crtc_send_vblank_event() on &drm_crtc_state.event.
*/
struct completion flip_done;
@@ -107,6 +110,9 @@ struct drm_crtc_commit {
* Note that this does not need to include separately reference-counted
* resources like backing storage buffer pinning, or runtime pm
* management.
+ *
+ * Drivers should call drm_atomic_helper_commit_hw_done() to signal
+ * completion of this stage.
*/
struct completion hw_done;
@@ -118,6 +124,9 @@ struct drm_crtc_commit {
* a vblank wait completed it might be a bit later. This completion is
* useful to throttle updates and avoid hardware updates getting ahead
* of the buffer cleanup too much.
+ *
+ * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal
+ * completion of this stage.
*/
struct completion cleanup_done;
@@ -354,7 +363,7 @@ struct drm_atomic_state {
* When a connector or plane is not bound to any CRTC, it's still important
* to preserve linearity to prevent the atomic states from being freed to early.
*
- * This commit (if set) is not bound to any crtc, but will be completed when
+ * This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
*/
struct drm_crtc_commit *fake_commit;
@@ -467,12 +476,12 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
- * drm_atomic_get_existing_crtc_state - get crtc state, if it exists
+ * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the crtc state for the given crtc, or NULL
- * if the crtc is not part of the global atomic state.
+ * This function returns the CRTC state for the given CRTC, or NULL
+ * if the CRTC is not part of the global atomic state.
*
* This function is deprecated, @drm_atomic_get_old_crtc_state or
* @drm_atomic_get_new_crtc_state should be used instead.
@@ -485,12 +494,12 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_old_crtc_state - get old crtc state, if it exists
+ * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the old crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the old CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
@@ -499,12 +508,12 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
return state->crtcs[drm_crtc_index(crtc)].old_state;
}
/**
- * drm_atomic_get_new_crtc_state - get new crtc state, if it exists
+ * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the new crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the new CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
@@ -693,6 +702,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
(old_connector_state) = (__state)->connectors[__i].old_state, \
(new_connector_state) = (__state)->connectors[__i].new_state, 1))
@@ -714,6 +724,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
(old_connector_state) = (__state)->connectors[__i].old_state, 1))
/**
@@ -734,7 +745,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
- (new_connector_state) = (__state)->connectors[__i].new_state, 1))
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, \
+ (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -754,7 +767,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, \
+ (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
(new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
/**
@@ -793,7 +808,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, \
+ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
@@ -813,6 +830,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->planes[__i].ptr && \
((plane) = (__state)->planes[__i].ptr, \
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
(old_plane_state) = (__state)->planes[__i].old_state,\
(new_plane_state) = (__state)->planes[__i].new_state, 1))
@@ -873,7 +891,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->planes[__i].ptr && \
((plane) = (__state)->planes[__i].ptr, \
- (new_plane_state) = (__state)->planes[__i].new_state, 1))
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+ (new_plane_state) = (__state)->planes[__i].new_state, \
+ (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
@@ -958,11 +978,11 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
}
/**
- * drm_atomic_crtc_effectively_active - compute whether crtc is actually active
+ * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active
* @state: &drm_crtc_state for the CRTC
*
* When in self refresh mode, the crtc_state->active value will be false, since
- * the crtc is off. However in some cases we're interested in whether the crtc
+ * the CRTC is off. However in some cases we're interested in whether the CRTC
* is active, or effectively active (ie: it's connected to an active display).
* In these cases, use this function instead of just checking active.
*/
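
Illustrative sketch (not part of the patch): the (void) casts above only silence unused-but-set-variable warnings; the iteration macros keep their existing call signature. A minimal example of using one of them in a driver's atomic path; foo_log_crtc_changes() is hypothetical.

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>

static void foo_log_crtc_changes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if (old_crtc_state->active != new_crtc_state->active)
			DRM_DEV_DEBUG_ATOMIC(crtc->dev->dev,
					     "[CRTC:%d] active %d -> %d\n",
					     crtc->base.id,
					     old_crtc_state->active,
					     new_crtc_state->active);
	}
}
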
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index bf4e07141d81..9db3cac48f4f 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -152,7 +152,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
* @plane: the loop cursor
- * @crtc: the crtc whose planes are iterated
+ * @crtc: the CRTC whose planes are iterated
*
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
@@ -166,7 +166,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
/**
* drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
* @plane: the loop cursor
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during for example
@@ -180,7 +180,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
* @plane: the loop cursor
* @plane_state: loop cursor for the plane's state, must be const
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during for example
@@ -189,7 +189,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
* const plane_state. This is useful when a driver just wants to peek at other
- * active planes on this crtc, but does not need to change it.
+ * active planes on this CRTC, but does not need to change it.
*/
#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index e4577cc11689..8171dea4cc22 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -37,6 +37,8 @@ struct drm_private_state;
struct drm_modeset_acquire_ctx;
struct drm_device;
+void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state,
+ struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -48,6 +50,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state,
+ struct drm_plane *plane);
void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
@@ -59,6 +63,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
+void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector);
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
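
Illustrative sketch (not part of the patch): a driver with a subclassed CRTC state could use the new __drm_atomic_helper_crtc_state_reset() from its reset hook. The foo_* names and the scaler_mode field are made up, and teardown of any previously installed state is omitted.

#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 scaler_mode;		/* made-up driver-private field */
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	/* Fill in defaults for the embedded base state ... */
	__drm_atomic_helper_crtc_state_reset(&state->base, crtc);
	/* ... and install it as the CRTC's current state. */
	crtc->state = &state->base;
}
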
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index c0a2286a81e9..694e153a7531 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
@@ -254,14 +255,15 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
- * would be prudent to also provide an implementation of @pre_enable if
- * you are expecting driver calls into &drm_bridge_pre_enable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
+ * implementation of @pre_enable if you are expecting driver calls into
+ * &drm_bridge_chain_pre_enable.
*
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_enable:
@@ -279,14 +281,14 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_enable. It
- * would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_enable.
+ * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
+ * It would be prudent to also provide an implementation of @enable if
+ * you are expecting driver calls into &drm_bridge_chain_enable.
*
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_disable:
*
@@ -301,14 +303,15 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_disable. It
- * would be prudent to also provide an implementation of @disable if
- * you are expecting driver calls into &drm_bridge_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_disable. It would be prudent to also provide an
+ * implementation of @disable if you are expecting driver calls into
+ * &drm_bridge_chain_disable.
*
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_post_disable:
@@ -325,15 +328,16 @@ struct drm_bridge_funcs {
* called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_post_disable.
* It would be prudent to also provide an implementation of
* @post_disable if you are expecting driver calls into
- * &drm_bridge_post_disable.
+ * &drm_bridge_chain_post_disable.
*
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
};
/**
@@ -380,8 +384,8 @@ struct drm_bridge {
struct drm_device *dev;
/** @encoder: encoder to which this bridge is connected */
struct drm_encoder *encoder;
- /** @next: the next bridge in the encoder chain */
- struct drm_bridge *next;
+ /** @chain_node: used to form a bridge chain */
+ struct list_head chain_node;
#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
@@ -406,27 +410,86 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
+/**
+ * drm_bridge_get_next_bridge() - Get the next bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge(struct drm_bridge *bridge)
+{
+ if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
+ return NULL;
+
+ return list_next_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the previous bridge in the chain, or NULL if @bridge is the first.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
+{
+ if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
+ return NULL;
+
+ return list_prev_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
+ * @encoder: encoder object
+ *
+ * RETURNS:
+ * the first bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
+{
+ return list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+}
+
+/**
+ * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * @encoder: the encoder to iterate bridges on
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges present in the bridge chain attached to @encoder.
+ */
+#define drm_for_each_bridge_in_chain(encoder, bridge) \
+ list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode);
+void drm_bridge_chain_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_chain_enable(struct drm_bridge *bridge);
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
@@ -438,6 +501,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
#endif
#endif
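
Illustrative sketch (not part of the patch): with bridges now kept on the encoder's bridge_chain list, walking the chain uses the new iterator and lookup helpers. The foo_* functions are hypothetical.

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

/* Count the bridges attached to an encoder. */
static unsigned int foo_count_bridges(struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;
	unsigned int n = 0;

	drm_for_each_bridge_in_chain(encoder, bridge)
		n++;

	return n;
}

/* The first bridge (or NULL if none is attached) is also easy to get at. */
static struct drm_bridge *foo_first_bridge(struct drm_encoder *encoder)
{
	return drm_bridge_chain_get_first_bridge(encoder);
}
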
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index d1c662d92ab7..81c298488b0c 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -29,7 +29,30 @@
struct drm_crtc;
struct drm_plane;
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
+/**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
+{
+ u32 val = user_input;
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ /* Round only if we're not using full precision. */
+ if (bit_precision < 16) {
+ val += 1UL << (16 - bit_precision - 1);
+ val >>= 16 - bit_precision;
+ }
+
+ return clamp_val(val, 0, max);
+}
+
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
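
Illustrative sketch (not part of the patch): feeding a userspace gamma LUT through the now-inline drm_color_lut_extract(). This assumes a 256-entry legacy gamma LUT and a 10-bit hardware LUT; foo_build_gamma() is hypothetical and no register access is shown.

#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>

static void foo_build_gamma(const struct drm_crtc_state *crtc_state,
			    u16 hw_lut[][3])
{
	const struct drm_color_lut *lut;
	int i;

	if (!crtc_state->gamma_lut)
		return;

	lut = crtc_state->gamma_lut->data;

	/* Assume 256 entries and 10 bits of hardware precision. */
	for (i = 0; i < 256; i++) {
		hw_lut[i][0] = drm_color_lut_extract(lut[i].red, 10);
		hw_lut[i][1] = drm_color_lut_extract(lut[i].green, 10);
		hw_lut[i][2] = drm_color_lut_extract(lut[i].blue, 10);
	}
}
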
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 5f8c3389d46f..221910948b37 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -188,19 +188,19 @@ struct drm_hdmi_info {
/**
* @y420_vdb_modes: bitmap of modes which can support ycbcr420
- * output only (not normal RGB/YCBCR444/422 outputs). There are total
- * 107 VICs defined by CEA-861-F spec, so the size is 128 bits to map
- * upto 128 VICs;
+ * output only (not normal RGB/YCBCR444/422 outputs). The max VIC
+ * defined by the CEA-861-G spec is 219, so the size is 256 bits to map
+ * up to 256 VICs.
*/
- unsigned long y420_vdb_modes[BITS_TO_LONGS(128)];
+ unsigned long y420_vdb_modes[BITS_TO_LONGS(256)];
/**
* @y420_cmdb_modes: bitmap of modes which can support ycbcr420
- * output also, along with normal HDMI outputs. There are total 107
- * VICs defined by CEA-861-F spec, so the size is 128 bits to map upto
- * 128 VICs;
+ * output also, along with normal HDMI outputs. The max VIC defined by
+ * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256
+ * VICs.
*/
- unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)];
+ unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
/** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */
u64 y420_cmdb_map;
@@ -1070,6 +1070,14 @@ struct drm_cmdline_mode {
unsigned int rotation_reflection;
/**
+ * @panel_orientation:
+ *
+ * drm-connector "panel orientation" property override value,
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set.
+ */
+ enum drm_panel_orientation panel_orientation;
+
+ /**
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 51ecb5112ef8..bc04467f7c3a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -307,7 +307,7 @@
# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 4)
+# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */
# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4)
# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4
# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
@@ -1042,6 +1042,8 @@
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
+#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
/* Repeater modes */
#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
@@ -1463,6 +1465,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4], struct drm_dp_aux *aux);
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
@@ -1520,6 +1523,13 @@ enum drm_dp_quirk {
* The driver should ignore SINK_COUNT during detection.
*/
DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
};
/**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index c1bda7030e2d..bcb39da9adb4 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -156,6 +156,8 @@ struct drm_dp_mst_port {
* audio-capable.
*/
bool has_audio;
+
+ bool fec_capable;
};
/**
@@ -383,6 +385,7 @@ struct drm_dp_port_number_req {
struct drm_dp_enum_path_resources_ack_reply {
u8 port_number;
+ bool fec_capable;
u16 full_payload_bw_number;
u16 avail_payload_bw_number;
};
@@ -499,6 +502,8 @@ struct drm_dp_payload {
struct drm_dp_vcpi_allocation {
struct drm_dp_mst_port *port;
int vcpi;
+ int pbn;
+ bool dsc_enabled;
struct list_head next;
};
@@ -561,7 +566,8 @@ struct drm_dp_mst_topology_mgr {
struct drm_dp_sideband_msg_rx up_req_recv;
/**
- * @lock: protects mst state, primary, dpcd.
+ * @lock: protects @mst_state, @mst_primary, @dpcd, and
+ * @payload_id_table_cleared.
*/
struct mutex lock;
@@ -576,7 +582,14 @@ struct drm_dp_mst_topology_mgr {
* @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch devices is connected.
*/
- bool mst_state;
+ bool mst_state : 1;
+
+ /**
+ * @payload_id_table_cleared: Whether or not we've cleared the payload
+ * ID table for @mst_primary. Protected by @lock.
+ */
+ bool payload_id_table_cleared : 1;
+
/**
* @mst_primary: Pointer to the primary/first branch device.
*/
@@ -725,8 +738,7 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-int drm_dp_calc_pbn_mode(int clock, int bpp);
-
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn, int slots);
@@ -775,7 +787,15 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn);
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable);
+int __must_check
+drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
@@ -787,6 +807,8 @@ int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
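
Illustrative sketch (not part of the patch): drm_dp_calc_pbn_mode() now takes a third argument saying whether DSC is in use. The assumption below is that, with DSC enabled, the bpp value is passed in 1/16-of-a-bit-per-pixel units; foo_required_pbn() is hypothetical.

#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_modes.h>

static int foo_required_pbn(const struct drm_display_mode *mode,
			    int bpp, bool dsc_enabled)
{
	if (dsc_enabled)
		/* assumed: compressed bpp is given in 1/16 bpp units */
		return drm_dp_calc_pbn_mode(mode->clock, bpp << 4, true);

	return drm_dp_calc_pbn_mode(mode->clock, bpp, false);
}
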
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index f06164f44efe..5623994b6e9e 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -172,7 +172,12 @@ struct drm_encoder {
* &drm_connector_state.crtc.
*/
struct drm_crtc *crtc;
- struct drm_bridge *bridge;
+
+ /**
+ * @bridge_chain: Bridges attached to this encoder.
+ */
+ struct list_head bridge_chain;
+
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
};
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4becb09975a4..795aea1d0a25 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -2,6 +2,8 @@
#ifndef __DRM_FB_CMA_HELPER_H__
#define __DRM_FB_CMA_HELPER_H__
+#include <linux/types.h>
+
struct drm_framebuffer;
struct drm_plane_state;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 2338e9f94a03..1c6633da0f91 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -231,8 +231,6 @@ void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
-
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
@@ -269,18 +267,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count);
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
-
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes);
int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
@@ -363,10 +352,6 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
-static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
-}
-
static inline void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -452,24 +437,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
return 0;
}
-static inline int
-drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- /* So drivers can use it to free the struct */
- dev->fb_helper = fb_helper;
-
- return 0;
-}
-
-static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
- dev->fb_helper = NULL;
-}
-
static inline void drm_fb_helper_lastclose(struct drm_device *dev)
{
}
@@ -479,13 +446,6 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
}
static inline int
-drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- return 0;
-}
-
-static inline int
drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
return 0;
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 67af60bb527a..8b099b347817 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -42,6 +42,7 @@ struct dma_fence;
struct drm_file;
struct drm_device;
struct device;
+struct file;
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
@@ -387,4 +388,6 @@ void drm_event_cancel_free(struct drm_device *dev,
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+
#endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 306d1efeb5e0..156b122c0ad5 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -78,7 +78,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[3];
+ u8 cpp[4];
/**
* @char_per_block:
@@ -104,7 +104,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[3];
+ u8 char_per_block[4];
};
/**
@@ -113,7 +113,7 @@ struct drm_format_info {
* Block width in pixels, this is intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[3];
+ u8 block_w[4];
/**
* @block_h:
@@ -121,7 +121,7 @@ struct drm_format_info {
* Block height in pixels, this is intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[3];
+ u8 block_h[4];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 97a48165642c..0b375069cd48 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -159,9 +159,7 @@ struct drm_gem_object_funcs {
*
* The callback is used by by both drm_gem_mmap_obj() and
* drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
- * used, the @mmap callback must set vma->vm_ops instead. The @mmap
- * callback is always called with a 0 offset. The caller will remove
- * the fake offset as necessary.
+ * used, the @mmap callback must set vma->vm_ops instead.
*/
int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index e040541a105f..573e9fd109bf 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -16,7 +16,6 @@ struct drm_mode_create_dumb;
struct drm_plane;
struct drm_plane_state;
struct drm_simple_display_pipe;
-struct drm_vram_mm_funcs;
struct filp;
struct vm_area_struct;
@@ -94,10 +93,8 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
}
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
- struct ttm_bo_device *bdev,
size_t size,
- unsigned long pg_align,
- bool interruptible);
+ unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
@@ -111,9 +108,8 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
- struct ttm_bo_device *bdev,
unsigned long pg_align,
- bool interruptible,
+ unsigned long pitch_align,
struct drm_mode_create_dumb *args);
/*
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 58dc0c04bf99..5745710453c8 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -38,7 +38,9 @@
#include <drm/drm_hashtab.h>
struct drm_device;
+struct drm_driver;
struct file;
+struct pci_driver;
/*
* Legacy Support for palateontologic DRM drivers
@@ -188,8 +190,33 @@ do { \
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-/* drm_pci.c dma alloc wrappers */
+/* drm_pci.c */
+
+#ifdef CONFIG_PCI
+
void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+
+#else
+
+static inline void __drm_legacy_pci_free(struct drm_device *dev,
+ drm_dma_handle_t *dmah)
+{
+}
+
+static inline int drm_legacy_pci_init(struct drm_driver *driver,
+ struct pci_driver *pdriver)
+{
+ return -EINVAL;
+}
+
+static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+ struct pci_driver *pdriver)
+{
+}
+
+#endif
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 13cf2ae59f6c..360e6377e84b 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -13,6 +13,7 @@
struct mipi_dsi_host;
struct mipi_dsi_device;
+struct drm_dsc_picture_parameter_set;
/* request ACK from peripheral */
#define MIPI_DSI_MSG_REQ_ACK BIT(0)
@@ -228,6 +229,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps);
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
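
Illustrative sketch (not part of the patch): a hypothetical DSI panel driver using the two new DCS helpers. The PPS is assumed to have been filled in elsewhere (for instance with drm_dsc_pps_payload_pack()), and sending the PPS before enabling compression is just one possible ordering.

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_dsc.h>

static int foo_panel_enable_dsc(struct mipi_dsi_device *dsi,
				const struct drm_dsc_picture_parameter_set *pps)
{
	ssize_t ret;

	ret = mipi_dsi_picture_parameter_set(dsi, pps);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_compression_mode(dsi, true);
	if (ret < 0)
		return ret;

	return 0;
}
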
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index ead34ab5ca4e..b9b093add92e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -16,6 +16,18 @@ struct drm_panel;
struct drm_bridge;
struct device_node;
+/**
+ * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
+ * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated
+ * from the first port, odd pixels from the second port
+ * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated
+ * from the first port, even pixels from the second port
+ */
+enum drm_lvds_dual_link_pixels {
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0,
+ DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1,
+};
+
#ifdef CONFIG_OF
uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port);
@@ -35,6 +47,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
int port, int endpoint,
struct drm_panel **panel,
struct drm_bridge **bridge);
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2);
#else
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
@@ -77,6 +91,13 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
{
return -EINVAL;
}
+
+static inline int
+drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2)
+{
+ return -EINVAL;
+}
#endif
/*
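
Illustrative sketch (not part of the patch): querying the pixel order of a dual-link LVDS setup with the new helper, assuming the two sink port nodes have already been looked up from the device tree. foo_get_lvds_order() is hypothetical.

#include <linux/of.h>
#include <drm/drm_of.h>

static int foo_get_lvds_order(const struct device_node *port0,
			      const struct device_node *port1)
{
	int ret;

	ret = drm_of_lvds_get_dual_link_pixel_order(port0, port1);
	if (ret < 0)
		return ret;	/* not dual-link, or the DT is malformed */

	if (ret == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS)
		pr_debug("first port drives the even pixels\n");
	else
		pr_debug("first port drives the odd pixels\n");

	return 0;
}
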
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index ce8da64022b4..121f7aabccd1 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/list.h>
+struct backlight_device;
struct device_node;
struct drm_connector;
struct drm_device;
@@ -59,12 +60,18 @@ struct display_timing;
*
* To save power when no video data is transmitted, a driver can power down
* the panel. This is the job of the .unprepare() function.
+ *
+ * Backlight can be handled automatically if configured using
+ * drm_panel_of_backlight(). Then the driver does not need to implement the
+ * functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
* @prepare:
*
* Turn on panel and perform set up.
+ *
+ * This function is optional.
*/
int (*prepare)(struct drm_panel *panel);
@@ -72,6 +79,8 @@ struct drm_panel_funcs {
* @enable:
*
* Enable panel (turn on back light, etc.).
+ *
+ * This function is optional.
*/
int (*enable)(struct drm_panel *panel);
@@ -79,6 +88,8 @@ struct drm_panel_funcs {
* @disable:
*
* Disable panel (turn off back light, etc.).
+ *
+ * This function is optional.
*/
int (*disable)(struct drm_panel *panel);
@@ -86,22 +97,29 @@ struct drm_panel_funcs {
* @unprepare:
*
* Turn off panel.
+ *
+ * This function is optional.
*/
int (*unprepare)(struct drm_panel *panel);
/**
* @get_modes:
*
- * Add modes to the connector that the panel is attached to and
- * return the number of modes added.
+ * Add modes to the connector that the panel is attached to
+ * and returns the number of modes added.
+ *
+ * This function is mandatory.
*/
- int (*get_modes)(struct drm_panel *panel);
+ int (*get_modes)(struct drm_panel *panel,
+ struct drm_connector *connector);
/**
* @get_timings:
*
* Copy display timings into the provided array and return
* the number of display timings available.
+ *
+ * This function is optional.
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
@@ -112,25 +130,22 @@ struct drm_panel_funcs {
*/
struct drm_panel {
/**
- * @drm:
- *
- * DRM device owning the panel.
- */
- struct drm_device *drm;
-
- /**
- * @connector:
+ * @dev:
*
- * DRM connector that the panel is attached to.
+ * Parent device of the panel.
*/
- struct drm_connector *connector;
+ struct device *dev;
/**
- * @dev:
+ * @backlight:
*
- * Parent device of the panel.
+ * Backlight device, used to turn on backlight after the call
+ * to enable(), and to turn off backlight before the call to
+ * disable().
+ * backlight is set by drm_panel_of_backlight() and drivers
+ * shall not assign it.
*/
- struct device *dev;
+ struct backlight_device *backlight;
/**
* @funcs:
@@ -172,7 +187,7 @@ int drm_panel_unprepare(struct drm_panel *panel);
int drm_panel_enable(struct drm_panel *panel);
int drm_panel_disable(struct drm_panel *panel);
-int drm_panel_get_modes(struct drm_panel *panel);
+int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
@@ -183,4 +198,13 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
#endif
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int drm_panel_of_backlight(struct drm_panel *panel);
+#else
+static inline int drm_panel_of_backlight(struct drm_panel *panel)
+{
+ return 0;
+}
+#endif
+
#endif
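
Illustrative sketch (not part of the patch): a panel driver built around the reworked struct drm_panel. get_modes() now receives the connector explicitly, and a driver that used to toggle the backlight by hand can instead call drm_panel_of_backlight() once at probe time. The foo_* names are made up; the fixed mode is a generic 1080p timing.

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

static const struct drm_display_mode foo_mode = {
	.clock = 148500,
	.hdisplay = 1920, .hsync_start = 2008, .hsync_end = 2052, .htotal = 2200,
	.vdisplay = 1080, .vsync_start = 1084, .vsync_end = 1089, .vtotal = 1125,
};

static int foo_panel_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &foo_mode);
	if (!mode)
		return 0;

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	return 1;
}

static const struct drm_panel_funcs foo_panel_funcs = {
	/* prepare/enable/disable/unprepare are now explicitly optional */
	.get_modes = foo_panel_get_modes,
};
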
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 8181e9e7cf1d..9031e217b506 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -39,23 +39,36 @@ struct drm_device;
struct drm_driver;
struct drm_master;
+#ifdef CONFIG_PCI
+
struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
-#ifdef CONFIG_PCI
int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+
#else
+
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+ size_t size, size_t align)
+{
+ return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+ struct drm_dma_handle *dmah)
+{
+}
+
static inline int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
return -ENOSYS;
}
+
#endif
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 5b8049992c24..8f99d389792d 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -34,7 +34,8 @@
#include <drm/drm.h>
-extern unsigned int drm_debug;
+/* Do *not* use outside of drm_print.[ch]! */
+extern unsigned int __drm_debug;
/**
* DOC: print
@@ -248,87 +249,90 @@ static inline struct drm_printer drm_err_printer(const char *prefix)
return p;
}
-/*
- * The following categories are defined:
- *
- * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
- * This is the category used by the DRM_DEBUG() macro.
- *
- * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
- * This is the category used by the DRM_DEBUG_DRIVER() macro.
- *
- * KMS: used in the modesetting code.
- * This is the category used by the DRM_DEBUG_KMS() macro.
- *
- * PRIME: used in the prime code.
- * This is the category used by the DRM_DEBUG_PRIME() macro.
+/**
+ * enum drm_debug_category - The DRM debug categories
*
- * ATOMIC: used in the atomic code.
- * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ * Each of the DRM debug logging macros use a specific category, and the logging
+ * is filtered by the drm.debug module parameter. This enum specifies the values
+ * for the interface.
*
- * VBL: used for verbose debug message in the vblank code
- * This is the category used by the DRM_DEBUG_VBL() macro.
+ * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
+ * DRM_DEBUG() logs to DRM_UT_CORE.
*
- * Enabling verbose debug messages is done through the drm.debug parameter,
- * each category being enabled by a bit.
+ * Enabling verbose debug messages is done through the drm.debug parameter, each
+ * category being enabled by a bit:
*
- * drm.debug=0x1 will enable CORE messages
- * drm.debug=0x2 will enable DRIVER messages
- * drm.debug=0x3 will enable CORE and DRIVER messages
- * ...
- * drm.debug=0x3f will enable all messages
+ * - drm.debug=0x1 will enable CORE messages
+ * - drm.debug=0x2 will enable DRIVER messages
+ * - drm.debug=0x3 will enable CORE and DRIVER messages
+ * - ...
+ * - drm.debug=0x1ff will enable all messages
*
* An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node:
+ * run-time by echoing the debug value in its sysfs node::
+ *
* # echo 0xf > /sys/module/drm/parameters/debug
+ *
*/
-#define DRM_UT_NONE 0x00
-#define DRM_UT_CORE 0x01
-#define DRM_UT_DRIVER 0x02
-#define DRM_UT_KMS 0x04
-#define DRM_UT_PRIME 0x08
-#define DRM_UT_ATOMIC 0x10
-#define DRM_UT_VBL 0x20
-#define DRM_UT_STATE 0x40
-#define DRM_UT_LEASE 0x80
-#define DRM_UT_DP 0x100
-
-static inline bool drm_debug_enabled(unsigned int category)
+enum drm_debug_category {
+ /**
+ * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
+ * drm_memory.c, ...
+ */
+ DRM_UT_CORE = 0x01,
+ /**
+ * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
+ * radeon, ... macro.
+ */
+ DRM_UT_DRIVER = 0x02,
+ /**
+ * @DRM_UT_KMS: Used in the modesetting code.
+ */
+ DRM_UT_KMS = 0x04,
+ /**
+ * @DRM_UT_PRIME: Used in the prime code.
+ */
+ DRM_UT_PRIME = 0x08,
+ /**
+ * @DRM_UT_ATOMIC: Used in the atomic code.
+ */
+ DRM_UT_ATOMIC = 0x10,
+ /**
+ * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
+ */
+ DRM_UT_VBL = 0x20,
+ /**
+ * @DRM_UT_STATE: Used for verbose atomic state debugging.
+ */
+ DRM_UT_STATE = 0x40,
+ /**
+ * @DRM_UT_LEASE: Used in the lease code.
+ */
+ DRM_UT_LEASE = 0x80,
+ /**
+ * @DRM_UT_DP: Used in the DP code.
+ */
+ DRM_UT_DP = 0x100,
+};
+
+static inline bool drm_debug_enabled(enum drm_debug_category category)
{
- return unlikely(drm_debug & category);
+ return unlikely(__drm_debug & category);
}
+/*
+ * struct device based logging
+ *
+ * Prefer drm_device based logging over device or prink based logging.
+ */
+
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
const char *format, ...);
-__printf(2, 3)
-void drm_dbg(unsigned int category, const char *format, ...);
-__printf(1, 2)
-void drm_err(const char *format, ...);
-
-/* Macros to make printk easier */
-
-#define _DRM_PRINTK(once, level, fmt, ...) \
- printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
-
-#define DRM_INFO(fmt, ...) \
- _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE(fmt, ...) \
- _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN(fmt, ...) \
- _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
-
-#define DRM_INFO_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
-
/**
* Error output.
*
@@ -337,8 +341,6 @@ void drm_err(const char *format, ...);
*/
#define DRM_DEV_ERROR(dev, fmt, ...) \
drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
-#define DRM_ERROR(fmt, ...) \
- drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -355,10 +357,8 @@ void drm_err(const char *format, ...);
if (__ratelimit(&_rs)) \
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
-#define DRM_ERROR_RATELIMITED(fmt, ...) \
- DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-#define DRM_DEV_INFO(dev, fmt, ...) \
+#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
@@ -378,41 +378,18 @@ void drm_err(const char *format, ...);
*/
#define DRM_DEV_DEBUG(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG(fmt, ...) \
- drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_DRIVER(fmt, ...) \
- drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_KMS(fmt, ...) \
- drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_PRIME(fmt, ...) \
- drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_ATOMIC(fmt, ...) \
- drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_VBL(fmt, ...) \
- drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEBUG_LEASE(fmt, ...) \
- drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_DP(fmt, ...) \
- drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
@@ -432,24 +409,147 @@ void drm_err(const char *format, ...);
#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
	_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE,			\
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
fmt, ##__VA_ARGS__)
+
+/*
+ * struct drm_device based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
+/* Helper for struct drm_device based logging. */
+#define __drm_printk(drm, level, type, fmt, ...) \
+ dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+
+
+#define drm_info(drm, fmt, ...) \
+ __drm_printk((drm), info,, fmt, ##__VA_ARGS__)
+
+#define drm_notice(drm, fmt, ...) \
+ __drm_printk((drm), notice,, fmt, ##__VA_ARGS__)
+
+#define drm_warn(drm, fmt, ...) \
+ __drm_printk((drm), warn,, fmt, ##__VA_ARGS__)
+
+#define drm_err(drm, fmt, ...) \
+ __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_info_once(drm, fmt, ...) \
+ __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__)
+
+#define drm_notice_once(drm, fmt, ...) \
+ __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__)
+
+#define drm_warn_once(drm, fmt, ...) \
+ __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__)
+
+#define drm_err_once(drm, fmt, ...) \
+ __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_err_ratelimited(drm, fmt, ...) \
+ __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_dbg_core(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+#define drm_dbg_kms(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define drm_dbg_prime(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+#define drm_dbg_atomic(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+#define drm_dbg_vbl(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define drm_dbg_state(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+#define drm_dbg_lease(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define drm_dbg_dp(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+
+
+/*
+ * printk based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
+__printf(2, 3)
+void __drm_dbg(enum drm_debug_category category, const char *format, ...);
+__printf(1, 2)
+void __drm_err(const char *format, ...);
+
+/* Macros to make printk easier */
+
+#define _DRM_PRINTK(once, level, fmt, ...) \
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
+#define DRM_INFO(fmt, ...) \
+ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...) \
+ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...) \
+ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_ERROR(fmt, ...) \
+ __drm_err(fmt, ##__VA_ARGS__)
+
+#define DRM_ERROR_RATELIMITED(fmt, ...) \
+ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG(fmt, ...) \
+ __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DRIVER(fmt, ...) \
+ __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_KMS(fmt, ...) \
+ __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_PRIME(fmt, ...) \
+ __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_ATOMIC(fmt, ...) \
+ __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_VBL(fmt, ...) \
+ __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_LEASE(fmt, ...) \
+ __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DP(fmt, ...) \
+ __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
+
+#define DRM_DEBUG_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
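
For orientation, a minimal sketch of how a driver might use the new struct
drm_device based logging macros above; the example_* names are hypothetical,
and only the drm_* calls come from this header.

#include <drm/drm_device.h>
#include <drm/drm_print.h>

/* Hypothetical hardware init helper, assumed to return 0 on success. */
static int example_hw_init(struct drm_device *drm);

static void example_probe_outputs(struct drm_device *drm)
{
	/* Filtered by the DRM_UT_KMS bit (0x4) of drm.debug. */
	drm_dbg_kms(drm, "probing outputs\n");

	if (example_hw_init(drm))
		drm_err(drm, "hardware init failed\n"); /* prefixed with *ERROR* */
	else
		drm_info(drm, "hardware initialized\n");
}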
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index cd0106135b6a..57a3be9e53e4 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -24,6 +24,8 @@
#ifndef DRM_RECT_H
#define DRM_RECT_H
+#include <linux/types.h>
+
/**
* DOC: rect utils
*
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
index f92eb2094d6b..6a483533aae4 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/drm_scdc_helper.h
@@ -50,9 +50,9 @@
#define SCDC_READ_REQUEST_ENABLE (1 << 0)
#define SCDC_STATUS_FLAGS_0 0x40
-#define SCDC_CH2_LOCK (1 < 3)
-#define SCDC_CH1_LOCK (1 < 2)
-#define SCDC_CH0_LOCK (1 < 1)
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
#define SCDC_CLOCK_DETECT (1 << 0)
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 07b8e9f04599..79952d8c4bba 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -41,7 +41,7 @@
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
* only be visible for drmselftests.
*/
-#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#if defined(CONFIG_DRM_EXPORT_FOR_TESTS)
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
#else
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a8ed76..9e71be129c30 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,9 +52,9 @@ enum drm_sched_priority {
* @list: used to append this struct to the list of entities in the
* runqueue.
* @rq: runqueue on which this entity is currently scheduled.
- * @rq_list: a list of run queues on which jobs from this entity can
- * be scheduled
- * @num_rq_list: number of run queues in the rq_list
+ * @sched_list: a list of drm_gpu_schedulers on which jobs from this entity can
+ * be scheduled
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -81,8 +81,9 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
- struct drm_sched_rq **rq_list;
- unsigned int num_rq_list;
+ struct drm_gpu_scheduler **sched_list;
+ unsigned int num_sched_list;
+ enum drm_sched_priority priority;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -260,7 +261,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle scheduler
* @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -281,8 +282,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t num_jobs;
- bool ready;
+ atomic_t score;
+ bool ready;
bool free_guilty;
};
@@ -312,8 +313,9 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
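
A hedged sketch of the new drm_sched_entity_init() signature above, which now
takes a priority and an array of schedulers instead of run queues; the
example_* names are hypothetical and the scheduler array is assumed to outlive
the entity.

#include <linux/kernel.h>
#include <drm/gpu_scheduler.h>

static struct drm_gpu_scheduler *example_sched_list[1];

static int example_entity_setup(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
{
	example_sched_list[0] = sched;

	/* Jobs from this entity may be placed on any scheduler in the list. */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     example_sched_list,
				     ARRAY_SIZE(example_sched_list), NULL);
}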
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index b1f66b117c74..1d2c12219f44 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -446,23 +446,18 @@
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9B21, info), \
- INTEL_VGA_DEVICE(0x9BAA, info), \
- INTEL_VGA_DEVICE(0x9BAB, info), \
- INTEL_VGA_DEVICE(0x9BAC, info), \
- INTEL_VGA_DEVICE(0x9BA0, info), \
INTEL_VGA_DEVICE(0x9BA5, info), \
INTEL_VGA_DEVICE(0x9BA8, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
INTEL_VGA_DEVICE(0x9BA2, info)
+#define INTEL_CML_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B21, info), \
+ INTEL_VGA_DEVICE(0x9BAA, info), \
+ INTEL_VGA_DEVICE(0x9BAC, info)
+
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9B41, info), \
- INTEL_VGA_DEVICE(0x9BCA, info), \
- INTEL_VGA_DEVICE(0x9BCB, info), \
- INTEL_VGA_DEVICE(0x9BCC, info), \
- INTEL_VGA_DEVICE(0x9BC0, info), \
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
@@ -471,6 +466,11 @@
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
+#define INTEL_CML_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B41, info), \
+ INTEL_VGA_DEVICE(0x9BCA, info), \
+ INTEL_VGA_DEVICE(0x9BCC, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
@@ -536,7 +536,9 @@
INTEL_WHL_U_GT3_IDS(info), \
INTEL_AML_CFL_GT2_IDS(info), \
INTEL_CML_GT1_IDS(info), \
- INTEL_CML_GT2_IDS(info)
+ INTEL_CML_GT2_IDS(info), \
+ INTEL_CML_U_GT1_IDS(info), \
+ INTEL_CML_U_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
@@ -579,12 +581,15 @@
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
-/* EHL */
+/* EHL/JSL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4500, info), \
INTEL_VGA_DEVICE(0x4571, info), \
INTEL_VGA_DEVICE(0x4551, info), \
- INTEL_VGA_DEVICE(0x4541, info)
+ INTEL_VGA_DEVICE(0x4541, info), \
+ INTEL_VGA_DEVICE(0x4E71, info), \
+ INTEL_VGA_DEVICE(0x4E61, info), \
+ INTEL_VGA_DEVICE(0x4E51, info)
/* TGL */
#define INTEL_TGL_12_IDS(info) \
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 000000000000..087e3f649c52
--- /dev/null
+++ b/include/drm/task_barrier.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable two-phase task barrier (rendezvous point) implementation for N tasks.
+ * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
+ */
+
+
+
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+ unsigned int n;
+ atomic_t count;
+ struct semaphore enter_turnstile;
+ struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+ unsigned int n)
+{
+ int i;
+
+ for (i = 0 ; i < n; i++)
+ up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+ tb->n = 0;
+ atomic_set(&tb->count, 0);
+ sema_init(&tb->enter_turnstile, 0);
+ sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+ tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+ tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * When all threads have passed this point the entry barrier is locked again.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+ if (atomic_inc_return(&tb->count) == tb->n)
+ task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+ down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to avoid any one thread running ahead if the barrier is
+ * used repeatedly.
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+ if (atomic_dec_return(&tb->count) == 0)
+ task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+ down(&tb->exit_turnstile);
+}
+
+/* Convenience function when nothing needs to be done between entry and exit */
+static inline void task_barrier_full(struct task_barrier *tb)
+{
+ task_barrier_enter(tb);
+ task_barrier_exit(tb);
+}
+
+#endif
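
A minimal usage sketch for the new task_barrier API, assuming the caller
creates one worker per registered task; the example_* names are hypothetical.

#include <drm/task_barrier.h>

static struct task_barrier example_tb;

/* Called once, before the participating tasks start running. */
static void example_barrier_setup(unsigned int num_tasks)
{
	unsigned int i;

	task_barrier_init(&example_tb);
	for (i = 0; i < num_tasks; i++)
		task_barrier_add_task(&example_tb);
}

/* Body of each participating task. */
static void example_task_body(void)
{
	task_barrier_enter(&example_tb);	/* wait until all tasks arrive */
	/* ... work that must start on all tasks at the same time ... */
	task_barrier_exit(&example_tb);		/* wait until all tasks finish */
}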
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 65e399d280f7..66ca49db9633 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -154,7 +154,6 @@ struct ttm_tt;
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
- * @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -222,8 +221,6 @@ struct ttm_buffer_object {
uint64_t offset; /* GPU address space is independent of CPU word size */
struct sg_table *sg;
-
- struct mutex wu_mutex;
};
/**
@@ -707,7 +704,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -738,7 +734,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
pgoff_t num_prefault);
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
+
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
#endif
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index db6a0fca09b4..ad889b539ab3 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -9,10 +9,11 @@
#ifndef _KUNIT_ASSERT_H
#define _KUNIT_ASSERT_H
-#include <kunit/string-stream.h>
#include <linux/err.h>
+#include <linux/kernel.h>
struct kunit;
+struct string_stream;
/**
* enum kunit_assert_type - Type of expectation/assertion.
diff --git a/include/kunit/string-stream.h b/include/kunit/string-stream.h
deleted file mode 100644
index fe98a00b75a9..000000000000
--- a/include/kunit/string-stream.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * C++ stream style string builder used in KUnit for building messages.
- *
- * Copyright (C) 2019, Google LLC.
- * Author: Brendan Higgins <brendanhiggins@google.com>
- */
-
-#ifndef _KUNIT_STRING_STREAM_H
-#define _KUNIT_STRING_STREAM_H
-
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <stdarg.h>
-
-struct string_stream_fragment {
- struct kunit *test;
- struct list_head node;
- char *fragment;
-};
-
-struct string_stream {
- size_t length;
- struct list_head fragments;
- /* length and fragments are protected by this lock */
- spinlock_t lock;
- struct kunit *test;
- gfp_t gfp;
-};
-
-struct kunit;
-
-struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
-
-int __printf(2, 3) string_stream_add(struct string_stream *stream,
- const char *fmt, ...);
-
-int string_stream_vadd(struct string_stream *stream,
- const char *fmt,
- va_list args);
-
-char *string_stream_get_string(struct string_stream *stream);
-
-int string_stream_append(struct string_stream *stream,
- struct string_stream *other);
-
-bool string_stream_is_empty(struct string_stream *stream);
-
-int string_stream_destroy(struct string_stream *stream);
-
-#endif /* _KUNIT_STRING_STREAM_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index dba48304b3bd..2dfb550c6723 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -12,6 +12,7 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -197,31 +198,47 @@ void kunit_init_test(struct kunit *test, const char *name);
int kunit_run_tests(struct kunit_suite *suite);
/**
- * kunit_test_suite() - used to register a &struct kunit_suite with KUnit.
+ * kunit_test_suites() - used to register one or more &struct kunit_suite
+ * with KUnit.
*
- * @suite: a statically allocated &struct kunit_suite.
+ * @suites: a statically allocated list of &struct kunit_suite.
*
- * Registers @suite with the test framework. See &struct kunit_suite for
+ * Registers @suites with the test framework. See &struct kunit_suite for
* more information.
*
- * NOTE: Currently KUnit tests are all run as late_initcalls; this means
+ * When builtin, KUnit tests are all run as late_initcalls; this means
* that they cannot test anything where tests must run at a different init
* phase. One significant restriction resulting from this is that KUnit
 * cannot reliably test anything that is initialized in the late_init phase;
* another is that KUnit is useless to test things that need to be run in
* an earlier init phase.
*
+ * An alternative is to build the tests as a module. Because modules
+ * do not support multiple late_initcall()s, we need to initialize an
+ * array of suites for a module.
+ *
* TODO(brendanhiggins@google.com): Don't run all KUnit tests as
* late_initcalls. I have some future work planned to dispatch all KUnit
* tests from the same place, and at the very least to do so after
* everything else is definitely initialized.
*/
-#define kunit_test_suite(suite) \
- static int kunit_suite_init##suite(void) \
- { \
- return kunit_run_tests(&suite); \
- } \
- late_initcall(kunit_suite_init##suite)
+#define kunit_test_suites(...) \
+ static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
+ static int kunit_test_suites_init(void) \
+ { \
+ unsigned int i; \
+ for (i = 0; suites[i] != NULL; i++) \
+ kunit_run_tests(suites[i]); \
+ return 0; \
+ } \
+ late_initcall(kunit_test_suites_init); \
+ static void __exit kunit_test_suites_exit(void) \
+ { \
+ return; \
+ } \
+ module_exit(kunit_test_suites_exit)
+
+#define kunit_test_suite(suite) kunit_test_suites(&suite)
/*
* Like kunit_alloc_resource() below, but returns the struct kunit_resource
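
A minimal sketch of a suite registered through the new kunit_test_suites()
macro; it builds either builtin or as a module, and the example_* names are
hypothetical.

#include <kunit/test.h>

static void example_math_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 4, 2 + 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_math_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

/* Several suites could be passed in a single kunit_test_suites() call. */
kunit_test_suites(&example_test_suite);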
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
index 404f336cbdc8..c507dd43119d 100644
--- a/include/kunit/try-catch.h
+++ b/include/kunit/try-catch.h
@@ -53,11 +53,6 @@ struct kunit_try_catch {
void *context;
};
-void kunit_try_catch_init(struct kunit_try_catch *try_catch,
- struct kunit *test,
- kunit_try_catch_func_t try,
- kunit_try_catch_func_t catch);
-
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context);
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch);
@@ -67,9 +62,4 @@ static inline int kunit_try_catch_get_result(struct kunit_try_catch *try_catch)
return try_catch->try_result;
}
-/*
- * Exposed for testing only.
- */
-void kunit_generic_try_catch_init(struct kunit_try_catch *try_catch);
-
#endif /* _KUNIT_TRY_CATCH_H */
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index d12bb2153cd6..e4004d1e6725 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -54,6 +54,13 @@ void attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *));
+int attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *));
void attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *));
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 97967ce06de3..f88197c1ffc2 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
+#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << WB_async_congested));
}
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+ if (!bdi || !bdi->dev)
+ return bdi_unknown_name;
+ return dev_name(bdi->dev);
+}
+
#endif /* _LINUX_BACKING_DEV_H */
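
A small sketch of the new bdi_dev_name() helper, which falls back to
bdi_unknown_name when no device has been registered; the example_* name is
hypothetical.

#include <linux/backing-dev.h>

static void example_log_writeback(struct backing_dev_info *bdi)
{
	/* Safe to call before the bdi device exists or after it is gone. */
	pr_info("writeback congestion on %s\n", bdi_dev_name(bdi));
}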
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e479067c202c..6c7c4133c25c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -13,6 +13,7 @@
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4c636c42ad68..053ea4b51988 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1711,6 +1711,13 @@ struct block_device_operations {
const struct pr_ops *pr_ops;
};
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+ unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
+
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
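
A hedged sketch of wiring the new blkdev_compat_ptr_ioctl() helper into a
driver's block_device_operations; this only suits drivers whose ioctl
arguments are plain pointers, and the example_* names are hypothetical.

#include <linux/blkdev.h>
#include <linux/module.h>

/* Hypothetical native ioctl handler. */
static int example_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg);

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= example_ioctl,
	/* Converts the compat pointer argument and calls ->ioctl();
	 * evaluates to NULL when CONFIG_COMPAT is disabled. */
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
};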
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 68f79d855c3d..11083d84eb23 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -958,4 +958,22 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+#ifndef compat_ptr
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+#endif
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
#endif /* _LINUX_COMPAT_H */
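
A sketch of the usual pattern in a ->compat_ioctl handler using the now
generic compat_ptr(); the example_* helpers are hypothetical.

#include <linux/compat.h>
#include <linux/fs.h>

/* Hypothetical common handler shared by the native and compat paths. */
static long example_do_ioctl(struct file *file, unsigned int cmd,
			     void __user *argp);

static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	/* arg carries a 32-bit user pointer; widen it before dereferencing. */
	return example_do_ioctl(file, cmd, compat_ptr(arg));
}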
diff --git a/include/linux/console.h b/include/linux/console.h
index d09951d5a94e..f33016b3a401 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,7 +101,6 @@ extern const struct consw *conswitchp;
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
-extern const struct consw prom_con; /* SPARC PROM console */
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
@@ -201,7 +200,6 @@ extern void suspend_console(void);
extern void resume_console(void);
int mda_console_init(void);
-void prom_con_init(void);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 64ec82851aa3..8150f5ac176c 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -154,15 +154,6 @@ static inline void guest_exit_irqoff(void)
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void guest_enter(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- guest_enter_irqoff();
- local_irq_restore(flags);
-}
-
static inline void guest_exit(void)
{
unsigned long flags;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 31b1b0e03df8..018dce868de6 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -148,6 +148,20 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+/*
+ * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
+ * callback for sanitization. That callback is only expected to modify the min
+ * and max values, if necessary, and specifically it must not update the
+ * frequency table.
+ */
+struct cpufreq_policy_data {
+ struct cpufreq_cpuinfo cpuinfo;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int cpu;
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+};
+
struct cpufreq_freqs {
struct cpufreq_policy *policy;
unsigned int old;
@@ -201,8 +215,6 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -284,7 +296,7 @@ struct cpufreq_driver {
/* needed by all drivers */
int (*init)(struct cpufreq_policy *policy);
- int (*verify)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy_data *policy);
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
@@ -415,8 +427,9 @@ static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
(drv->flags & CPUFREQ_IS_COOLING_DEV);
}
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
- unsigned int min, unsigned int max)
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
+ unsigned int min,
+ unsigned int max)
{
if (policy->min < min)
policy->min = min;
@@ -432,10 +445,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
}
static inline void
-cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ policy->cpuinfo.max_freq);
}
#ifdef CONFIG_CPU_FREQ
@@ -513,6 +526,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* CPUFREQ GOVERNORS *
*********************************************************************/
+#define CPUFREQ_POLICY_UNKNOWN (0)
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
* within the limits is used. If (cpufreq_driver->setpolicy> exists, these
@@ -684,9 +698,9 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
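
A hedged sketch of a ->verify() callback written against the new struct
cpufreq_policy_data: it only clamps min/max and leaves the frequency table
alone; the example_* names are hypothetical.

#include <linux/cpufreq.h>

static int example_cpufreq_verify(struct cpufreq_policy_data *policy)
{
	/* Clamp the requested limits to the hardware range; do not touch
	 * policy->freq_table here. */
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.verify	= example_cpufreq_verify,
	/* .init plus .target_index or .setpolicy omitted from this sketch */
};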
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index af73f835c51c..abf5459a5b9d 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -249,31 +249,6 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- /**
- * @map:
- *
- * Maps a page from the buffer into kernel address space. The page is
- * specified by offset into the buffer in PAGE_SIZE units.
- *
- * This callback is optional.
- *
- * Returns:
- *
- * Virtual address pointer where requested page can be accessed. NULL
- * on error or when this function is unimplemented by the exporter.
- */
- void *(*map)(struct dma_buf *, unsigned long);
-
- /**
- * @unmap:
- *
- * Unmaps a page from the buffer. Page offset and address pointer should
- * be the same as the one passed to and returned by matching call to map.
- *
- * This callback is optional.
- */
- void (*unmap)(struct dma_buf *, unsigned long, void *);
-
void *(*vmap)(struct dma_buf *);
void (*vunmap)(struct dma_buf *, void *vaddr);
};
@@ -464,8 +439,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
new file mode 100644
index 000000000000..454e354d1ffb
--- /dev/null
+++ b/include/linux/dma-heap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate: allocate dmabuf and return fd
+ *
+ * allocate returns dmabuf fd on success, -errno on error.
+ */
+struct dma_heap_ops {
+ int (*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-heap driver data
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-heap data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap);
+
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+
+#endif /* _DMA_HEAPS_H */
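
A sketch of registering a heap with the new dma-heap interface; the allocate
callback is expected to export a dma-buf and return its fd. The example_*
names and the -ENOMEM placeholder are hypothetical, and dma_heap_add() is
assumed to return an ERR_PTR() on failure.

#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/init.h>

static int example_heap_allocate(struct dma_heap *heap, unsigned long len,
				 unsigned long fd_flags,
				 unsigned long heap_flags)
{
	/* Allocate backing memory, wrap it in a dma-buf, return the fd. */
	return -ENOMEM;	/* placeholder */
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};

static int __init example_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name	= "example",
		.ops	= &example_heap_ops,
		.priv	= NULL,
	};

	return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
}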
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index bc6d79b00c4e..8f000fada5a4 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,15 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+ return op != EPOLL_CTL_DEL;
+}
+
#else
static inline void eventpoll_init_file(struct file *file) {}
diff --git a/include/linux/export.h b/include/linux/export.h
index 627841448293..fceb5e855717 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -82,16 +82,29 @@ struct kernel_symbol {
#else
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec, ns) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec); \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
- = #sym; \
- static const char __kstrtabns_##sym[] \
- __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
- = ns; \
+/*
+ * For every exported symbol, do the following:
+ *
+ * - If applicable, place a CRC entry in the __kcrctab section.
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define ___EXPORT_SYMBOL(sym, sec, ns) \
+ extern typeof(sym) sym; \
+ extern const char __kstrtab_##sym[]; \
+ extern const char __kstrtabns_##sym[]; \
+ __CRC_SYMBOL(sym, sec); \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
+ "__kstrtab_" #sym ": \n" \
+ " .asciz \"" #sym "\" \n" \
+ "__kstrtabns_" #sym ": \n" \
+ " .asciz \"" ns "\" \n" \
+ " .previous \n"); \
__KSYMTAB_ENTRY(sym, sec)
#endif
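
For reference, a plain EXPORT_SYMBOL() use that, in the normal (non-trimmed)
configuration, eventually expands through the asm-based ___EXPORT_SYMBOL()
above; the example_* name is hypothetical.

#include <linux/export.h>

int example_do_thing(void)
{
	return 0;
}
/* Emits "example_do_thing" (plus an empty namespace string) into
 * __ksymtab_strings and a struct kernel_symbol entry into __ksymtab. */
EXPORT_SYMBOL(example_do_thing);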
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 284738996028..ac3f4888b3df 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -23,6 +23,7 @@
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
@@ -271,6 +272,10 @@ struct f2fs_inode {
__le32 i_inode_checksum;/* inode meta checksum */
__le64 i_crtime; /* creation time */
__le32 i_crtime_nsec; /* creation time in nano scale */
+ __le64 i_compr_blocks; /* # of compressed blocks */
+ __u8 i_compress_algorithm; /* compress algorithm */
+ __u8 i_log_cluster_size; /* log of cluster size */
+ __le16 i_padding; /* padding */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 8bf3d79f3e82..f3f0b97b1675 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -51,8 +51,6 @@ struct space_resv_32 {
#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32)
#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32)
-int compat_ioctl_preallocate(struct file *, int, struct space_resv_32 __user *);
-
#endif
#endif /* _FALLOC_H_ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 41e0069eca0a..3b4b2f0c6994 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -472,7 +472,7 @@ struct fb_info {
struct fb_deferred_io *fbdefio;
#endif
- struct fb_ops *fbops;
+ const struct fb_ops *fbops;
struct device *device; /* This is the parent */
struct device *dev; /* This is this fb device */
int class_flag; /* private sysfs flags */
@@ -606,7 +606,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern void unlink_framebuffer(struct fb_info *fb_info);
extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
@@ -626,6 +625,7 @@ extern int fb_new_modelist(struct fb_info *info);
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern bool fb_center_logo;
+extern int fb_logo_count;
extern struct class *fb_class;
#define for_each_registered_fb(i) \
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index d019df946cb2..7bcdcf4f6ab2 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -2,15 +2,29 @@
#ifndef _LINUX_FCNTL_H
#define _LINUX_FCNTL_H
+#include <linux/stat.h>
#include <uapi/linux/fcntl.h>
-/* list of all valid flags for the open/openat flags argument: */
+/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+/* List of all valid flags for the how->upgrade_mask argument: */
+#define VALID_UPGRADE_FLAGS \
+ (UPGRADE_NOWRITE | UPGRADE_NOREAD)
+
+/* List of all valid flags for the how->resolve argument: */
+#define VALID_RESOLVE_FLAGS \
+ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+
+/* List of all open_how "versions". */
+#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST OPEN_HOW_SIZE_VER0
+
#ifndef force_o_largefile
#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif
diff --git a/include/linux/file.h b/include/linux/file.h
index 3fcddff56bc4..c6c7b24ea9f7 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -16,6 +16,7 @@ extern void fput(struct file *);
extern void fput_many(struct file *, unsigned int);
struct file_operations;
+struct task_struct;
struct vfsmount;
struct dentry;
struct inode;
@@ -47,6 +48,7 @@ static inline void fdput(struct fd fd)
extern struct file *fget(unsigned int fd);
extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dddfcbb140a7..41584f50af0d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2552,10 +2552,6 @@ extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
-/* fs/ioctl.c */
-
-extern int ioctl_preallocate(struct file *filp, int mode, void __user *argp);
-
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
@@ -2741,7 +2737,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -2751,6 +2746,11 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 6f8d772591ba..62d216ff1097 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -24,16 +24,20 @@ struct iommu_group;
* struct host1x_client_ops - host1x client operations
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @suspend: host1x client suspend code
+ * @resume: host1x client resume code
*/
struct host1x_client_ops {
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*suspend)(struct host1x_client *client);
+ int (*resume)(struct host1x_client *client);
};
/**
* struct host1x_client - host1x client structure
* @list: list node for the host1x client
- * @parent: pointer to struct device representing the host1x controller
+ * @host: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
* @group: IOMMU group that this client is a member of
* @ops: host1x client operations
@@ -44,7 +48,7 @@ struct host1x_client_ops {
*/
struct host1x_client {
struct list_head list;
- struct device *parent;
+ struct device *host;
struct device *dev;
struct iommu_group *group;
@@ -55,6 +59,10 @@ struct host1x_client {
struct host1x_syncpt **syncpts;
unsigned int num_syncpts;
+
+ struct host1x_client *parent;
+ unsigned int usecount;
+ struct mutex lock;
};
/*
@@ -72,8 +80,6 @@ struct host1x_bo_ops {
void (*unpin)(struct device *dev, struct sg_table *sgt);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
};
struct host1x_bo {
@@ -119,17 +125,6 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
bo->ops->munmap(bo, addr);
}
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
/*
* host1x syncpoints
*/
@@ -322,6 +317,9 @@ int host1x_device_exit(struct host1x_device *device);
int host1x_client_register(struct host1x_client *client);
int host1x_client_unregister(struct host1x_client *client);
+int host1x_client_suspend(struct host1x_client *client);
+int host1x_client_resume(struct host1x_client *client);
+
struct tegra_mipi_device;
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 0b84e13e88e2..5aca3d1bdb32 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,6 +160,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -308,6 +309,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
static inline void prep_transhuge_page(struct page *page) {}
+static inline bool is_transparent_hugepage(struct page *page)
+{
+ return false;
+}
+
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 26f3aeeae1ca..692c89ccf5df 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -425,6 +425,8 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
+ CHANNELMSG_22 = 22,
+ CHANNELMSG_TL_CONNECT_RESULT = 23,
CHANNELMSG_COUNT
};
@@ -1433,6 +1435,8 @@ struct hv_util_service {
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
+ int (*util_pre_suspend)(void);
+ int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 46b771d6999e..a254841bd315 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -413,6 +413,8 @@ struct ide_disk_ops {
sector_t);
int (*ioctl)(struct ide_drive_s *, struct block_device *,
fmode_t, unsigned int, unsigned long);
+ int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
+ fmode_t, unsigned int, unsigned long);
};
/* ATAPI device flags */
@@ -943,6 +945,10 @@ ide_devset_get(_name, _field); \
ide_devset_set(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+#define ide_devset_ro_field(_name, _field) \
+ide_devset_get(_name, _field); \
+IDE_DEVSET(_name, 0, get_##_name, NULL)
+
#define ide_devset_rw_flag(_name, _field) \
ide_devset_get_flag(_name, _field); \
ide_devset_set_flag(_name, _field); \
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 6e125e9b4187..837058bc1c9f 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -28,6 +28,7 @@ struct io_mapping {
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+#include <linux/pfn.h>
#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
@@ -64,12 +65,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
resource_size_t phys_addr;
- unsigned long pfn;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, mapping->prot);
+ return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ce44b687d02b..f613d8529863 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1403,7 +1403,6 @@ extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
-extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1b6d31da7cbc..e3279ef24d28 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -422,26 +422,6 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
-static inline unsigned long timespec_to_jiffies(const struct timespec *value)
-{
- struct timespec64 ts = timespec_to_timespec64(*value);
-
- return timespec64_to_jiffies(&ts);
-}
-
-static inline void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value)
-{
- struct timespec64 ts;
-
- jiffies_to_timespec64(jiffies, &ts);
- *value = timespec64_to_timespec(ts);
-}
-
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
-extern void jiffies_to_timeval(const unsigned long jiffies,
- struct timeval *value);
-
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 538c25e778c0..e89eb67356cb 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -157,8 +157,6 @@ static inline bool is_error_page(struct page *page)
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
-extern struct kmem_cache *kvm_vcpu_cache;
-
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -204,7 +202,7 @@ struct kvm_async_pf {
struct list_head queue;
struct kvm_vcpu *vcpu;
struct mm_struct *mm;
- gva_t gva;
+ gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
@@ -212,8 +210,8 @@ struct kvm_async_pf {
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
- struct kvm_arch_async_pf *arch);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
@@ -579,8 +577,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
memslot++)
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
@@ -723,10 +720,9 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
@@ -767,7 +763,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -775,8 +771,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -867,16 +867,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
-
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -982,10 +978,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1109,9 +1105,8 @@ enum kvm_stat_kind {
};
struct kvm_stat_data {
- int offset;
- int mode;
struct kvm *kvm;
+ struct kvm_stats_debugfs_item *dbgfs_item;
};
struct kvm_stats_debugfs_item {
@@ -1120,6 +1115,10 @@ struct kvm_stats_debugfs_item {
enum kvm_stat_kind kind;
int mode;
};
+
+#define KVM_DBGFS_GET_MODE(dbgfs_item) \
+ ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
@@ -1342,6 +1341,9 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 1c88e69db3d9..68e84cf42a3f 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
+#include <linux/types.h>
/*
* Address types:
@@ -51,4 +51,11 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+ bool dirty;
+};
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
index dd93c8d787b4..ec577f5f8707 100644
--- a/include/linux/leds-bd2802.h
+++ b/include/linux/leds-bd2802.h
@@ -11,7 +11,6 @@
#define _LEDS_BD2802_H_
struct bd2802_led_platform_data{
- int reset_gpio;
u8 rgb_time;
};
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 242258f7d837..75353e5f9d13 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -20,6 +20,7 @@
struct device;
struct led_pattern;
+struct device_node;
/*
* LED Core
*/
@@ -196,6 +197,11 @@ void devm_led_classdev_unregister(struct device *parent,
void led_classdev_suspend(struct led_classdev *led_cdev);
void led_classdev_resume(struct led_classdev *led_cdev);
+extern struct led_classdev *of_led_get(struct device_node *np, int index);
+extern void led_put(struct led_classdev *led_cdev);
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ int index);
+
/**
* led_blink_set - set blinking with software fallback
* @led_cdev: the LED to start blinking
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2dbde119721d..a36bdcb8d9e9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1109,6 +1109,11 @@ extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_op
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
+#ifdef CONFIG_COMPAT
+#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl,
+#else
+#define ATA_SCSI_COMPAT_IOCTL /* empty */
+#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
unsigned int cmd, void __user *arg);
@@ -1341,6 +1346,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
+ ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c50d01ef1414..664f52c6dd4c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -627,6 +627,13 @@ do { \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
+# define might_lock_nested(lock, subclass) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
+ _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
#define lockdep_assert_irqs_enabled() do { \
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
@@ -649,6 +656,7 @@ do { \
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
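
might_lock_nested() is the nested-subclass counterpart of might_lock(): it lets a function declare up front that it may later take a lock with a given lockdep subclass, even on paths that return before locking. A hedged usage sketch; the device structure and field names are assumptions.

/* Illustrative only: annotate that this helper may eventually take
 * ->child_mutex with SINGLE_DEPTH_NESTING, even when it bails out early.
 */
static void example_prepare_child(struct example_dev *dev)
{
	might_lock_nested(&dev->child_mutex, SINGLE_DEPTH_NESTING);

	if (!dev->needs_child_update)
		return;

	mutex_lock_nested(&dev->child_mutex, SINGLE_DEPTH_NESTING);
	/* ... update child state ... */
	mutex_unlock(&dev->child_mutex);
}
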
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e6f54ef6698b..a4dc45fbec0a 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -20,6 +20,16 @@
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
+/*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
+ CMDQ_WFE_WAIT_VALUE)
+
/** cmdq event maximum */
#define CMDQ_MAX_EVENT 0x3ff
@@ -45,6 +55,7 @@
enum cmdq_code {
CMDQ_CODE_MASK = 0x02,
CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_POLL = 0x08,
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
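
CMDQ_WFE_OPTION bundles the update and wait enable bits with a wait value of 1, matching the arg_b layout documented above. A hedged sketch of arg_b values a caller might derive from it; whether a driver actually uses the update-only form is an assumption, not something the patch states.

/* Illustrative only: CMDQ_WFE_OPTION is the arg_b for a blocking
 * wait-for-event (update + wait, wait value 1). Dropping CMDQ_WFE_WAIT
 * leaves an update-only form that merely rewrites the event token.
 */
u32 wfe_wait_arg_b  = CMDQ_WFE_OPTION;
u32 wfe_clear_arg_b = CMDQ_WFE_OPTION & ~CMDQ_WFE_WAIT;
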
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b38bbefabfab..079d17d96410 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -113,6 +113,9 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
@@ -127,10 +130,6 @@ void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
-int memblock_add_range(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size,
- int nid, enum memblock_flags flags);
-
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 4c75dae8dd29..0b8d791b6669 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -29,8 +29,6 @@ struct memory_block {
int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
- void *hw; /* optional pointer to fw/hw data */
- int (*phys_callback)(struct memory_block *);
struct device dev;
int nid; /* NID for this memory block */
};
@@ -55,19 +53,6 @@ struct memory_notify {
int status_change_nid;
};
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT (1<<0)
-
-struct memory_isolate_notify {
- unsigned long start_pfn; /* Start of range to check */
- unsigned int nr_pages; /* # pages in range to check */
- unsigned int pages_found; /* # pages owned found by callbacks */
-};
-
struct notifier_block;
struct mem_section;
@@ -94,27 +79,13 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
-{
- return 0;
-}
#else
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ba0dca6aac6e..ffa6ad12d84a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,7 +94,8 @@ extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+ int online_type, int nid);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 1fc75d2b4a38..4b63d3ecdcff 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -525,9 +525,6 @@ u8 db8500_prcmu_get_power_state_result(void);
void db8500_prcmu_enable_wakeups(u32 wakeups);
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
int db8500_prcmu_request_clock(u8 clock, bool enable);
-int db8500_prcmu_set_display_clocks(void);
-int db8500_prcmu_disable_dsipll(void);
-int db8500_prcmu_enable_dsipll(void);
void db8500_prcmu_config_abb_event_readout(u32 abb_events);
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
int db8500_prcmu_config_esram0_deep_sleep(u8 state);
@@ -682,21 +679,6 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
-static inline int db8500_prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e2571040c7e8..e6ee2ec35de9 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -321,21 +321,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return db8500_prcmu_is_ac_wake_requested();
}
-static inline int prcmu_set_display_clocks(void)
-{
- return db8500_prcmu_set_display_clocks();
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return db8500_prcmu_disable_dsipll();
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return db8500_prcmu_enable_dsipll();
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return db8500_prcmu_config_esram0_deep_sleep(state);
@@ -511,21 +496,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return false;
}
-static inline int prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index 1013e60c5b25..a57af878fd0c 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
#include <linux/regmap.h>
enum {
@@ -89,10 +90,6 @@ struct bd70528_data {
#define BD70528_REG_GPIO3_OUT 0x52
#define BD70528_REG_GPIO4_OUT 0x54
-/* clk control */
-
-#define BD70528_REG_CLK_OUT 0x2c
-
/* RTC */
#define BD70528_REG_RTC_COUNT_H 0x2d
@@ -309,21 +306,8 @@ enum {
#define BD70528_GPIO_IN_STATE_BASE 1
-#define BD70528_CLK_OUT_EN_MASK 0x1
-
/* RTC masks to mask out reserved bits */
-#define BD70528_MASK_RTC_SEC 0x7f
-#define BD70528_MASK_RTC_MINUTE 0x7f
-#define BD70528_MASK_RTC_HOUR_24H 0x80
-#define BD70528_MASK_RTC_HOUR_PM 0x20
-#define BD70528_MASK_RTC_HOUR 0x1f
-#define BD70528_MASK_RTC_DAY 0x3f
-#define BD70528_MASK_RTC_WEEK 0x07
-#define BD70528_MASK_RTC_MONTH 0x1f
-#define BD70528_MASK_RTC_YEAR 0xff
-#define BD70528_MASK_RTC_COUNT_L 0x7f
-
#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
/* Mask second, min and hour fields
* HW would support ALM irq for over 24h
@@ -332,7 +316,6 @@ enum {
* wake-up we limit ALM to 24H and only
* unmask sec, min and hour
*/
-#define BD70528_MASK_ALM_EN 0x7
#define BD70528_MASK_WAKE_EN 0x1
/* WDT masks */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
new file mode 100644
index 000000000000..017a4c01cb31
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2019 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71828_H__
+#define __LINUX_MFD_BD71828_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
+
+/* Regulator IDs */
+enum {
+ BD71828_BUCK1,
+ BD71828_BUCK2,
+ BD71828_BUCK3,
+ BD71828_BUCK4,
+ BD71828_BUCK5,
+ BD71828_BUCK6,
+ BD71828_BUCK7,
+ BD71828_LDO1,
+ BD71828_LDO2,
+ BD71828_LDO3,
+ BD71828_LDO4,
+ BD71828_LDO5,
+ BD71828_LDO6,
+ BD71828_LDO_SNVS,
+ BD71828_REGULATOR_AMOUNT,
+};
+
+#define BD71828_BUCK1267_VOLTS 0xEF
+#define BD71828_BUCK3_VOLTS 0x10
+#define BD71828_BUCK4_VOLTS 0x20
+#define BD71828_BUCK5_VOLTS 0x10
+#define BD71828_LDO_VOLTS 0x32
+/* LDO6 has a fixed 1.8V voltage */
+#define BD71828_LDO_6_VOLTAGE 1800000
+
+/* Registers and masks */
+
+/* MODE control */
+#define BD71828_REG_PS_CTRL_1 0x04
+#define BD71828_REG_PS_CTRL_2 0x05
+#define BD71828_REG_PS_CTRL_3 0x06
+
+//#define BD71828_REG_SWRESET 0x06
+#define BD71828_MASK_RUN_LVL_CTRL 0x30
+
+/* Regulator control masks */
+
+#define BD71828_MASK_RAMP_DELAY 0x6
+
+#define BD71828_MASK_RUN_EN 0x08
+#define BD71828_MASK_SUSP_EN 0x04
+#define BD71828_MASK_IDLE_EN 0x02
+#define BD71828_MASK_LPSR_EN 0x01
+
+#define BD71828_MASK_RUN0_EN 0x01
+#define BD71828_MASK_RUN1_EN 0x02
+#define BD71828_MASK_RUN2_EN 0x04
+#define BD71828_MASK_RUN3_EN 0x08
+
+#define BD71828_MASK_DVS_BUCK1_CTRL 0x10
+#define BD71828_DVS_BUCK1_CTRL_I2C 0
+#define BD71828_DVS_BUCK1_USE_RUNLVL 0x10
+
+#define BD71828_MASK_DVS_BUCK2_CTRL 0x20
+#define BD71828_DVS_BUCK2_CTRL_I2C 0
+#define BD71828_DVS_BUCK2_USE_RUNLVL 0x20
+
+#define BD71828_MASK_DVS_BUCK6_CTRL 0x40
+#define BD71828_DVS_BUCK6_CTRL_I2C 0
+#define BD71828_DVS_BUCK6_USE_RUNLVL 0x40
+
+#define BD71828_MASK_DVS_BUCK7_CTRL 0x80
+#define BD71828_DVS_BUCK7_CTRL_I2C 0
+#define BD71828_DVS_BUCK7_USE_RUNLVL 0x80
+
+#define BD71828_MASK_BUCK1267_VOLT 0xff
+#define BD71828_MASK_BUCK3_VOLT 0x1f
+#define BD71828_MASK_BUCK4_VOLT 0x3f
+#define BD71828_MASK_BUCK5_VOLT 0x1f
+#define BD71828_MASK_LDO_VOLT 0x3f
+
+/* Regulator control regs */
+#define BD71828_REG_BUCK1_EN 0x08
+#define BD71828_REG_BUCK1_CTRL 0x09
+#define BD71828_REG_BUCK1_MODE 0x0a
+#define BD71828_REG_BUCK1_IDLE_VOLT 0x0b
+#define BD71828_REG_BUCK1_SUSP_VOLT 0x0c
+#define BD71828_REG_BUCK1_VOLT 0x0d
+
+#define BD71828_REG_BUCK2_EN 0x12
+#define BD71828_REG_BUCK2_CTRL 0x13
+#define BD71828_REG_BUCK2_MODE 0x14
+#define BD71828_REG_BUCK2_IDLE_VOLT 0x15
+#define BD71828_REG_BUCK2_SUSP_VOLT 0x16
+#define BD71828_REG_BUCK2_VOLT 0x17
+
+#define BD71828_REG_BUCK3_EN 0x1c
+#define BD71828_REG_BUCK3_MODE 0x1d
+#define BD71828_REG_BUCK3_VOLT 0x1e
+
+#define BD71828_REG_BUCK4_EN 0x1f
+#define BD71828_REG_BUCK4_MODE 0x20
+#define BD71828_REG_BUCK4_VOLT 0x21
+
+#define BD71828_REG_BUCK5_EN 0x22
+#define BD71828_REG_BUCK5_MODE 0x23
+#define BD71828_REG_BUCK5_VOLT 0x24
+
+#define BD71828_REG_BUCK6_EN 0x25
+#define BD71828_REG_BUCK6_CTRL 0x26
+#define BD71828_REG_BUCK6_MODE 0x27
+#define BD71828_REG_BUCK6_IDLE_VOLT 0x28
+#define BD71828_REG_BUCK6_SUSP_VOLT 0x29
+#define BD71828_REG_BUCK6_VOLT 0x2a
+
+#define BD71828_REG_BUCK7_EN 0x2f
+#define BD71828_REG_BUCK7_CTRL 0x30
+#define BD71828_REG_BUCK7_MODE 0x31
+#define BD71828_REG_BUCK7_IDLE_VOLT 0x32
+#define BD71828_REG_BUCK7_SUSP_VOLT 0x33
+#define BD71828_REG_BUCK7_VOLT 0x34
+
+#define BD71828_REG_LDO1_EN 0x39
+#define BD71828_REG_LDO1_VOLT 0x3a
+#define BD71828_REG_LDO2_EN 0x3b
+#define BD71828_REG_LDO2_VOLT 0x3c
+#define BD71828_REG_LDO3_EN 0x3d
+#define BD71828_REG_LDO3_VOLT 0x3e
+#define BD71828_REG_LDO4_EN 0x3f
+#define BD71828_REG_LDO4_VOLT 0x40
+#define BD71828_REG_LDO5_EN 0x41
+#define BD71828_REG_LDO5_VOLT 0x43
+#define BD71828_REG_LDO5_VOLT_OPT 0x42
+#define BD71828_REG_LDO6_EN 0x44
+//#define BD71828_REG_LDO6_VOLT 0x4
+#define BD71828_REG_LDO7_EN 0x45
+#define BD71828_REG_LDO7_VOLT 0x46
+
+/* GPIO */
+
+#define BD71828_GPIO_DRIVE_MASK 0x2
+#define BD71828_GPIO_OPEN_DRAIN 0x0
+#define BD71828_GPIO_PUSH_PULL 0x2
+#define BD71828_GPIO_OUT_HI 0x1
+#define BD71828_GPIO_OUT_LO 0x0
+#define BD71828_GPIO_OUT_MASK 0x1
+
+#define BD71828_REG_GPIO_CTRL1 0x47
+#define BD71828_REG_GPIO_CTRL2 0x48
+#define BD71828_REG_GPIO_CTRL3 0x49
+#define BD71828_REG_IO_STAT 0xed
+
+/* RTC */
+#define BD71828_REG_RTC_SEC 0x4c
+#define BD71828_REG_RTC_MINUTE 0x4d
+#define BD71828_REG_RTC_HOUR 0x4e
+#define BD71828_REG_RTC_WEEK 0x4f
+#define BD71828_REG_RTC_DAY 0x50
+#define BD71828_REG_RTC_MONTH 0x51
+#define BD71828_REG_RTC_YEAR 0x52
+
+#define BD71828_REG_RTC_ALM0_SEC 0x53
+#define BD71828_REG_RTC_ALM_START BD71828_REG_RTC_ALM0_SEC
+#define BD71828_REG_RTC_ALM0_MINUTE 0x54
+#define BD71828_REG_RTC_ALM0_HOUR 0x55
+#define BD71828_REG_RTC_ALM0_WEEK 0x56
+#define BD71828_REG_RTC_ALM0_DAY 0x57
+#define BD71828_REG_RTC_ALM0_MONTH 0x58
+#define BD71828_REG_RTC_ALM0_YEAR 0x59
+#define BD71828_REG_RTC_ALM0_MASK 0x61
+
+#define BD71828_REG_RTC_ALM1_SEC 0x5a
+#define BD71828_REG_RTC_ALM1_MINUTE 0x5b
+#define BD71828_REG_RTC_ALM1_HOUR 0x5c
+#define BD71828_REG_RTC_ALM1_WEEK 0x5d
+#define BD71828_REG_RTC_ALM1_DAY 0x5e
+#define BD71828_REG_RTC_ALM1_MONTH 0x5f
+#define BD71828_REG_RTC_ALM1_YEAR 0x60
+#define BD71828_REG_RTC_ALM1_MASK 0x62
+
+#define BD71828_REG_RTC_ALM2 0x63
+#define BD71828_REG_RTC_START BD71828_REG_RTC_SEC
+
+/* Charger/Battery */
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_CHG_FULL 0xd2
+
+/* LEDs */
+#define BD71828_REG_LED_CTRL 0x4A
+#define BD71828_MASK_LED_AMBER 0x80
+#define BD71828_MASK_LED_GREEN 0x40
+#define BD71828_LED_ON 0xff
+#define BD71828_LED_OFF 0x0
+
+/* IRQ registers */
+#define BD71828_REG_INT_MASK_BUCK 0xd3
+#define BD71828_REG_INT_MASK_DCIN1 0xd4
+#define BD71828_REG_INT_MASK_DCIN2 0xd5
+#define BD71828_REG_INT_MASK_VSYS 0xd6
+#define BD71828_REG_INT_MASK_CHG 0xd7
+#define BD71828_REG_INT_MASK_BAT 0xd8
+#define BD71828_REG_INT_MASK_BAT_MON1 0xd9
+#define BD71828_REG_INT_MASK_BAT_MON2 0xda
+#define BD71828_REG_INT_MASK_BAT_MON3 0xdb
+#define BD71828_REG_INT_MASK_BAT_MON4 0xdc
+#define BD71828_REG_INT_MASK_TEMP 0xdd
+#define BD71828_REG_INT_MASK_RTC 0xde
+
+#define BD71828_REG_INT_MAIN 0xdf
+#define BD71828_REG_INT_BUCK 0xe0
+#define BD71828_REG_INT_DCIN1 0xe1
+#define BD71828_REG_INT_DCIN2 0xe2
+#define BD71828_REG_INT_VSYS 0xe3
+#define BD71828_REG_INT_CHG 0xe4
+#define BD71828_REG_INT_BAT 0xe5
+#define BD71828_REG_INT_BAT_MON1 0xe6
+#define BD71828_REG_INT_BAT_MON2 0xe7
+#define BD71828_REG_INT_BAT_MON3 0xe8
+#define BD71828_REG_INT_BAT_MON4 0xe9
+#define BD71828_REG_INT_TEMP 0xea
+#define BD71828_REG_INT_RTC 0xeb
+#define BD71828_REG_INT_UPDATE 0xec
+
+#define BD71828_MAX_REGISTER BD71828_REG_IO_STAT
+
+/* Masks for main IRQ register bits */
+enum {
+ BD71828_INT_BUCK,
+#define BD71828_INT_BUCK_MASK BIT(BD71828_INT_BUCK)
+ BD71828_INT_DCIN,
+#define BD71828_INT_DCIN_MASK BIT(BD71828_INT_DCIN)
+ BD71828_INT_VSYS,
+#define BD71828_INT_VSYS_MASK BIT(BD71828_INT_VSYS)
+ BD71828_INT_CHG,
+#define BD71828_INT_CHG_MASK BIT(BD71828_INT_CHG)
+ BD71828_INT_BAT,
+#define BD71828_INT_BAT_MASK BIT(BD71828_INT_BAT)
+ BD71828_INT_BAT_MON,
+#define BD71828_INT_BAT_MON_MASK BIT(BD71828_INT_BAT_MON)
+ BD71828_INT_TEMP,
+#define BD71828_INT_TEMP_MASK BIT(BD71828_INT_TEMP)
+ BD71828_INT_RTC,
+#define BD71828_INT_RTC_MASK BIT(BD71828_INT_RTC)
+};
+
+/* Interrupts */
+enum {
+ /* BUCK reg interrupts */
+ BD71828_INT_BUCK1_OCP,
+ BD71828_INT_BUCK2_OCP,
+ BD71828_INT_BUCK3_OCP,
+ BD71828_INT_BUCK4_OCP,
+ BD71828_INT_BUCK5_OCP,
+ BD71828_INT_BUCK6_OCP,
+ BD71828_INT_BUCK7_OCP,
+ BD71828_INT_PGFAULT,
+ /* DCIN1 interrupts */
+ BD71828_INT_DCIN_DET,
+ BD71828_INT_DCIN_RMV,
+ BD71828_INT_CLPS_OUT,
+ BD71828_INT_CLPS_IN,
+ /* DCIN2 interrupts */
+ BD71828_INT_DCIN_MON_RES,
+ BD71828_INT_DCIN_MON_DET,
+ BD71828_INT_LONGPUSH,
+ BD71828_INT_MIDPUSH,
+ BD71828_INT_SHORTPUSH,
+ BD71828_INT_PUSH,
+ BD71828_INT_WDOG,
+ BD71828_INT_SWRESET,
+ /* Vsys */
+ BD71828_INT_VSYS_UV_RES,
+ BD71828_INT_VSYS_UV_DET,
+ BD71828_INT_VSYS_LOW_RES,
+ BD71828_INT_VSYS_LOW_DET,
+ BD71828_INT_VSYS_HALL_IN,
+ BD71828_INT_VSYS_HALL_TOGGLE,
+ BD71828_INT_VSYS_MON_RES,
+ BD71828_INT_VSYS_MON_DET,
+ /* Charger */
+ BD71828_INT_CHG_DCIN_ILIM,
+ BD71828_INT_CHG_TOPOFF_TO_DONE,
+ BD71828_INT_CHG_WDG_TEMP,
+ BD71828_INT_CHG_WDG_TIME,
+ BD71828_INT_CHG_RECHARGE_RES,
+ BD71828_INT_CHG_RECHARGE_DET,
+ BD71828_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71828_INT_CHG_STATE_TRANSITION,
+ /* Battery */
+ BD71828_INT_BAT_TEMP_NORMAL,
+ BD71828_INT_BAT_TEMP_ERANGE,
+ BD71828_INT_BAT_TEMP_WARN,
+ BD71828_INT_BAT_REMOVED,
+ BD71828_INT_BAT_DETECTED,
+ BD71828_INT_THERM_REMOVED,
+ BD71828_INT_THERM_DETECTED,
+ /* Battery Mon 1 */
+ BD71828_INT_BAT_DEAD,
+ BD71828_INT_BAT_SHORTC_RES,
+ BD71828_INT_BAT_SHORTC_DET,
+ BD71828_INT_BAT_LOW_VOLT_RES,
+ BD71828_INT_BAT_LOW_VOLT_DET,
+ BD71828_INT_BAT_OVER_VOLT_RES,
+ BD71828_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 */
+ BD71828_INT_BAT_MON_RES,
+ BD71828_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) */
+ BD71828_INT_BAT_CC_MON1,
+ BD71828_INT_BAT_CC_MON2,
+ BD71828_INT_BAT_CC_MON3,
+ /* Battery Mon 4 */
+ BD71828_INT_BAT_OVER_CURR_1_RES,
+ BD71828_INT_BAT_OVER_CURR_1_DET,
+ BD71828_INT_BAT_OVER_CURR_2_RES,
+ BD71828_INT_BAT_OVER_CURR_2_DET,
+ BD71828_INT_BAT_OVER_CURR_3_RES,
+ BD71828_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature */
+ BD71828_INT_TEMP_BAT_LOW_RES,
+ BD71828_INT_TEMP_BAT_LOW_DET,
+ BD71828_INT_TEMP_BAT_HI_RES,
+ BD71828_INT_TEMP_BAT_HI_DET,
+ BD71828_INT_TEMP_CHIP_OVER_125_RES,
+ BD71828_INT_TEMP_CHIP_OVER_125_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+ /* RTC Alarm */
+ BD71828_INT_RTC0,
+ BD71828_INT_RTC1,
+ BD71828_INT_RTC2,
+};
+
+#define BD71828_INT_BUCK1_OCP_MASK 0x1
+#define BD71828_INT_BUCK2_OCP_MASK 0x2
+#define BD71828_INT_BUCK3_OCP_MASK 0x4
+#define BD71828_INT_BUCK4_OCP_MASK 0x8
+#define BD71828_INT_BUCK5_OCP_MASK 0x10
+#define BD71828_INT_BUCK6_OCP_MASK 0x20
+#define BD71828_INT_BUCK7_OCP_MASK 0x40
+#define BD71828_INT_PGFAULT_MASK 0x80
+
+#define BD71828_INT_DCIN_DET_MASK 0x1
+#define BD71828_INT_DCIN_RMV_MASK 0x2
+#define BD71828_INT_CLPS_OUT_MASK 0x4
+#define BD71828_INT_CLPS_IN_MASK 0x8
+ /* DCIN2 interrupts */
+#define BD71828_INT_DCIN_MON_RES_MASK 0x1
+#define BD71828_INT_DCIN_MON_DET_MASK 0x2
+#define BD71828_INT_LONGPUSH_MASK 0x4
+#define BD71828_INT_MIDPUSH_MASK 0x8
+#define BD71828_INT_SHORTPUSH_MASK 0x10
+#define BD71828_INT_PUSH_MASK 0x20
+#define BD71828_INT_WDOG_MASK 0x40
+#define BD71828_INT_SWRESET_MASK 0x80
+ /* Vsys */
+#define BD71828_INT_VSYS_UV_RES_MASK 0x1
+#define BD71828_INT_VSYS_UV_DET_MASK 0x2
+#define BD71828_INT_VSYS_LOW_RES_MASK 0x4
+#define BD71828_INT_VSYS_LOW_DET_MASK 0x8
+#define BD71828_INT_VSYS_HALL_IN_MASK 0x10
+#define BD71828_INT_VSYS_HALL_TOGGLE_MASK 0x20
+#define BD71828_INT_VSYS_MON_RES_MASK 0x40
+#define BD71828_INT_VSYS_MON_DET_MASK 0x80
+ /* Charger */
+#define BD71828_INT_CHG_DCIN_ILIM_MASK 0x1
+#define BD71828_INT_CHG_TOPOFF_TO_DONE_MASK 0x2
+#define BD71828_INT_CHG_WDG_TEMP_MASK 0x4
+#define BD71828_INT_CHG_WDG_TIME_MASK 0x8
+#define BD71828_INT_CHG_RECHARGE_RES_MASK 0x10
+#define BD71828_INT_CHG_RECHARGE_DET_MASK 0x20
+#define BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK 0x40
+#define BD71828_INT_CHG_STATE_TRANSITION_MASK 0x80
+ /* Battery */
+#define BD71828_INT_BAT_TEMP_NORMAL_MASK 0x1
+#define BD71828_INT_BAT_TEMP_ERANGE_MASK 0x2
+#define BD71828_INT_BAT_TEMP_WARN_MASK 0x4
+#define BD71828_INT_BAT_REMOVED_MASK 0x10
+#define BD71828_INT_BAT_DETECTED_MASK 0x20
+#define BD71828_INT_THERM_REMOVED_MASK 0x40
+#define BD71828_INT_THERM_DETECTED_MASK 0x80
+ /* Battery Mon 1 */
+#define BD71828_INT_BAT_DEAD_MASK 0x2
+#define BD71828_INT_BAT_SHORTC_RES_MASK 0x4
+#define BD71828_INT_BAT_SHORTC_DET_MASK 0x8
+#define BD71828_INT_BAT_LOW_VOLT_RES_MASK 0x10
+#define BD71828_INT_BAT_LOW_VOLT_DET_MASK 0x20
+#define BD71828_INT_BAT_OVER_VOLT_RES_MASK 0x40
+#define BD71828_INT_BAT_OVER_VOLT_DET_MASK 0x80
+ /* Battery Mon 2 */
+#define BD71828_INT_BAT_MON_RES_MASK 0x1
+#define BD71828_INT_BAT_MON_DET_MASK 0x2
+ /* Battery Mon 3 (Coulomb counter) */
+#define BD71828_INT_BAT_CC_MON1_MASK 0x1
+#define BD71828_INT_BAT_CC_MON2_MASK 0x2
+#define BD71828_INT_BAT_CC_MON3_MASK 0x4
+ /* Battery Mon 4 */
+#define BD71828_INT_BAT_OVER_CURR_1_RES_MASK 0x1
+#define BD71828_INT_BAT_OVER_CURR_1_DET_MASK 0x2
+#define BD71828_INT_BAT_OVER_CURR_2_RES_MASK 0x4
+#define BD71828_INT_BAT_OVER_CURR_2_DET_MASK 0x8
+#define BD71828_INT_BAT_OVER_CURR_3_RES_MASK 0x10
+#define BD71828_INT_BAT_OVER_CURR_3_DET_MASK 0x20
+ /* Temperature */
+#define BD71828_INT_TEMP_BAT_LOW_RES_MASK 0x1
+#define BD71828_INT_TEMP_BAT_LOW_DET_MASK 0x2
+#define BD71828_INT_TEMP_BAT_HI_RES_MASK 0x4
+#define BD71828_INT_TEMP_BAT_HI_DET_MASK 0x8
+#define BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK 0x10
+#define BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK 0x20
+#define BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK 0x40
+#define BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK 0x80
+ /* RTC Alarm */
+#define BD71828_INT_RTC0_MASK 0x1
+#define BD71828_INT_RTC1_MASK 0x2
+#define BD71828_INT_RTC2_MASK 0x4
+
+#define BD71828_OUT_TYPE_MASK 0x2
+#define BD71828_OUT_TYPE_OPEN_DRAIN 0x0
+#define BD71828_OUT_TYPE_CMOS 0x2
+
+#endif /* __LINUX_MFD_BD71828_H__ */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index 7f2dbde402a1..bee2474a8f9f 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -191,12 +191,6 @@ enum {
#define IRQ_ON_REQ 0x02
#define IRQ_STBY_REQ 0x01
-/* BD718XX_REG_OUT32K bits */
-#define BD718XX_OUT32K_EN 0x01
-
-/* BD7183XX gated clock rate */
-#define BD718XX_CLK_RATE 32768
-
/* ROHM BD718XX irqs */
enum {
BD718XX_INT_STBY_REQ,
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index bff15ac26f2c..4283b5b33e04 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -4,17 +4,83 @@
#ifndef __LINUX_MFD_ROHM_H__
#define __LINUX_MFD_ROHM_H__
-enum {
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71837 = 0,
ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_BD70528,
+ ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_AMOUNT
};
struct rohm_regmap_dev {
- unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
};
+enum {
+ ROHM_DVS_LEVEL_UNKNOWN,
+ ROHM_DVS_LEVEL_RUN,
+ ROHM_DVS_LEVEL_IDLE,
+ ROHM_DVS_LEVEL_SUSPEND,
+ ROHM_DVS_LEVEL_LPSR,
+ ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
+};
+
+/**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+ *
+ * @level_map: bitmap representing supported run-levels for this
+ * regulator
+ * @run_reg: register address for regulator config at 'run' state
+ * @run_mask: value mask for regulator voltages at 'run' state
+ * @run_on_mask: enable mask for regulator at 'run' state
+ * @idle_reg: register address for regulator config at 'idle' state
+ * @idle_mask: value mask for regulator voltages at 'idle' state
+ * @idle_on_mask: enable mask for regulator at 'idle' state
+ * @suspend_reg: register address for regulator config at 'suspend' state
+ * @suspend_mask: value mask for regulator voltages at 'suspend' state
+ * @suspend_on_mask: enable mask for regulator at 'suspend' state
+ * @lpsr_reg: register address for regulator config at 'lpsr' state
+ * @lpsr_mask: value mask for regulator voltages at 'lpsr' state
+ * @lpsr_on_mask: enable mask for regulator at 'lpsr' state
+ *
+ * Description of ROHM PMICs voltage configuration registers for different
+ * system states. This is used to correctly configure the PMIC at startup
+ * based on values read from DT.
+ */
+struct rohm_dvs_config {
+ uint64_t level_map;
+ unsigned int run_reg;
+ unsigned int run_mask;
+ unsigned int run_on_mask;
+ unsigned int idle_reg;
+ unsigned int idle_mask;
+ unsigned int idle_on_mask;
+ unsigned int suspend_reg;
+ unsigned int suspend_mask;
+ unsigned int suspend_on_mask;
+ unsigned int lpsr_reg;
+ unsigned int lpsr_mask;
+ unsigned int lpsr_on_mask;
+};
+
+#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap);
+
+#else
+static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap)
+{
+ return 0;
+}
+#endif
+
#endif
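
rohm_regulator_set_dvs_levels() is intended to be called while a regulator is registered, typically from a regulator_desc .of_parse_cb, so DT-provided run-level voltages land in the registers named by the rohm_dvs_config table. A hedged sketch using BD71828 BUCK1 registers from the header above; the table contents, the level_map encoding and the callback wiring are assumptions.

/* Illustrative DVS table and of_parse_cb wiring; not taken from a driver. */
static const struct rohm_dvs_config example_buck1_dvs = {
	.level_map	= ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE, /* assumed encoding */
	.run_reg	= BD71828_REG_BUCK1_VOLT,
	.run_mask	= BD71828_MASK_BUCK1267_VOLT,
	.idle_reg	= BD71828_REG_BUCK1_IDLE_VOLT,
	.idle_mask	= BD71828_MASK_BUCK1267_VOLT,
};

static int example_buck1_of_parse_cb(struct device_node *np,
				     const struct regulator_desc *desc,
				     struct regulator_config *cfg)
{
	return rohm_regulator_set_dvs_levels(&example_buck1_dvs, np, desc,
					     cfg->regmap);
}
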
diff --git a/include/linux/mfd/rohm-shared.h b/include/linux/mfd/rohm-shared.h
new file mode 100644
index 000000000000..53dd7f638bfd
--- /dev/null
+++ b/include/linux/mfd/rohm-shared.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+
+#ifndef __LINUX_MFD_ROHM_SHARED_H__
+#define __LINUX_MFD_ROHM_SHARED_H__
+
+/* RTC definitions shared between BD70528 and BD71828 */
+
+#define BD70528_MASK_RTC_SEC 0x7f
+#define BD70528_MASK_RTC_MINUTE 0x7f
+#define BD70528_MASK_RTC_HOUR_24H 0x80
+#define BD70528_MASK_RTC_HOUR_PM 0x20
+#define BD70528_MASK_RTC_HOUR 0x3f
+#define BD70528_MASK_RTC_DAY 0x3f
+#define BD70528_MASK_RTC_WEEK 0x07
+#define BD70528_MASK_RTC_MONTH 0x1f
+#define BD70528_MASK_RTC_YEAR 0xff
+#define BD70528_MASK_ALM_EN 0x7
+
+#endif /* __LINUX_MFD_ROHM_SHARED_H__ */
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 112dc66262cc..7f20e9b502a5 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -23,6 +23,11 @@ extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
+extern struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -45,6 +50,15 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
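
syscon_regmap_lookup_by_phandle_args() extends the plain phandle lookup with argument cells, so a consumer can pass, for example, a register offset directly in the phandle. A hedged sketch; the property name and the meaning of the cell are assumptions.

/* Illustrative only: look up a syscon phandle with one argument cell,
 * e.g. "acme,syscon = <&sysctrl 0x40>;", and treat the cell as an offset.
 */
static int example_get_syscon(struct device *dev, struct regmap **map,
			      unsigned int *offset)
{
	unsigned int args[1];

	*map = syscon_regmap_lookup_by_phandle_args(dev->of_node,
						    "acme,syscon", 1, args);
	if (IS_ERR(*map))
		return PTR_ERR(*map);

	*offset = args[0];	/* first cell after the phandle */
	return 0;
}
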
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
new file mode 100644
index 000000000000..bb8d2e276668
--- /dev/null
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_CODEC_RPM_CLK_GATE 0x0002
+#define WCD934X_CODEC_RPM_CLK_GATE_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG 0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ BIT(0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ BIT(1)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_RST_CTL 0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL 0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0 0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2 0x0023
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL 0x0025
+#define WCD934X_EFUSE_SENSE_STATE_MASK GENMASK(4, 1)
+#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
+#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
+#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG 0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG 0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG 0x006e
+#define WCD934X_CPE_FLL_CONFIG_CTL_2 0x0111
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD 0x0213
+#define WCD934X_CPE_SS_SVA_CFG 0x0214
+#define WCD934X_CPE_SS_DMIC0_CTL 0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL 0x0219
+#define WCD934X_DMIC_RATE_MASK GENMASK(3, 1)
+#define WCD934X_CPE_SS_DMIC2_CTL 0x021a
+#define WCD934X_CPE_SS_DMIC_CFG 0x021b
+#define WCD934X_CPE_SS_DMIC_CFG 0x021b
+#define WCD934X_CPE_SS_CPAR_CFG 0x021c
+#define WCD934X_INTR_PIN1_MASK0 0x0409
+#define WCD934X_INTR_PIN1_STATUS0 0x0411
+#define WCD934X_INTR_PIN1_CLEAR0 0x0419
+#define WCD934X_INTR_PIN2_CLEAR3 0x0434
+#define WCD934X_INTR_LEVEL0 0x0461
+/* INTR_REG 0 */
+#define WCD934X_IRQ_SLIMBUS 0
+#define WCD934X_IRQ_MISC 1
+#define WCD934X_IRQ_HPH_PA_OCPL_FAULT 2
+#define WCD934X_IRQ_HPH_PA_OCPR_FAULT 3
+#define WCD934X_IRQ_EAR_PA_OCP_FAULT 4
+#define WCD934X_IRQ_HPH_PA_CNPL_COMPLETE 5
+#define WCD934X_IRQ_HPH_PA_CNPR_COMPLETE 6
+#define WCD934X_IRQ_EAR_PA_CNP_COMPLETE 7
+/* INTR_REG 1 */
+#define WCD934X_IRQ_MBHC_SW_DET 8
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_DET 9
+#define WCD934X_IRQ_MBHC_BUTTON_PRESS_DET 10
+#define WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET 11
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET 12
+#define WCD934X_IRQ_RESERVED_0 13
+#define WCD934X_IRQ_RESERVED_1 14
+#define WCD934X_IRQ_RESERVED_2 15
+/* INTR_REG 2 */
+#define WCD934X_IRQ_LINE_PA1_CNP_COMPLETE 16
+#define WCD934X_IRQ_LINE_PA2_CNP_COMPLETE 17
+#define WCD934X_IRQ_SLNQ_ANALOG_ERROR 18
+#define WCD934X_IRQ_RESERVED_3 19
+#define WCD934X_IRQ_SOUNDWIRE 20
+#define WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE 21
+#define WCD934X_IRQ_RCO_ERROR 22
+#define WCD934X_IRQ_CPE_ERROR 23
+/* INTR_REG 3 */
+#define WCD934X_IRQ_MAD_AUDIO 24
+#define WCD934X_IRQ_MAD_BEACON 25
+#define WCD934X_IRQ_MAD_ULTRASOUND 26
+#define WCD934X_IRQ_VBAT_ATTACK 27
+#define WCD934X_IRQ_VBAT_RESTORE 28
+#define WCD934X_IRQ_CPE1_INTR 29
+#define WCD934X_IRQ_RESERVED_4 30
+#define WCD934X_IRQ_SLNQ_DIGITAL 31
+#define WCD934X_NUM_IRQS 32
+#define WCD934X_ANA_BIAS 0x0601
+#define WCD934X_ANA_BIAS_EN_MASK BIT(7)
+#define WCD934X_ANA_BIAS_EN BIT(7)
+#define WCD934X_ANA_PRECHRG_EN_MASK BIT(6)
+#define WCD934X_ANA_PRECHRG_EN BIT(6)
+#define WCD934X_ANA_PRECHRG_MODE_MASK BIT(5)
+#define WCD934X_ANA_PRECHRG_MODE_AUTO BIT(5)
+#define WCD934X_ANA_RCO 0x0603
+#define WCD934X_ANA_RCO_BG_EN_MASK BIT(7)
+#define WCD934X_ANA_RCO_BG_ENABLE BIT(7)
+#define WCD934X_ANA_BUCK_CTL 0x0606
+#define WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK GENMASK(1, 0)
+#define WCD934X_ANA_BUCK_PRE_EN2_MASK BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN2_ENABLE BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN1_MASK BIT(1)
+#define WCD934X_ANA_BUCK_PRE_EN1_ENABLE BIT(1)
+#define WCD934X_ANA_BUCK_HI_ACCU_EN_MASK BIT(2)
+#define WCD934X_ANA_BUCK_HI_ACCU_ENABLE BIT(2)
+#define WCD934X_ANA_RX_SUPPLIES 0x0608
+#define WCD934X_ANA_HPH 0x0609
+#define WCD934X_ANA_EAR 0x060a
+#define WCD934X_ANA_LO_1_2 0x060b
+#define WCD934X_ANA_AMIC1 0x060e
+#define WCD934X_ANA_AMIC2 0x060f
+#define WCD934X_ANA_AMIC3 0x0610
+#define WCD934X_ANA_AMIC4 0x0611
+#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_ZDET 0x0616
+#define WCD934X_ANA_MBHC_RESULT_1 0x0617
+#define WCD934X_ANA_MBHC_RESULT_2 0x0618
+#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MICB1 0x0622
+#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB_PULL_UP 0x80
+#define WCD934X_ANA_MICB_ENABLE 0x40
+#define WCD934X_ANA_MICB_DISABLE 0x0
+#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB3 0x0625
+#define WCD934X_ANA_MICB4 0x0626
+#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MICB1_TEST_CTL_1 0x066b
+#define WCD934X_MICB1_TEST_CTL_2 0x066c
+#define WCD934X_MICB2_TEST_CTL_1 0x066e
+#define WCD934X_MICB3_TEST_CTL_1 0x0671
+#define WCD934X_MICB4_TEST_CTL_1 0x0674
+#define WCD934X_CLASSH_MODE_1 0x0697
+#define WCD934X_CLASSH_MODE_2 0x0698
+#define WCD934X_CLASSH_MODE_3 0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1 0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2 0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1 0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2 0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3 0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4 0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5 0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D 0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL 0x06a2
+#define WCD934X_RX_OCP_CTL 0x06b6
+#define WCD934X_RX_OCP_COUNT 0x06b7
+#define WCD934X_HPH_CNP_EN 0x06cb
+#define WCD934X_HPH_CNP_WG_CTL 0x06cc
+#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
+#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_L_EN 0x06d3
+#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
+#define WCD934X_HPH_GAIN_SRC_SEL_REGISTER BIT(5)
+#define WCD934X_HPH_L_TEST 0x06d4
+#define WCD934X_HPH_R_EN 0x06d6
+#define WCD934X_HPH_R_TEST 0x06d7
+#define WCD934X_HPH_OCP_DET_MASK BIT(0)
+#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
+#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
+#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
+#define WCD934X_EXT_CLK_BUF_EN_MASK BIT(7)
+#define WCD934X_EXT_CLK_BUF_EN BIT(7)
+#define WCD934X_EXT_CLK_DIV_RATIO_MASK GENMASK(5, 4)
+#define WCD934X_EXT_CLK_DIV_BY_2 0x10
+#define WCD934X_MCLK_SRC_MASK BIT(1)
+#define WCD934X_MCLK_SRC_EXT_CLK 0
+#define WCD934X_MCLK_SRC_MASK BIT(1)
+#define WCD934X_MCLK_EN_MASK BIT(0)
+#define WCD934X_MCLK_EN BIT(0)
+#define WCD934X_CLK_SYS_MCLK2_PRG1 0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2 0x0713
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP 0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP 0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1 0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
+#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
+#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1 0x073a
+#define WCD934X_HPH_AUTOCHOP_TIMER_EN_MASK BIT(1)
+#define WCD934X_HPH_AUTOCHOP_TIMER_ENABLE BIT(1)
+#define WCD934X_CDC_TX0_TX_PATH_CTL 0x0a31
+#define WCD934X_CDC_TX_PATH_CTL_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_TX_PATH_CTL(dec) (0xa31 + dec * 0x10)
+#define WCD934X_CDC_TX0_TX_PATH_CFG0 0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1 0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL 0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL 0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG 0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC2 0x0a39
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ_MASK BIT(1)
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ BIT(1)
+#define WCD934X_CDC_TX1_TX_PATH_CTL 0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0 0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1 0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL 0x0a44
+#define WCD934X_CDC_TX2_TX_PATH_CTL 0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0 0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1 0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL 0x0a54
+#define WCD934X_CDC_TX3_TX_PATH_CTL 0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0 0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1 0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL 0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL 0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG 0x0a66
+#define WCD934X_CDC_TX4_TX_PATH_CTL 0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0 0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1 0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL 0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL 0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG 0x0a76
+#define WCD934X_CDC_TX5_TX_PATH_CTL 0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0 0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1 0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL 0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL 0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG 0x0a86
+#define WCD934X_CDC_TX6_TX_PATH_CTL 0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0 0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1 0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL 0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL 0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG 0x0a96
+#define WCD934X_CDC_TX7_TX_PATH_CTL 0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0 0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1 0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL 0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL 0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG 0x0aa6
+#define WCD934X_CDC_TX8_TX_PATH_CTL 0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0 0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1 0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL 0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL 0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG 0x0ab6
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0 0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0 0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0 0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0 0x0acf
+#define WCD934X_CDC_COMPANDER1_CTL0 0x0b01
+#define WCD934X_COMP_CLK_EN_MASK BIT(0)
+#define WCD934X_COMP_CLK_ENABLE BIT(0)
+#define WCD934X_COMP_SOFT_RST_MASK BIT(1)
+#define WCD934X_COMP_SOFT_RST_ENABLE BIT(1)
+#define WCD934X_COMP_HALT_MASK BIT(2)
+#define WCD934X_COMP_HALT BIT(2)
+#define WCD934X_COMP_SOFT_RST_DISABLE 0
+#define WCD934X_CDC_COMPANDER1_CTL7 0x0b08
+#define WCD934X_HPH_LOW_PWR_MODE_EN_MASK BIT(5)
+#define WCD934X_CDC_COMPANDER2_CTL7 0x0b10
+#define WCD934X_CDC_COMPANDER7_CTL3 0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL7 0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL3 0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL7 0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL 0x0b41
+#define WCD934X_CDC_RX_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_DISABLE 0
+#define WCD934X_RX_CLK_EN_MASK BIT(5)
+#define WCD934X_RX_CLK_ENABLE BIT(5)
+#define WCD934X_RX_RESET_MASK BIT(6)
+#define WCD934X_RX_RESET_ENABLE BIT(6)
+#define WCD934X_RX_RESET_DISABLE 0
+#define WCD934X_RX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_RX_PCM_RATE_F_48K 0x04
+#define WCD934X_CDC_RX_PATH_CTL(rx) (0xb41 + rx * 0x14)
+#define WCD934X_CDC_MIX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX0_RX_PATH_CFG0 0x0b42
+#define WCD934X_RX_DLY_ZN_EN_MASK BIT(3)
+#define WCD934X_RX_DLY_ZN_ENABLE BIT(3)
+#define WCD934X_RX_DLY_ZN_DISABLE 0
+#define WCD934X_CDC_RX0_RX_PATH_CFG1 0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2 0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL 0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL 0x0b46
+#define WCD934X_CDC_RX_MIX_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_MIX_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_MIX_CTL(rx) (0xb46 + rx * 0x14)
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG 0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL 0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0 0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL 0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL 0x0b55
+#define WCD934X_RX_PATH_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_RX_PATH_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PATH_PGA_MUTE_DISABLE 0
+#define WCD934X_CDC_RX_PATH_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG0 0x0b56
+#define WCD934X_HPH_CMP_EN_MASK BIT(1)
+#define WCD934X_HPH_CMP_ENABLE BIT(1)
+#define WCD934X_HPH_CMP_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG2 0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL 0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL 0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG 0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL 0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0 0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC3 0x0b60
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_MASK GENMASK(5, 2)
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P3125 0x14
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000 0
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL 0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL 0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0 0x0b6a
+#define WCD934X_CDC_RX_PATH_CFG_HD2_EN_MASK BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_ENABLE BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_DISABLE 0
+#define WCD934X_CDC_RX2_RX_PATH_CFG2 0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL 0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL 0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG 0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL 0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0 0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC3 0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL 0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL 0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0 0x0b6e
+#define WCD934X_CDC_RX3_RX_PATH_CFG2 0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL 0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL 0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG 0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL 0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0 0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL 0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL 0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0 0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG2 0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL 0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL 0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG 0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL 0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0 0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL 0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL 0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0 0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1 0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2 0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL 0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL 0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG 0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL 0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC1 0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0 0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL 0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL 0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0 0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1 0x0be3
+#define WCD934X_RX_SMART_BOOST_EN_MASK BIT(0)
+#define WCD934X_RX_SMART_BOOST_ENABLE BIT(0)
+#define WCD934X_RX_SMART_BOOST_DISABLE 0
+#define WCD934X_CDC_RX8_RX_PATH_CFG2 0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL 0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL 0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG 0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL 0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC1 0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0 0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL 0x0bf3
+#define WCD934X_CDC_CLSH_DECAY_CTRL 0x0c03
+#define WCD934X_CDC_CLSH_K2_MSB 0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB 0x0c0b
+#define WCD934X_CDC_CLSH_TEST0 0x0c0f
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL 0x0c19
+#define WCD934X_BOOST_PATH_CLK_EN_MASK BIT(4)
+#define WCD934X_BOOST_PATH_CLK_ENABLE BIT(4)
+#define WCD934X_BOOST_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_BOOST0_BOOST_CTL 0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1 0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2 0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL 0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL 0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1 0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2 0x0c24
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0 0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1 0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2 0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3 0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS 0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL 0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL 0x0cb9
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0 0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG0(i) (0xd01 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_SEL_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1 0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG1(i) (0xd02 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0 0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1 0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0 0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1 0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0 0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1 0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0 0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1 0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0 0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1 0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0 0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1 0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0 0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1 0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2 0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3 0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4 0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0 0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1 0x0d19
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0 0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1 0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0 0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1 0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0 0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1 0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0 0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0 0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0 0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0 0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0 0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0 0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0 0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0 0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0 0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1 0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2 0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3 0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0 0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1 0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2 0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3 0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0 0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1 0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2 0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3 0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
+#define WCD934X_CDC_MCLK_EN_MASK BIT(0)
+#define WCD934X_CDC_MCLK_EN_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
+#define WCD934X_CDC_FS_MCLK_CNT_EN_MASK BIT(0)
+#define WCD934X_CDC_FS_MCLK_CNT_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL 0x0d43
+#define WCD934X_CDC_SWR_CLK_EN_MASK BIT(0)
+#define WCD934X_CDC_SWR_CLK_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL 0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL 0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL 0x0d46
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL 0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL 0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL 0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL 0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL 0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL 0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL 0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL 0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL 0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL 0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL 0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL 0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL 0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL 0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL 0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL 0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL 0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL 0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL 0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL 0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL 0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL 0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL 0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL 0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL 0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL 0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG1 0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7 0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT 0x0d8b
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT 0x0d90
+#define WCD934X_HPH_LUT_BYPASS_MASK BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_ENABLE BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_DISABLE 0
+#define WCD934X_CODEC_CPR_WR_DATA_0 0x5001
+#define WCD934X_CODEC_CPR_WR_ADDR_0 0x5005
+#define WCD934X_CODEC_CPR_SVS_CX_VDD 0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD 0x5023
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD 0x5027
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG 0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG 0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG 0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG 0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG 0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG 0x801a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0 0x803b
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1 0x803e
+
+#define WCD934X_MAX_REGISTER 0xffff
+#define WCD934X_SEL_REGISTER 0x800
+#define WCD934X_SEL_MASK 0xff
+#define WCD934X_SEL_SHIFT 0x0
+#define WCD934X_WINDOW_START 0x800
+#define WCD934X_WINDOW_LENGTH 0x100
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_EN0 0x30
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0 0x34
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1 0x35
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0 0x36
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1 0x37
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 0x38
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1 0x39
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0 0x3A
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1 0x3B
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0 0x60
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0 0x70
+#define WCD934X_SLIM_PGD_RX_PORT_CFG(p) (0x30 + p)
+#define WCD934X_SLIM_PGD_PORT_CFG(p) (0x40 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_CFG(p) (0x50 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_SRC(p) (0x60 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS(p) (0x80 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) (0x100 + 4 * p)
+/* ports range from 10-16 */
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) (0x101 + 4 * p)
+#define WCD934X_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) (0x140 + 4 * p)
+
+#define SLIM_MANF_ID_QCOM 0x217
+#define SLIM_PROD_CODE_WCD9340 0x250
+#define SLIM_DEV_IDX_WCD9340 0x1
+#define SLIM_DEV_INSTANCE_ID_WCD9340 0
+
+#endif
diff --git a/include/linux/mfd/wcd934x/wcd934x.h b/include/linux/mfd/wcd934x/wcd934x.h
new file mode 100644
index 000000000000..f3c65a035150
--- /dev/null
+++ b/include/linux/mfd/wcd934x/wcd934x.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD934X_H__
+#define __WCD934X_H__
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+
+#define WCD934X_MAX_SUPPLY 5
+
+/**
+ * struct wcd934x_ddata - wcd934x driver data
+ *
+ * @supplies: wcd934x regulator supplies
+ * @irq_data: wcd934x irq_chip data
+ * @regmap: wcd934x regmap pointer
+ * @extclk: External clock
+ * @dev: device instance of wcd934x slim device
+ * @irq: irq for wcd934x.
+ */
+struct wcd934x_ddata {
+ struct regulator_bulk_data supplies[WCD934X_MAX_SUPPLY];
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct clk *extclk;
+ struct device *dev;
+ int irq;
+};
+
+#endif /* __WCD934X_H__ */
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 508e8cc5ee86..653d2a0aa44c 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,6 +130,11 @@ enum {
MLX4_CQE_STATUS_IPOK = 1 << 12,
};
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+ MLX4_CQE_STATUS_L4_CSUM = 1 << 2,
+};
+
enum {
MLX4_CQE_LLC = 1,
MLX4_CQE_SNAP = 1 << 1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 22bd0d5024c8..277a51d3ec40 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -463,6 +463,11 @@ struct mlx5_vf_context {
int enabled;
u64 port_guid;
u64 node_guid;
+ /* Valid bits are used to validate administrative guid only.
+ * Enabled after ndo_set_vf_guid
+ */
+ u8 port_guid_valid:1;
+ u8 node_guid_valid:1;
enum port_state_policy policy;
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ee0a34d66c7c..032cd6630720 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1188,7 +1188,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
@@ -1211,7 +1212,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_130[0xa];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
+ u8 reserved_at_140[0x9];
+ u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
@@ -3428,7 +3430,9 @@ struct mlx5_ifc_mkc_bits {
u8 translations_octword_size[0x20];
- u8 reserved_at_1c0[0x1b];
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 reserved_at_1d9[0x1];
u8 log_page_size[0x5];
u8 reserved_at_1e0[0x20];
@@ -4889,7 +4893,19 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 req_cqe_flush_error[0x20];
- u8 reserved_at_620[0x1e0];
+ u8 reserved_at_620[0x20];
+
+ u8 roce_adp_retrans[0x20];
+
+ u8 roce_adp_retrans_to[0x20];
+
+ u8 roce_slow_restart[0x20];
+
+ u8 roce_slow_restart_cnps[0x20];
+
+ u8 roce_slow_restart_trans[0x20];
+
+ u8 reserved_at_6e0[0x120];
};
struct mlx5_ifc_query_q_counter_in_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 67f8451b9a12..73a044ed6981 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -70,11 +70,6 @@ static inline void totalram_pages_add(long count)
atomic_long_add(count, &_totalram_pages);
}
-static inline void totalram_pages_set(long val)
-{
- atomic_long_set(&_totalram_pages, val);
-}
-
extern void * high_memory;
extern int page_cluster;
@@ -916,10 +911,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
-
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
@@ -947,9 +938,10 @@ static inline bool is_zone_device_page(const struct page *page)
#endif
#ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page);
+void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool put_devmap_managed_page(struct page *page)
+
+static inline bool page_is_devmap_managed(struct page *page)
{
if (!static_branch_unlikely(&devmap_managed_key))
return false;
@@ -958,7 +950,6 @@ static inline bool put_devmap_managed_page(struct page *page)
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_FS_DAX:
- __put_devmap_managed_page(page);
return true;
default:
break;
@@ -966,11 +957,17 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
}
+void put_devmap_managed_page(struct page *page);
+
#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool page_is_devmap_managed(struct page *page)
{
return false;
}
+
+static inline void put_devmap_managed_page(struct page *page)
+{
+}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
@@ -1023,37 +1020,37 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (put_devmap_managed_page(page))
+ if (page_is_devmap_managed(page)) {
+ put_devmap_managed_page(page);
return;
+ }
if (put_page_testzero(page))
__put_page(page);
}
/**
- * put_user_page() - release a gup-pinned page
+ * unpin_user_page() - release a gup-pinned page
* @page: pointer to page to be released
*
- * Pages that were pinned via get_user_pages*() must be released via
- * either put_user_page(), or one of the put_user_pages*() routines
- * below. This is so that eventually, pages that are pinned via
- * get_user_pages*() can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special
- * handling.
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
*
- * put_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. put_user_page() calls must
- * be perfectly matched up with get_user_page() calls.
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
*/
-static inline void put_user_page(struct page *page)
+static inline void unpin_user_page(struct page *page)
{
put_page(page);
}
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
- bool make_dirty);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
-void put_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_pages(struct page **pages, unsigned long npages);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
@@ -1501,9 +1498,16 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1511,6 +1515,8 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -2323,6 +2329,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
+extern int do_madvise(unsigned long start, size_t len_in, int behavior);
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
@@ -2574,13 +2581,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
+#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
/*
- * NOTE on FOLL_LONGTERM:
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
*
* FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is contrasted with
- * iov_iter_get_pages() where usages which are transient.
+ * period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
*
* FIXME: For pages which are part of a filesystem, mappings are subject to the
* lifetime enforced by the filesystem and we need guarantees that longterm
@@ -2595,11 +2604,39 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
* Currently only get_user_pages() and get_user_pages_fast() support this flag
* and calls to get_user_pages_[un]locked are specifically not allowed. This
* is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY
+ * FAULT_FLAG_ALLOW_RETRY.
*
- * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
- * that region. And so CMA attempts to migrate the page before pinning when
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
* FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/vm/pin_user_pages.rst for more information.
*/
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
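The comment block above pairs pin_user_pages*() for acquisition with unpin_user_page*() for release. A minimal sketch of that pairing, using only the pin_user_pages_fast() and unpin_user_pages_dirty_lock() declarations added in this hunk; the function name, buffer address and reduced error handling are made up for illustration, not taken from this patch.

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical caller: pin a user buffer, touch its data, then unpin. */
static int example_pin_user_buffer(unsigned long user_addr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_PIN is set internally by pin_user_pages_fast(). */
	pinned = pin_user_pages_fast(user_addr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	/* ... DMA or direct access to the page data goes here ... */

	/* Pair with unpin_user_page*(), never with put_page(). */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kfree(pages);
	return 0;
}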
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 270aa8fd2800..e87bb864bdb2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -490,7 +490,7 @@ struct mm_struct {
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 9e6caa8ecd19..736f6918335e 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -8,7 +8,7 @@
#include <linux/srcu.h>
#include <linux/interval_tree.h>
-struct mmu_notifier_mm;
+struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;
@@ -73,7 +73,7 @@ struct mmu_notifier_ops {
* through the gart alias address, so leading to memory
* corruption.
*/
- void (*release)(struct mmu_notifier *mn,
+ void (*release)(struct mmu_notifier *subscription,
struct mm_struct *mm);
/*
@@ -85,7 +85,7 @@ struct mmu_notifier_ops {
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
- int (*clear_flush_young)(struct mmu_notifier *mn,
+ int (*clear_flush_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -95,7 +95,7 @@ struct mmu_notifier_ops {
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
- int (*clear_young)(struct mmu_notifier *mn,
+ int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -106,7 +106,7 @@ struct mmu_notifier_ops {
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
- int (*test_young)(struct mmu_notifier *mn,
+ int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address);
@@ -114,7 +114,7 @@ struct mmu_notifier_ops {
* change_pte is called in cases that pte mapping to page is changed:
* for example, when ksm remaps pte to point to a new shared page.
*/
- void (*change_pte)(struct mmu_notifier *mn,
+ void (*change_pte)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address,
pte_t pte);
@@ -169,9 +169,9 @@ struct mmu_notifier_ops {
* invalidate_range_end.
*
*/
- int (*invalidate_range_start)(struct mmu_notifier *mn,
+ int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
- void (*invalidate_range_end)(struct mmu_notifier *mn,
+ void (*invalidate_range_end)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
/*
@@ -192,8 +192,10 @@ struct mmu_notifier_ops {
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
*/
- void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ void (*invalidate_range)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -206,7 +208,7 @@ struct mmu_notifier_ops {
* and cannot sleep.
*/
struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
- void (*free_notifier)(struct mmu_notifier *mn);
+ void (*free_notifier)(struct mmu_notifier *subscription);
};
/*
@@ -235,7 +237,7 @@ struct mmu_notifier {
* was required but mmu_notifier_range_blockable(range) is false.
*/
struct mmu_interval_notifier_ops {
- bool (*invalidate)(struct mmu_interval_notifier *mni,
+ bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
};
@@ -265,7 +267,7 @@ struct mmu_notifier_range {
static inline int mm_has_notifiers(struct mm_struct *mm)
{
- return unlikely(mm->mmu_notifier_mm);
+ return unlikely(mm->notifier_subscriptions);
}
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -280,30 +282,31 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
up_write(&mm->mmap_sem);
return ret;
}
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
struct mm_struct *mm);
-unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
-int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
struct mm_struct *mm, unsigned long start,
unsigned long length,
const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
- struct mmu_interval_notifier *mni, struct mm_struct *mm,
+ struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
unsigned long start, unsigned long length,
const struct mmu_interval_notifier_ops *ops);
-void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
/**
* mmu_interval_set_seq - Save the invalidation sequence
- * @mni - The mni passed to invalidate
+ * @interval_sub - The subscription passed to invalidate
* @cur_seq - The cur_seq passed to the invalidate() callback
*
* This must be called unconditionally from the invalidate callback of a
@@ -314,15 +317,16 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
* If the caller does not call mmu_interval_read_begin() or
* mmu_interval_read_retry() then this call is not required.
*/
-static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
- unsigned long cur_seq)
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+ unsigned long cur_seq)
{
- WRITE_ONCE(mni->invalidate_seq, cur_seq);
+ WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
/**
* mmu_interval_read_retry - End a read side critical section against a VA range
- * mni: The range
+ * interval_sub: The subscription
* seq: The return of the paired mmu_interval_read_begin()
*
* This MUST be called under a user provided lock that is also held
@@ -334,15 +338,16 @@ static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
* Returns true if an invalidation collided with this critical section, and
* the caller should retry.
*/
-static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
- unsigned long seq)
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
{
- return mni->invalidate_seq != seq;
+ return interval_sub->invalidate_seq != seq;
}
/**
* mmu_interval_check_retry - Test if a collision has occurred
- * mni: The range
+ * interval_sub: The subscription
* seq: The return of the matching mmu_interval_read_begin()
*
* This can be used in the critical section between mmu_interval_read_begin()
@@ -357,14 +362,15 @@ static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
* This call can be used as part of loops and other expensive operations to
* expedite a retry.
*/
-static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
- unsigned long seq)
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
{
/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
- return READ_ONCE(mni->invalidate_seq) != seq;
+ return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
@@ -480,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
__mmu_notifier_invalidate_range(mm, start, end);
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
- mm->mmu_notifier_mm = NULL;
+ mm->notifier_subscriptions = NULL;
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_mm_destroy(mm);
+ __mmu_notifier_subscriptions_destroy(mm);
}
@@ -692,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
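The renamed read-side helpers, mmu_interval_read_begin() and mmu_interval_read_retry(), are designed for a collision-retry loop. A rough sketch of that pattern, with a made-up driver structure and spinlock standing in for the real consumer; the device page-table update itself is elided.

#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>

struct example_dev {
	spinlock_t table_lock;	/* protects the device page tables */
};

static void example_fault_range(struct example_dev *edev,
				struct mmu_interval_notifier *interval_sub)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(interval_sub);

	/* ... walk the CPU page tables (e.g. via hmm_range_fault()) ... */

	spin_lock(&edev->table_lock);
	if (mmu_interval_read_retry(interval_sub, seq)) {
		/* an invalidation collided with this critical section */
		spin_unlock(&edev->table_lock);
		goto again;
	}
	/* ... program the device page tables under the lock ... */
	spin_unlock(&edev->table_lock);
}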
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5334ad8fc7bd..c2bc309d1634 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -758,7 +758,7 @@ typedef struct pglist_data {
#ifdef CONFIG_NUMA
/*
- * zone reclaim becomes active if more unmapped pages exist.
+ * node reclaim becomes active if more unmapped pages exist.
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
diff --git a/include/linux/module.h b/include/linux/module.h
index 0c7366c317bd..1ad393e62bef 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -170,6 +170,16 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * MODULE_FILE is used for generating modules.builtin
+ * So, make it no-op when this is being built as a module
+ */
+#ifdef MODULE
+#define MODULE_FILE
+#else
+#define MODULE_FILE MODULE_INFO(file, KBUILD_MODFILE);
+#endif
+
+/*
* The following license idents are currently accepted as indicating free
* software modules
*
@@ -213,7 +223,7 @@ extern void cleanup_module(void);
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
-#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
+#define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license)
/*
* Author(s), use "Name <email>" or just "Name", for multiple
@@ -429,7 +439,7 @@ struct module {
#ifdef CONFIG_KALLSYMS
/* Protected by RCU and/or module_mutex: use rcu_dereference() */
- struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms __rcu *kallsyms;
struct mod_kallsyms core_kallsyms;
/* Section attributes */
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e5c3e23919b8..3ef917ff0964 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -128,6 +128,9 @@ struct kparam_array
/**
* module_param_unsafe - same as module_param but taints kernel
+ * @name: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
@@ -150,6 +153,10 @@ struct kparam_array
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_named_unsafe(name, value, type, perm) \
param_check_##type(name, &(value)); \
@@ -160,6 +167,7 @@ struct kparam_array
* module_param_cb - general callback for a module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
@@ -171,36 +179,96 @@ struct kparam_array
__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
KERNEL_PARAM_FL_UNSAFE)
+#define __level_param_cb(name, ops, arg, perm, level) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
/**
- * <level>_param_cb - general callback for a module/cmdline parameter
- * to be evaluated before certain initcall level
+ * core_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before core initcall level
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
*/
-#define __level_param_cb(name, ops, arg, perm, level) \
- __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
-
#define core_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 1)
+/**
+ * postcore_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before postcore initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define postcore_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 2)
+/**
+ * arch_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before arch initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define arch_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 3)
+/**
+ * subsys_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before subsys initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define subsys_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 4)
+/**
+ * fs_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before fs initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define fs_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 5)
+/**
+ * device_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before device initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define device_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 6)
+/**
+ * late_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before late initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define late_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 7)
@@ -263,6 +331,10 @@ static inline void kernel_param_unlock(struct module *mod)
/**
* core_param_unsafe - same as core_param but taints kernel
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs
*/
#define core_param_unsafe(name, var, type, perm) \
param_check_##type(name, &(var)); \
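The kernel-doc added above covers the callback-based parameter macros. A short sketch of module_param_cb() with a custom set handler; the parameter name, upper bound and description are invented for the example.

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int example_limit = 16;

static int example_limit_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret = kstrtouint(val, 0, &n);

	if (ret)
		return ret;
	if (n > 64)			/* reject out-of-range values */
		return -EINVAL;
	*(unsigned int *)kp->arg = n;
	return 0;
}

static const struct kernel_param_ops example_limit_ops = {
	.set = example_limit_set,
	.get = param_get_uint,
};

module_param_cb(example_limit, &example_limit_ops, &example_limit, 0644);
MODULE_PARM_DESC(example_limit, "example limit (0-64)");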
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5a4623fc586b..5abd91cc6dfa 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -128,7 +128,8 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
-#define SR_TB BIT(5) /* Top/Bottom protect */
+#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
/* Spansion/Cypress specific status bits */
#define SR_E_ERR BIT(5)
@@ -224,14 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-enum spi_nor_ops {
- SPI_NOR_OPS_READ = 0,
- SPI_NOR_OPS_WRITE,
- SPI_NOR_OPS_ERASE,
- SPI_NOR_OPS_LOCK,
- SPI_NOR_OPS_UNLOCK,
-};
-
enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
SNOR_F_HAS_SR_TB = BIT(1),
@@ -244,6 +237,7 @@ enum spi_nor_option_flags {
SNOR_F_HAS_LOCK = BIT(8),
SNOR_F_HAS_16BIT_SR = BIT(9),
SNOR_F_NO_READ_CR = BIT(10),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
};
@@ -483,8 +477,8 @@ struct spi_nor;
* opcode via write_reg().
*/
struct spi_nor_controller_ops {
- int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ int (*prepare)(struct spi_nor *nor);
+ void (*unprepare)(struct spi_nor *nor);
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
size_t len);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 07bfb0874033..0dd980d7318f 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
+#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/path.h>
#include <linux/fcntl.h>
@@ -38,6 +39,15 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
#define LOOKUP_ROOT 0x2000
#define LOOKUP_ROOT_GRABBED 0x0008
+/* Scoping flags for lookup. */
+#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
+#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
+#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+
extern int path_pts(struct path *path);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
@@ -68,7 +78,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 6861df759fad..572458016331 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,8 @@ static inline bool is_migrate_isolate(int migratetype)
#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
- int migratetype, int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+ int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37a4d9e32cd3..ccb14b6a16b5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -636,4 +636,32 @@ static inline unsigned long dir_pages(struct inode *inode)
PAGE_SHIFT;
}
+/**
+ * page_mkwrite_check_truncate - check if page was truncated
+ * @page: the page to check
+ * @inode: the inode to check the page against
+ *
+ * Returns the number of bytes in the page up to EOF,
+ * or -EFAULT if the page was truncated.
+ */
+static inline int page_mkwrite_check_truncate(struct page *page,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ int offset = offset_in_page(size);
+
+ if (page->mapping != inode->i_mapping)
+ return -EFAULT;
+
+ /* page is wholly inside EOF */
+ if (page->index < index)
+ return PAGE_SIZE;
+ /* page is wholly past EOF */
+ if (page->index > index || !offset)
+ return -EFAULT;
+ /* page is partially inside EOF */
+ return offset;
+}
+
#endif /* _LINUX_PAGEMAP_H */
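A sketch of how a filesystem's ->page_mkwrite() handler might use the new page_mkwrite_check_truncate() helper; the locking shown is the bare minimum and the surrounding filesystem details are omitted or hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int len;

	lock_page(page);
	len = page_mkwrite_check_truncate(page, inode);
	if (len < 0) {
		/* the page was truncated while we were getting here */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	/* ... mark the first 'len' bytes of the page writable/dirty ... */

	return VM_FAULT_LOCKED;
}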
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c393dff2d66f..3840a541a9de 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1202,6 +1202,7 @@ int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -2310,7 +2311,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
}
#endif
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 390031e816dc..22d9d183950d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -210,15 +210,17 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
}
/**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
* @ref: percpu_ref to try-get
+ * @nr: number of references to get
*
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
* Returns %true on success; %false on failure.
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+ unsigned long nr)
{
unsigned long __percpu *percpu_count;
bool ret;
@@ -226,10 +228,10 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
+ this_cpu_add(*percpu_count, nr);
ret = true;
} else {
- ret = atomic_long_inc_not_zero(&ref->count);
+ ret = atomic_long_add_unless(&ref->count, nr, 0);
}
rcu_read_unlock();
@@ -238,6 +240,20 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
}
/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+ return percpu_ref_tryget_many(ref, 1);
+}
+
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 3bd019037eb3..54a06a4d2618 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -9,7 +9,6 @@ struct device;
struct bd6107_platform_data {
struct device *fbdev;
- int reset; /* Reset GPIO */
unsigned int def_value;
};
diff --git a/include/linux/platform_data/tc35876x.h b/include/linux/platform_data/tc35876x.h
deleted file mode 100644
index cd6a51c71e7e..000000000000
--- a/include/linux/platform_data/tc35876x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#ifndef _TC35876X_H
-#define _TC35876X_H
-
-struct tc35876x_platform_data {
- int gpio_bridge_reset;
- int gpio_panel_bl_en;
- int gpio_panel_vadd;
-};
-
-#endif /* _TC35876X_H */
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 4badd5322949..d55c746ac56e 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -105,11 +105,56 @@ enum max17042_register {
MAX17042_OCV = 0xEE,
- MAX17042_OCVInternal = 0xFB,
+ MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */
MAX17042_VFSOC = 0xFF,
};
+enum max17055_register {
+ MAX17055_QRes = 0x0C,
+ MAX17055_TTF = 0x20,
+ MAX17055_V_empty = 0x3A,
+ MAX17055_TIMER = 0x3E,
+ MAX17055_USER_MEM = 0x40,
+ MAX17055_RGAIN = 0x42,
+
+ MAX17055_ConvgCfg = 0x49,
+ MAX17055_VFRemCap = 0x4A,
+
+ MAX17055_STATUS2 = 0xB0,
+ MAX17055_POWER = 0xB1,
+ MAX17055_ID = 0xB2,
+ MAX17055_AvgPower = 0xB3,
+ MAX17055_IAlrtTh = 0xB4,
+ MAX17055_TTFCfg = 0xB5,
+ MAX17055_CVMixCap = 0xB6,
+ MAX17055_CVHalfTime = 0xB7,
+ MAX17055_CGTempCo = 0xB8,
+ MAX17055_Curve = 0xB9,
+ MAX17055_HibCfg = 0xBA,
+ MAX17055_Config2 = 0xBB,
+ MAX17055_VRipple = 0xBC,
+ MAX17055_RippleCfg = 0xBD,
+ MAX17055_TimerH = 0xBE,
+
+ MAX17055_RSense = 0xD0,
+ MAX17055_ScOcvLim = 0xD1,
+
+ MAX17055_SOCHold = 0xD3,
+ MAX17055_MaxPeakPwr = 0xD4,
+ MAX17055_SusPeakPwr = 0xD5,
+ MAX17055_PackResistance = 0xD6,
+ MAX17055_SysResistance = 0xD7,
+ MAX17055_MinSysV = 0xD8,
+ MAX17055_MPPCurrent = 0xD9,
+ MAX17055_SPPCurrent = 0xDA,
+ MAX17055_ModelCfg = 0xDB,
+ MAX17055_AtQResidual = 0xDC,
+ MAX17055_AtTTE = 0xDD,
+ MAX17055_AtAvSOC = 0xDE,
+ MAX17055_AtAvCap = 0xDF,
+};
+
/* Registers specific to max17047/50 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
@@ -125,6 +170,7 @@ enum max170xx_chip_type {
MAXIM_DEVICE_TYPE_MAX17042,
MAXIM_DEVICE_TYPE_MAX17047,
MAXIM_DEVICE_TYPE_MAX17050,
+ MAXIM_DEVICE_TYPE_MAX17055,
MAXIM_DEVICE_TYPE_NUM
};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 28413f737e7d..dcd5a71e6c67 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -325,6 +325,11 @@ struct power_supply_battery_ocv_table {
int capacity; /* percent */
};
+struct power_supply_resistance_temp_table {
+ int temp; /* celsius */
+ int resistance; /* internal resistance percent */
+};
+
#define POWER_SUPPLY_OCV_TEMP_MAX 20
/*
@@ -349,6 +354,8 @@ struct power_supply_battery_info {
int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
+ struct power_supply_resistance_temp_table *resist_table;
+ int resist_table_size;
};
extern struct atomic_notifier_head power_supply_notifier;
@@ -381,6 +388,9 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
int temp, int *table_len);
extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
int ocv, int temp);
+extern int
+power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+ int table_len, int temp);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_input_current_limit_from_supplier(
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index d312e6281e69..4626b1ac3b6c 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -79,10 +79,10 @@ static inline int ns_alloc_inum(struct ns_common *ns)
extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern void *ns_get_path(struct path *path, struct task_struct *task,
+extern int ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
typedef struct ns_common *ns_get_path_helper_t(void *);
-extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
void *private_data);
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
diff --git a/include/linux/random.h b/include/linux/random.h
index f189c927fdea..d319f9a1e429 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -167,29 +167,21 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
#else
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
{
- return 0;
+ return false;
}
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
{
- return 0;
+ return false;
}
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
- return 0;
+ return false;
}
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
- return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
- return 0;
-}
-static inline bool arch_has_random_seed(void)
-{
- return 0;
+ return false;
}
#endif
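The stubs above now return false and carry __must_check, which matches the usual caller pattern of trying the architectural RNG first and falling back to the software pool. A small sketch of that pattern; the helper name is made up.

#include <linux/random.h>

static unsigned long example_get_seed(void)
{
	unsigned long seed;

	/* prefer a hardware seed, then the plain hardware RNG */
	if (!arch_get_random_seed_long(&seed) &&
	    !arch_get_random_long(&seed))
		seed = get_random_long();	/* software fallback */

	return seed;
}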
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 716ad1d8d95e..04278493bf15 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -917,7 +917,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
- struct sighand_struct *sighand;
+ struct sighand_struct __rcu *sighand;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 9618debb9ceb..a74c1d5acdf3 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -15,6 +15,12 @@
struct cmdq_pkt;
+struct cmdq_client_reg {
+ u8 subsys;
+ u16 offset;
+ u16 size;
+};
+
struct cmdq_client {
spinlock_t lock;
u32 pkt_cnt;
@@ -25,6 +31,21 @@ struct cmdq_client {
};
/**
+ * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
+ * node of CMDQ client
+ * @dev: device of CMDQ mailbox client
+ * @client_reg: CMDQ client reg pointer
+ * @idx: the index of desired reg
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Helps the CMDQ client parse the cmdq client reg
+ * from the device node of the CMDQ client.
+ */
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx);
+
+/**
* cmdq_mbox_create() - create CMDQ mailbox client and channel
* @dev: device of CMDQ mailbox client
* @index: index of CMDQ mailbox channel
@@ -100,6 +121,38 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
+ * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to
+ * execute an instruction that waits for a specified
+ * hardware register to check for the value w/o mask.
+ * All GCE hardware threads will be blocked by this
+ * instruction.
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_poll_mask() - Append polling command to the CMDQ packet, ask GCE to
+ * execute an instruction that waits for a specified
+ * hardware register to check for the value w/ mask.
+ * All GCE hardware threads will be blocked by this
+ * instruction.
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask);
+/**
* cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
* packet and call back at the end of done packet
* @pkt: the CMDQ packet
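The helpers declared in this section compose naturally: a client can look up its register description once and then append polls against it. A rough sketch using only functions added here; the status offset and ready bit are placeholders, and pkt is assumed to have been created beforehand with cmdq_pkt_create().

#include <linux/soc/mediatek/mtk-cmdq.h>

static int example_append_poll(struct device *dev, struct cmdq_pkt *pkt)
{
	struct cmdq_client_reg creg;
	int ret;

	ret = cmdq_dev_get_client_reg(dev, &creg, 0);
	if (ret)
		return ret;

	/* block the GCE thread until bit 0 of a (made-up) status offset is set */
	return cmdq_pkt_poll_mask(pkt, creg.subsys, creg.offset + 0x14, 0x1, 0x1);
}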
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
new file mode 100644
index 000000000000..082398e0cfb1
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __MTK_SIP_SVC_H
+#define __MTK_SIP_SVC_H
+
+/* Error Code */
+#define SIP_SVC_E_SUCCESS 0
+#define SIP_SVC_E_NOT_SUPPORTED -1
+#define SIP_SVC_E_INVALID_PARAMS -2
+#define SIP_SVC_E_INVALID_RANGE -3
+#define SIP_SVC_E_PERMISSION_DENIED -4
+
+#ifdef CONFIG_ARM64
+#define MTK_SIP_SMC_CONVENTION ARM_SMCCC_SMC_64
+#else
+#define MTK_SIP_SMC_CONVENTION ARM_SMCCC_SMC_32
+#endif
+
+#define MTK_SIP_SMC_CMD(fn_id) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
+ ARM_SMCCC_OWNER_SIP, fn_id)
+
+#endif
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index f8603724fbee..0f64de7caa39 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -45,8 +45,8 @@
*/
struct cache_head {
struct hlist_node cache_list;
- time_t expiry_time; /* After this time, don't use the data */
- time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
+ time64_t expiry_time; /* After this time, don't use the data */
+ time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* sent, else this is when update was
 * received, though it is always set to
* be *after* ->flush_time.
@@ -95,22 +95,22 @@ struct cache_detail {
/* fields below this comment are for internal use
* and should not be touched by cache owners
*/
- time_t flush_time; /* flush all cache items with
+ time64_t flush_time; /* flush all cache items with
* last_refresh at or earlier
* than this. last_refresh
* is never set at or earlier
* than this.
*/
struct list_head others;
- time_t nextcheck;
+ time64_t nextcheck;
int entries;
/* fields for communication over channel */
struct list_head queue;
atomic_t writers; /* how many time is /channel open */
- time_t last_close; /* if no writers, when did last close */
- time_t last_warn; /* when we last warned about no writers */
+ time64_t last_close; /* if no writers, when did last close */
+ time64_t last_warn; /* when we last warned about no writers */
union {
struct proc_dir_entry *procfs;
@@ -147,18 +147,22 @@ struct cache_deferred_req {
* timestamps kept in the cache are expressed in seconds
* since boot. This is the best for measuring differences in
* real time.
+ * This reimplements ktime_get_boottime_seconds() in a slightly
+ * faster but less accurate way. When we end up converting
+ * back to wallclock (CLOCK_REALTIME), that error often
+ * cancels out during the reverse operation.
*/
-static inline time_t seconds_since_boot(void)
+static inline time64_t seconds_since_boot(void)
{
- struct timespec boot;
- getboottime(&boot);
- return get_seconds() - boot.tv_sec;
+ struct timespec64 boot;
+ getboottime64(&boot);
+ return ktime_get_real_seconds() - boot.tv_sec;
}
-static inline time_t convert_to_wallclock(time_t sinceboot)
+static inline time64_t convert_to_wallclock(time64_t sinceboot)
{
- struct timespec boot;
- getboottime(&boot);
+ struct timespec64 boot;
+ getboottime64(&boot);
return boot.tv_sec + sinceboot;
}
@@ -273,7 +277,7 @@ static inline int get_uint(char **bpp, unsigned int *anint)
return 0;
}
-static inline int get_time(char **bpp, time_t *time)
+static inline int get_time(char **bpp, time64_t *time)
{
char buf[50];
long long ll;
@@ -287,20 +291,20 @@ static inline int get_time(char **bpp, time_t *time)
if (kstrtoll(buf, 0, &ll))
return -EINVAL;
- *time = (time_t)ll;
+ *time = ll;
return 0;
}
-static inline time_t get_expiry(char **bpp)
+static inline time64_t get_expiry(char **bpp)
{
- time_t rv;
- struct timespec boot;
+ time64_t rv;
+ struct timespec64 boot;
if (get_time(bpp, &rv))
return 0;
if (rv < 0)
return 0;
- getboottime(&boot);
+ getboottime64(&boot);
return rv - boot.tv_sec;
}
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bd691e08be3b..1cc6cefb1220 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -48,7 +48,7 @@ int gss_import_sec_context(
size_t bufsize,
struct gss_api_mech *mech,
struct gss_ctx **ctx_id,
- time_t *endtime,
+ time64_t *endtime,
gfp_t gfp_mask);
u32 gss_get_mic(
struct gss_ctx *ctx_id,
@@ -108,7 +108,7 @@ struct gss_api_ops {
const void *input_token,
size_t bufsize,
struct gss_ctx *ctx_id,
- time_t *endtime,
+ time64_t *endtime,
gfp_t gfp_mask);
u32 (*gss_get_mic)(
struct gss_ctx *ctx_id,
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 02c0412e368c..c1d77dd8ed41 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -106,9 +106,9 @@ struct krb5_ctx {
struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
- s32 endtime;
atomic_t seq_send;
atomic64_t seq_send64;
+ time64_t endtime;
struct xdr_netobj mech_used;
u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
diff --git a/include/linux/swab.h b/include/linux/swab.h
index e466fd159c85..bcff5149861a 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -7,6 +7,7 @@
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
+# define swab __swab
# define swahw32 __swahw32
# define swahb32 __swahb32
# define swab16p __swab16p
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index e295515bc3f3..082f1d51957a 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -21,6 +21,11 @@
#define SWITCHTEC_EVENT_FATAL BIT(4)
#define SWITCHTEC_DMA_MRPC_EN BIT(0)
+
+#define MRPC_GAS_READ 0x29
+#define MRPC_GAS_WRITE 0x87
+#define MRPC_CMD_ID(x) ((x) & 0xffff)
+
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
@@ -32,6 +37,11 @@ enum {
SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
+enum switchtec_gen {
+ SWITCHTEC_GEN3,
+ SWITCHTEC_GEN4,
+};
+
struct mrpc_regs {
u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
@@ -98,16 +108,37 @@ struct sw_event_regs {
} __packed;
enum {
- SWITCHTEC_CFG0_RUNNING = 0x04,
- SWITCHTEC_CFG1_RUNNING = 0x05,
- SWITCHTEC_IMG0_RUNNING = 0x03,
- SWITCHTEC_IMG1_RUNNING = 0x07,
+ SWITCHTEC_GEN3_CFG0_RUNNING = 0x04,
+ SWITCHTEC_GEN3_CFG1_RUNNING = 0x05,
+ SWITCHTEC_GEN3_IMG0_RUNNING = 0x03,
+ SWITCHTEC_GEN3_IMG1_RUNNING = 0x07,
};
-struct sys_info_regs {
- u32 device_id;
- u32 device_version;
- u32 firmware_version;
+enum {
+ SWITCHTEC_GEN4_MAP0_RUNNING = 0x00,
+ SWITCHTEC_GEN4_MAP1_RUNNING = 0x01,
+ SWITCHTEC_GEN4_KEY0_RUNNING = 0x02,
+ SWITCHTEC_GEN4_KEY1_RUNNING = 0x03,
+ SWITCHTEC_GEN4_BL2_0_RUNNING = 0x04,
+ SWITCHTEC_GEN4_BL2_1_RUNNING = 0x05,
+ SWITCHTEC_GEN4_CFG0_RUNNING = 0x06,
+ SWITCHTEC_GEN4_CFG1_RUNNING = 0x07,
+ SWITCHTEC_GEN4_IMG0_RUNNING = 0x08,
+ SWITCHTEC_GEN4_IMG1_RUNNING = 0x09,
+};
+
+enum {
+ SWITCHTEC_GEN4_KEY0_ACTIVE = 0,
+ SWITCHTEC_GEN4_KEY1_ACTIVE = 1,
+ SWITCHTEC_GEN4_BL2_0_ACTIVE = 0,
+ SWITCHTEC_GEN4_BL2_1_ACTIVE = 1,
+ SWITCHTEC_GEN4_CFG0_ACTIVE = 0,
+ SWITCHTEC_GEN4_CFG1_ACTIVE = 1,
+ SWITCHTEC_GEN4_IMG0_ACTIVE = 0,
+ SWITCHTEC_GEN4_IMG1_ACTIVE = 1,
+};
+
+struct sys_info_regs_gen3 {
u32 reserved1;
u32 vendor_table_revision;
u32 table_format_version;
@@ -124,26 +155,105 @@ struct sys_info_regs {
u8 component_revision;
} __packed;
-struct flash_info_regs {
+struct sys_info_regs_gen4 {
+ u16 gas_layout_ver;
+ u8 evlist_ver;
+ u8 reserved1;
+ u16 mgmt_cmd_set_ver;
+ u16 fabric_cmd_set_ver;
+ u32 reserved2[2];
+ u8 mrpc_uart_ver;
+ u8 mrpc_twi_ver;
+ u8 mrpc_eth_ver;
+ u8 mrpc_inband_ver;
+ u32 reserved3[7];
+ u32 fw_update_tmo;
+ u32 xml_version_cfg;
+ u32 xml_version_img;
+ u32 partition_id;
+ u16 bl2_running;
+ u16 cfg_running;
+ u16 img_running;
+ u16 key_running;
+ u32 reserved4[43];
+ u32 vendor_seeprom_twi;
+ u32 vendor_table_revision;
+ u32 vendor_specific_info[2];
+ u16 p2p_vendor_id;
+ u16 p2p_device_id;
+ u8 p2p_revision_id;
+ u8 reserved5[3];
+ u32 p2p_class_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u32 p2p_serial_number[2];
+ u8 mac_addr[6];
+ u8 reserved6[2];
+ u32 reserved7[3];
+ char vendor_id[8];
+ char product_id[24];
+ char product_revision[2];
+ u16 reserved8;
+} __packed;
+
+struct sys_info_regs {
+ u32 device_id;
+ u32 device_version;
+ u32 firmware_version;
+ union {
+ struct sys_info_regs_gen3 gen3;
+ struct sys_info_regs_gen4 gen4;
+ };
+} __packed;
+
+struct partition_info {
+ u32 address;
+ u32 length;
+};
+
+struct flash_info_regs_gen3 {
u32 flash_part_map_upd_idx;
- struct active_partition_info {
+ struct active_partition_info_gen3 {
u32 address;
u32 build_version;
u32 build_string;
} active_img;
- struct active_partition_info active_cfg;
- struct active_partition_info inactive_img;
- struct active_partition_info inactive_cfg;
+ struct active_partition_info_gen3 active_cfg;
+ struct active_partition_info_gen3 inactive_img;
+ struct active_partition_info_gen3 inactive_cfg;
u32 flash_length;
- struct partition_info {
- u32 address;
- u32 length;
- } cfg0;
+ struct partition_info cfg0;
+ struct partition_info cfg1;
+ struct partition_info img0;
+ struct partition_info img1;
+ struct partition_info nvlog;
+ struct partition_info vendor[8];
+};
+struct flash_info_regs_gen4 {
+ u32 flash_address;
+ u32 flash_length;
+
+ struct active_partition_info_gen4 {
+ unsigned char bl2;
+ unsigned char cfg;
+ unsigned char img;
+ unsigned char key;
+ } active_flag;
+
+ u32 reserved[3];
+
+ struct partition_info map0;
+ struct partition_info map1;
+ struct partition_info key0;
+ struct partition_info key1;
+ struct partition_info bl2_0;
+ struct partition_info bl2_1;
+ struct partition_info cfg0;
struct partition_info cfg1;
struct partition_info img0;
struct partition_info img1;
@@ -151,6 +261,13 @@ struct flash_info_regs {
struct partition_info vendor[8];
};
+struct flash_info_regs {
+ union {
+ struct flash_info_regs_gen3 gen3;
+ struct flash_info_regs_gen4 gen4;
+ };
+};
+
enum {
SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000,
SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000,
@@ -196,7 +313,9 @@ struct part_cfg_regs {
u32 mrpc_comp_async_data[5];
u32 dyn_binding_hdr;
u32 dyn_binding_data[5];
- u32 reserved4[159];
+ u32 intercomm_notify_hdr;
+ u32 intercomm_notify_data[5];
+ u32 reserved4[153];
} __packed;
enum {
@@ -320,7 +439,8 @@ struct pff_csr_regs {
u32 dpc_data[5];
u32 cts_hdr;
u32 cts_data[5];
- u32 reserved3[6];
+ u32 uec_hdr;
+ u32 uec_data[5];
u32 hotplug_hdr;
u32 hotplug_data[5];
u32 ier_hdr;
@@ -355,6 +475,8 @@ struct switchtec_dev {
struct device dev;
struct cdev cdev;
+ enum switchtec_gen gen;
+
int partition;
int partition_count;
int pff_csr_count;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5262b7a76d39..1815065d52f3 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -16,8 +16,7 @@ struct inode;
struct iocb;
struct io_event;
struct iovec;
-struct itimerspec;
-struct itimerval;
+struct __kernel_old_itimerval;
struct kexec_segment;
struct linux_dirent;
struct linux_dirent64;
@@ -69,6 +68,7 @@ struct rseq;
union bpf_attr;
struct io_uring_params;
struct clone_args;
+struct open_how;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -439,6 +439,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
+asmlinkage long sys_openat2(int dfd, const char __user *filename,
+ struct open_how *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_vhangup(void);
@@ -591,10 +593,10 @@ asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
+asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
asmlinkage long sys_setitimer(int which,
- struct itimerval __user *value,
- struct itimerval __user *ovalue);
+ struct __kernel_old_itimerval __user *value,
+ struct __kernel_old_itimerval __user *ovalue);
/* kernel/kexec.c */
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
@@ -1000,6 +1002,7 @@ asmlinkage long sys_fspick(int dfd, const char __user *path, unsigned int flags)
asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
siginfo_t __user *info,
unsigned int flags);
+asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
/*
* Architecture-specific system calls
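From userspace, the new sys_openat2() entry point is reached through the openat2(2) syscall, whose RESOLVE_* flags map onto the LOOKUP_NO_*/LOOKUP_BENEATH/LOOKUP_IN_ROOT scoping flags added to <linux/namei.h> in this diff. A sketch, assuming a libc and kernel headers that already expose __NR_openat2 and <linux/openat2.h>; the directory fd and path are made up.

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

/* open path strictly beneath dirfd, refusing to follow any symlink */
static int open_beneath(int dirfd, const char *path)
{
	struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_BENEATH | RESOLVE_NO_SYMLINKS,
	};

	return syscall(__NR_openat2, dirfd, path, &how, sizeof(how));
}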
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1cf73e6f85ca..3dc964010fef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -148,9 +148,7 @@ struct tcp_request_sock {
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
-#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
-#endif
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d9111aebb97d..126913c6a53b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,17 +32,6 @@
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
-/* Unit conversion macros */
-#define DECI_KELVIN_TO_CELSIUS(t) ({ \
- long _t = (t); \
- ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \
-})
-#define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732)
-#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
-#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
-#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
-#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index a9c59761927b..63076fb835e3 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -62,16 +62,16 @@ struct transport_container {
container_of(x, struct transport_container, ac)
void transport_remove_device(struct device *);
-void transport_add_device(struct device *);
+int transport_add_device(struct device *);
void transport_setup_device(struct device *);
void transport_configure_device(struct device *);
void transport_destroy_device(struct device *);
-static inline void
+static inline int
transport_register_device(struct device *dev)
{
transport_setup_device(dev);
- transport_add_device(dev);
+ return transport_add_device(dev);
}
static inline void
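Since transport_register_device() now propagates the transport_add_device() result instead of discarding it, callers are expected to check the return value. A hedged sketch of the calling pattern (the helper name is illustrative):

#include <linux/device.h>
#include <linux/transport_class.h>

/* Hypothetical caller: attach transport attributes to a device and
 * propagate a registration failure instead of ignoring it. */
static int example_attach_transport(struct device *dev)
{
	int error;

	error = transport_register_device(dev);
	if (error)
		return error;

	transport_configure_device(dev);
	return 0;
}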
diff --git a/include/linux/units.h b/include/linux/units.h
new file mode 100644
index 000000000000..aaf716364ec3
--- /dev/null
+++ b/include/linux/units.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define ABSOLUTE_ZERO_MILLICELSIUS -273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+ return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+ return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE 1000
+#define MILLIDEGREE_PER_DECIDEGREE 100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+ t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert deci-kelvin to millicelsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+ return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+ return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+ return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
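The open-coded macros removed from <linux/thermal.h> map onto these helpers one for one; a small hedged sketch of the typical driver-side use (the function and the sample value are illustrative):

#include <linux/units.h>

/* Hypothetical: a firmware interface reports temperature in deci-kelvin;
 * the thermal core wants millidegrees Celsius. */
static int example_fw_temp_to_millicelsius(long fw_deci_kelvin)
{
	/* e.g. 3032 dK == 303.2 K == 30050 millidegrees Celsius */
	return deci_kelvin_to_millicelsius(fw_deci_kelvin);
}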
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 92dbbd3f6c75..c757d848a758 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -191,6 +191,12 @@ extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
exceed those passed here.
*/
+extern int zlib_deflate_dfltcc_enabled (void);
+/*
+ Returns 1 if Deflate-Conversion facility is installed and enabled,
+ otherwise 0.
+*/
+
/*
extern int deflateInit (z_streamp strm, int level);
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 985afea1ee36..139e93be13b0 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -37,12 +37,6 @@ struct cec_notifier *cec_notifier_get_conn(struct device *dev,
const char *conn);
/**
- * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
- * @n: notifier
- */
-void cec_notifier_put(struct cec_notifier *n);
-
-/**
* cec_notifier_conn_register - find or create a new cec_notifier for the given
* HDMI device and connector tuple.
* @hdmi_dev: HDMI device that sends the events.
@@ -138,10 +132,6 @@ static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
return (struct cec_notifier *)0xdeadfeed;
}
-static inline void cec_notifier_put(struct cec_notifier *n)
-{
-}
-
static inline struct cec_notifier *
cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
const struct cec_connector_info *conn_info)
@@ -184,23 +174,6 @@ static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
#endif
/**
- * cec_notifier_get - find or create a new cec_notifier for the given device.
- * @dev: device that sends the events.
- *
- * If a notifier for device @dev already exists, then increase the refcount
- * and return that notifier.
- *
- * If it doesn't exist, then allocate a new notifier struct and return a
- * pointer to that new struct.
- *
- * Return NULL if the memory could not be allocated.
- */
-static inline struct cec_notifier *cec_notifier_get(struct device *dev)
-{
- return cec_notifier_get_conn(dev, NULL);
-}
-
-/**
* cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
*
* @n: the CEC notifier
diff --git a/include/media/cec.h b/include/media/cec.h
index 0a4f69cc9dd4..972bc8cd4384 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -386,52 +386,6 @@ cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info,
#endif
-#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
-
-/**
- * cec_notifier_register - register a callback with the notifier
- * @n: the CEC notifier
- * @adap: the CEC adapter, passed as argument to the callback function
- * @callback: the callback function
- */
-void cec_notifier_register(struct cec_notifier *n,
- struct cec_adapter *adap,
- void (*callback)(struct cec_adapter *adap, u16 pa));
-
-/**
- * cec_notifier_unregister - unregister the callback from the notifier.
- * @n: the CEC notifier
- */
-void cec_notifier_unregister(struct cec_notifier *n);
-
-/**
- * cec_register_cec_notifier - register the notifier with the cec adapter.
- * @adap: the CEC adapter
- * @notifier: the CEC notifier
- */
-void cec_register_cec_notifier(struct cec_adapter *adap,
- struct cec_notifier *notifier);
-
-#else
-
-static inline void
-cec_notifier_register(struct cec_notifier *n,
- struct cec_adapter *adap,
- void (*callback)(struct cec_adapter *adap, u16 pa))
-{
-}
-
-static inline void cec_notifier_unregister(struct cec_notifier *n)
-{
-}
-
-static inline void cec_register_cec_notifier(struct cec_adapter *adap,
- struct cec_notifier *notifier)
-{
-}
-
-#endif
-
/**
* cec_phys_addr_invalidate() - set the physical address to INVALID
*
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 1409230ad3a4..800d473b03c4 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -180,6 +180,7 @@
#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
+#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
@@ -425,4 +426,5 @@
#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#define USB_PID_HAMA_DVBT_HYBRID 0x2758
#define USB_PID_XBOX_ONE_TUNER 0x02d5
+#define USB_PID_PROLECTRIX_DV107669 0xd803
#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d8c29e089000..150ee16ebd81 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -14,6 +14,7 @@
#ifndef V4L2_COMMON_H_
#define V4L2_COMMON_H_
+#include <linux/time.h>
#include <media/v4l2-dev.h>
/* Common printk constructs for v4l-i2c drivers. These macros create a unique
@@ -518,4 +519,24 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
u32 width, u32 height);
+static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
+{
+ /*
+ * When the timestamp comes from 32-bit user space, there may be
+ * uninitialized data in tv_usec, so cast it to u32.
+ * Otherwise allow invalid input for backwards compatibility.
+ */
+ return buf->timestamp.tv_sec * NSEC_PER_SEC +
+ (u32)buf->timestamp.tv_usec * NSEC_PER_USEC;
+}
+
+static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf,
+ u64 timestamp)
+{
+ struct timespec64 ts = ns_to_timespec64(timestamp);
+
+ buf->timestamp.tv_sec = ts.tv_sec;
+ buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+
#endif /* V4L2_COMMON_H_ */
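A hedged sketch of how a driver would use the new accessors instead of filling buf->timestamp by hand (the ktime source and function names are illustrative):

#include <linux/ktime.h>
#include <media/v4l2-common.h>

/* Hypothetical completion path: stamp the buffer with the current time. */
static void example_buf_done(struct v4l2_buffer *buf)
{
	v4l2_buffer_set_timestamp(buf, ktime_get_ns());
}

/* Hypothetical consumer: read the timestamp back as nanoseconds. */
static u64 example_buf_ns(const struct v4l2_buffer *buf)
{
	return v4l2_buffer_get_timestamp(buf);
}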
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 5f36e0d2ede6..95353ae476a1 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
} while (0)
@@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
})
@@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
} while (0)
/**
@@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
})
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 4bba65a59d46..86878fba332b 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -724,4 +724,59 @@ long int video_usercopy(struct file *file, unsigned int cmd,
long int video_ioctl2(struct file *file,
unsigned int cmd, unsigned long int arg);
+/*
+ * The user space interpretation of the 'v4l2_event' differs
+ * based on the 'time_t' definition on 32-bit architectures, so
+ * the kernel has to handle both.
+ * This is the old version for 32-bit architectures.
+ */
+struct v4l2_event_time32 {
+ __u32 type;
+ union {
+ struct v4l2_event_vsync vsync;
+ struct v4l2_event_ctrl ctrl;
+ struct v4l2_event_frame_sync frame_sync;
+ struct v4l2_event_src_change src_change;
+ struct v4l2_event_motion_det motion_det;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+#define VIDIOC_DQEVENT_TIME32 _IOR('V', 89, struct v4l2_event_time32)
+
+struct v4l2_buffer_time32 {
+ __u32 index;
+ __u32 type;
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field;
+ struct old_timeval32 timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory;
+ union {
+ __u32 offset;
+ unsigned long userptr;
+ struct v4l2_plane *planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ union {
+ __s32 request_fd;
+ __u32 reserved;
+ };
+};
+#define VIDIOC_QUERYBUF_TIME32 _IOWR('V', 9, struct v4l2_buffer_time32)
+#define VIDIOC_QBUF_TIME32 _IOWR('V', 15, struct v4l2_buffer_time32)
+#define VIDIOC_DQBUF_TIME32 _IOWR('V', 17, struct v4l2_buffer_time32)
+#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32)
+
#endif /* _V4L2_IOCTL_H */
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index c86474dc7b55..8800a640c224 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -63,10 +63,10 @@ static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
r->left = boundary->left;
if (r->top < boundary->top)
r->top = boundary->top;
- if (r->left + r->width > boundary->width)
- r->left = boundary->width - r->width;
- if (r->top + r->height > boundary->height)
- r->top = boundary->height - r->height;
+ if (r->left + r->width > boundary->left + boundary->width)
+ r->left = boundary->left + boundary->width - r->width;
+ if (r->top + r->height > boundary->top + boundary->height)
+ r->top = boundary->top + boundary->height - r->height;
}
/**
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 71f1f2f0da53..761aa83a3f3c 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1090,7 +1090,7 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
* @sd: pointer to the &struct v4l2_subdev
* @o: name of the element at &struct v4l2_subdev_ops that contains @f.
* Each element there groups a set of callbacks functions.
- * @f: callback function that will be called if @cond matches.
+ * @f: callback function to be called.
* The callback functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
* @args...: arguments for @f.
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 27627e2d1bc2..c971d25431ea 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -174,15 +174,12 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
#endif /* CONFIG_MPTCP */
-void mptcp_handle_ipv6_mapped(struct sock *sk, bool mapped);
-
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcpv6_init(void);
+void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
#elif IS_ENABLED(CONFIG_IPV6)
-static inline int mptcpv6_init(void)
-{
- return 0;
-}
+static inline int mptcpv6_init(void) { return 0; }
+static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif
#endif /* __NET_MPTCP_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index 4a180f2a13e3..e55d5f765807 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -476,6 +476,13 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
if (!inet_get_convert_csum(sk))
features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
+ * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
+ * packets in udp_gro_complete_segment. As does UDP GSO, verified by
+ * udp_send_skb. But when those packets are looped in dev_loopback_xmit
+ * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
+ * specific case, where PARTIAL is both correct and required.
+ */
if (skb->pkt_type == PACKET_LOOPBACK)
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index b01a8a8d4de9..8ec482e391aa 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -500,21 +500,6 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
u8 private_data_len);
/**
- * ib_send_cm_lap - Sends a load alternate path request.
- * @cm_id: Connection identifier associated with the load alternate path
- * message.
- * @alternate_path: A path record that identifies the alternate path to
- * load.
- * @private_data: Optional user-defined private data sent with the
- * load alternate path message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len);
-
-/**
* ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
* to a specified QP state.
* @cm_id: Communication identifier associated with the QP attributes to
@@ -534,25 +519,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-/**
- * ib_send_cm_apr - Sends an alternate path response message in response to
- * a load alternate path request.
- * @cm_id: Connection identifier associated with the alternate path response.
- * @status: Reply status sent with the alternate path response.
- * @info: Optional additional information sent with the alternate path
- * response.
- * @info_length: Size of the additional information, in bytes.
- * @private_data: Optional user-defined private data sent with the
- * alternate path response message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len);
-
struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e2cc62217cc2..1f779fad3a1e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -72,11 +72,16 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
struct ib_umem_odp;
+struct ib_uqp_object;
+struct ib_usrq_object;
+struct ib_uwq_object;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
+struct ib_ucq_object;
+
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
const char *format, ...);
@@ -1413,8 +1418,11 @@ enum ib_access_flags {
IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
+ IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
- IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
+ IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
+ IB_ACCESS_SUPPORTED =
+ ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
/*
@@ -1544,7 +1552,7 @@ enum ib_poll_context {
struct ib_cq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_ucq_object *uobject;
ib_comp_handler comp_handler;
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
@@ -1558,6 +1566,11 @@ struct ib_cq {
};
struct workqueue_struct *comp_wq;
struct dim *dim;
+
+ /* updated only by trace points */
+ ktime_t timestamp;
+ bool interrupt;
+
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -1567,7 +1580,7 @@ struct ib_cq {
struct ib_srq {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
+ struct ib_usrq_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *srq_context;
enum ib_srq_type srq_type;
@@ -1612,7 +1625,7 @@ enum ib_wq_state {
struct ib_wq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_uwq_object *uobject;
void *wq_context;
void (*event_handler)(struct ib_event *, void *);
struct ib_pd *pd;
@@ -1728,7 +1741,7 @@ struct ib_qp {
atomic_t usecnt;
struct list_head open_list;
struct ib_qp *real_qp;
- struct ib_uobject *uobject;
+ struct ib_uqp_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
/* sgid_attrs associated with the AV's */
@@ -2147,11 +2160,6 @@ struct ib_port_cache {
enum ib_port_state port_state;
};
-struct ib_cache {
- rwlock_t lock;
- struct ib_event_handler event_handler;
-};
-
struct ib_port_immutable {
int pkey_tbl_len;
int gid_tbl_len;
@@ -2627,13 +2635,18 @@ struct ib_device {
struct rcu_head rcu_head;
struct list_head event_handler_list;
- spinlock_t event_handler_lock;
+ /* Protects event_handler_list */
+ struct rw_semaphore event_handler_rwsem;
+
+ /* Protects QP's event_handler calls and open_qp list */
+ spinlock_t qp_open_list_lock;
struct rw_semaphore client_data_rwsem;
struct xarray client_data;
struct mutex unregistration_lock;
- struct ib_cache cache;
+ /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
+ rwlock_t cache_lock;
/**
* port_data is indexed by port number
*/
@@ -2942,7 +2955,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
-void ib_dispatch_event(struct ib_event *event);
+void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
u8 port_num, struct ib_port_attr *port_attr);
@@ -4309,6 +4322,9 @@ static inline int ib_check_mr_access(int flags)
!(flags & IB_ACCESS_LOCAL_WRITE))
return -EINVAL;
+ if (flags & ~IB_ACCESS_SUPPORTED)
+ return -EINVAL;
+
return 0;
}
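With the optional range folded into IB_ACCESS_SUPPORTED, ib_check_mr_access() now rejects undefined bits while still accepting IB_ACCESS_RELAXED_ORDERING; a hedged sketch of a caller building access flags (the helper is illustrative):

#include <rdma/ib_verbs.h>

/* Hypothetical: request remote write plus relaxed ordering and let the
 * core validate the combination before registering an MR. */
static int example_mr_access_flags(void)
{
	int access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_RELAXED_ORDERING;

	return ib_check_mr_access(access) ? -EINVAL : access;
}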
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
new file mode 100644
index 000000000000..6a1115b02a0d
--- /dev/null
+++ b/include/rdma/iba.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+#ifndef _IBA_DEFS_H_
+#define _IBA_DEFS_H_
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <asm/unaligned.h>
+
+static inline u32 _iba_get8(const u8 *ptr)
+{
+ return *ptr;
+}
+
+static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = (*ptr & ~mask) | prep_value;
+}
+
+static inline u16 _iba_get16(const __be16 *ptr)
+{
+ return be16_to_cpu(*ptr);
+}
+
+static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
+{
+ *ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u32 _iba_get32(const __be32 *ptr)
+{
+ return be32_to_cpu(*ptr);
+}
+
+static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u64 _iba_get64(const __be64 *ptr)
+{
+ /*
+ * The mads are constructed so that 32 bit and smaller are naturally
+ * aligned, everything larger has a max alignment of 4 bytes.
+ */
+ return be64_to_cpu(get_unaligned(ptr));
+}
+
+static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
+{
+ put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr);
+}
+
+#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ _iba_set##num_bits((void *)_ptr + (field_offset), field_mask, \
+ FIELD_PREP(field_mask, value)); \
+ })
+#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
+
+#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ (type *)((void *)_ptr + (field_offset)); \
+ })
+#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
+
+/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
+#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in, \
+ bytes) \
+ ({ \
+ const type *_in_ptr = in; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (in && bytes) \
+ memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ _in_ptr, bytes); \
+ })
+#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
+
+#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr) \
+ ({ \
+ const field_struct *_ptr = ptr; \
+ (u##num_bits) FIELD_GET( \
+ field_mask, _iba_get##num_bits((const void *)_ptr + \
+ (field_offset))); \
+ })
+#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
+
+#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out, \
+ bytes) \
+ ({ \
+ type *_out_ptr = out; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (out && bytes) \
+ memcpy(_out_ptr, \
+ _IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ bytes); \
+ })
+#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
+
+/*
+ * The generated list becomes the parameters to the macros, the order is:
+ * - struct this applies to
+ * - starting offset of the mask
+ * - GENMASK or GENMASK_ULL in CPU order
+ * - The width of data the mask operations should work on, in bits
+ */
+
+/*
+ * Extraction using a tabular description like table 106. bit_offset is from
+ * the Byte[Bit] notation.
+ */
+#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits) \
+ field_struct, byte_offset, \
+ GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)), \
+ 8
+#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits) \
+ IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
+
+#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFE, \
+ GENMASK(15 - (((byte_offset) % 2) * 8), \
+ 15 - (((byte_offset) % 2) * 8) - (num_bits - 1)), \
+ 16
+
+#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFC, \
+ GENMASK(31 - (((byte_offset) % 4) * 8), \
+ 31 - (((byte_offset) % 4) * 8) - (num_bits - 1)), \
+ 32
+
+#define IBA_FIELD64_LOC(field_struct, byte_offset) \
+ field_struct, byte_offset, GENMASK_ULL(63, 0), 64
+/*
+ * In the IBTA spec, everything larger than 64 bits is a multiple
+ * of bytes without leftover bits.
+ */
+#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type) \
+ field_struct, byte_offset, type, num_bits
+
+#endif /* _IBA_DEFS_H_ */
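To show how the *_LOC tuples feed IBA_GET()/IBA_SET(), here is a hedged, self-contained sketch with a made-up 8-byte big-endian structure (the struct and field are hypothetical, not taken from the IBTA spec):

#include <rdma/iba.h>

/* Hypothetical wire format: two big-endian 32-bit words. */
struct example_wire_msg {
	__be32 data[2];
};

/* A 24-bit field occupying bits 31..8 of the word at byte offset 4. */
#define EXAMPLE_FIELD_QPN IBA_FIELD32_LOC(struct example_wire_msg, 4, 24)

static void example_iba_usage(struct example_wire_msg *msg)
{
	IBA_SET(EXAMPLE_FIELD_QPN, msg, 0x123456);	/* masked, endian-safe store */
	WARN_ON(IBA_GET(EXAMPLE_FIELD_QPN, msg) != 0x123456);
}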
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
new file mode 100644
index 000000000000..269904425d3f
--- /dev/null
+++ b/include/rdma/ibta_vol1_c12.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ *
+ * This file is IBTA volume 1, chapter 12 declarations:
+ * CHAPTER 12: COMMUNICATION MANAGEMENT
+ */
+#ifndef _IBTA_VOL1_C12_H_
+#define _IBTA_VOL1_C12_H_
+
+#include <rdma/iba.h>
+
+#define CM_FIELD_BLOC(field_struct, byte_offset, bits_offset, width) \
+ IBA_FIELD_BLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), bits_offset, \
+ width)
+#define CM_FIELD8_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD8_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD16_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD16_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD32_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD32_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD64_LOC(field_struct, byte_offset) \
+ IBA_FIELD64_LOC(field_struct, (byte_offset + sizeof(struct ib_mad_hdr)))
+#define CM_FIELD_MLOC(field_struct, byte_offset, width, type) \
+ IBA_FIELD_MLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width, type)
+#define CM_STRUCT(field_struct, total_len) \
+ field_struct \
+ { \
+ struct ib_mad_hdr hdr; \
+ u32 _data[(total_len) / 32 + \
+ BUILD_BUG_ON_ZERO((total_len) % 32 != 0)]; \
+ }
+
+/* Table 106 REQ Message Contents */
+#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
+#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
+#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
+#define CM_REQ_LOCAL_QPN CM_FIELD32_LOC(struct cm_req_msg, 32, 24)
+#define CM_REQ_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_req_msg, 35, 8)
+#define CM_REQ_LOCAL_EECN CM_FIELD32_LOC(struct cm_req_msg, 36, 24)
+#define CM_REQ_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_req_msg, 39, 8)
+#define CM_REQ_REMOTE_EECN CM_FIELD32_LOC(struct cm_req_msg, 40, 24)
+#define CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 43, 5)
+#define CM_REQ_TRANSPORT_SERVICE_TYPE CM_FIELD_BLOC(struct cm_req_msg, 43, 5, 2)
+#define CM_REQ_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_req_msg, 43, 7, 1)
+#define CM_REQ_STARTING_PSN CM_FIELD32_LOC(struct cm_req_msg, 44, 24)
+#define CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 47, 5)
+#define CM_REQ_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 47, 5, 3)
+#define CM_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_req_msg, 48, 16)
+#define CM_REQ_PATH_PACKET_PAYLOAD_MTU CM_FIELD8_LOC(struct cm_req_msg, 50, 4)
+#define CM_REQ_RDC_EXISTS CM_FIELD_BLOC(struct cm_req_msg, 50, 4, 1)
+#define CM_REQ_RNR_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 50, 5, 3)
+#define CM_REQ_MAX_CM_RETRIES CM_FIELD8_LOC(struct cm_req_msg, 51, 4)
+#define CM_REQ_SRQ CM_FIELD_BLOC(struct cm_req_msg, 51, 4, 1)
+#define CM_REQ_EXTENDED_TRANSPORT_TYPE \
+ CM_FIELD_BLOC(struct cm_req_msg, 51, 5, 3)
+#define CM_REQ_PRIMARY_LOCAL_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 52, 16)
+#define CM_REQ_PRIMARY_REMOTE_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 54, 16)
+#define CM_REQ_PRIMARY_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 56, 128, union ib_gid)
+#define CM_REQ_PRIMARY_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 72, 128, union ib_gid)
+#define CM_REQ_PRIMARY_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 88, 20)
+#define CM_REQ_PRIMARY_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 91, 2, 6)
+#define CM_REQ_PRIMARY_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 92, 8)
+#define CM_REQ_PRIMARY_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 93, 8)
+#define CM_REQ_PRIMARY_SL CM_FIELD8_LOC(struct cm_req_msg, 94, 4)
+#define CM_REQ_PRIMARY_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_req_msg, 94, 4, 1)
+#define CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 95, 5)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 96, 16)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 98, 16)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 100, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 116, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 132, 20)
+#define CM_REQ_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 135, 2, 6)
+#define CM_REQ_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 136, 8)
+#define CM_REQ_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 137, 8)
+#define CM_REQ_ALTERNATE_SL CM_FIELD8_LOC(struct cm_req_msg, 138, 4)
+#define CM_REQ_ALTERNATE_SUBNET_LOCAL \
+ CM_FIELD_BLOC(struct cm_req_msg, 138, 4, 1)
+#define CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 139, 5)
+#define CM_REQ_SAP_SUPPORTED CM_FIELD_BLOC(struct cm_req_msg, 139, 5, 1)
+#define CM_REQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_req_msg, 140, 736, void)
+CM_STRUCT(struct cm_req_msg, 140 * 8 + 736);
+
+/* Table 107 MRA Message Contents */
+#define CM_MRA_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 0, 32)
+#define CM_MRA_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 4, 32)
+#define CM_MRA_MESSAGE_MRAED CM_FIELD8_LOC(struct cm_mra_msg, 8, 2)
+#define CM_MRA_SERVICE_TIMEOUT CM_FIELD8_LOC(struct cm_mra_msg, 9, 5)
+#define CM_MRA_PRIVATE_DATA CM_FIELD_MLOC(struct cm_mra_msg, 10, 1776, void)
+CM_STRUCT(struct cm_mra_msg, 10 * 8 + 1776);
+
+/* Table 108 REJ Message Contents */
+#define CM_REJ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 0, 32)
+#define CM_REJ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 4, 32)
+#define CM_REJ_MESSAGE_REJECTED CM_FIELD8_LOC(struct cm_rej_msg, 8, 2)
+#define CM_REJ_REJECTED_INFO_LENGTH CM_FIELD8_LOC(struct cm_rej_msg, 9, 7)
+#define CM_REJ_REASON CM_FIELD16_LOC(struct cm_rej_msg, 10, 16)
+#define CM_REJ_ARI CM_FIELD_MLOC(struct cm_rej_msg, 12, 576, void)
+#define CM_REJ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rej_msg, 84, 1184, void)
+CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
+
+/* Table 110 REP Message Contents */
+#define CM_REP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 0, 32)
+#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
+#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
+#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
+#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
+#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
+#define CM_REP_FAILOVER_ACCEPTED CM_FIELD_BLOC(struct cm_rep_msg, 26, 5, 2)
+#define CM_REP_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_rep_msg, 26, 7, 1)
+#define CM_REP_RNR_RETRY_COUNT CM_FIELD8_LOC(struct cm_rep_msg, 27, 3)
+#define CM_REP_SRQ CM_FIELD_BLOC(struct cm_rep_msg, 27, 3, 1)
+#define CM_REP_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_rep_msg, 28)
+#define CM_REP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rep_msg, 36, 1568, void)
+CM_STRUCT(struct cm_rep_msg, 36 * 8 + 1568);
+
+/* Table 111 RTU Message Contents */
+#define CM_RTU_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 0, 32)
+#define CM_RTU_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 4, 32)
+#define CM_RTU_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rtu_msg, 8, 1792, void)
+CM_STRUCT(struct cm_rtu_msg, 8 * 8 + 1792);
+
+/* Table 112 DREQ Message Contents */
+#define CM_DREQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 0, 32)
+#define CM_DREQ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 4, 32)
+#define CM_DREQ_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_dreq_msg, 8, 24)
+#define CM_DREQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_dreq_msg, 12, 1760, void)
+CM_STRUCT(struct cm_dreq_msg, 12 * 8 + 1760);
+
+/* Table 113 DREP Message Contents */
+#define CM_DREP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 0, 32)
+#define CM_DREP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 4, 32)
+#define CM_DREP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_drep_msg, 8, 1792, void)
+CM_STRUCT(struct cm_drep_msg, 8 * 8 + 1792);
+
+/* Table 115 LAP Message Contents */
+#define CM_LAP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 0, 32)
+#define CM_LAP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 4, 32)
+#define CM_LAP_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_lap_msg, 12, 24)
+#define CM_LAP_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 15, 5)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 20, 16)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 22, 16)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 24, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 40, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_lap_msg, 56, 20)
+#define CM_LAP_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_lap_msg, 59, 8)
+#define CM_LAP_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_lap_msg, 60, 8)
+#define CM_LAP_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_lap_msg, 61, 2, 6)
+#define CM_LAP_ALTERNATE_SL CM_FIELD8_LOC(struct cm_lap_msg, 62, 4)
+#define CM_LAP_ALTERNATE_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_lap_msg, 62, 4, 1)
+#define CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 63, 5)
+#define CM_LAP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_lap_msg, 64, 1344, void)
+CM_STRUCT(struct cm_lap_msg, 64 * 8 + 1344);
+
+/* Table 116 APR Message Contents */
+#define CM_APR_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 0, 32)
+#define CM_APR_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 4, 32)
+#define CM_APR_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_apr_msg, 8, 8)
+#define CM_APR_AR_STATUS CM_FIELD8_LOC(struct cm_apr_msg, 9, 8)
+#define CM_APR_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_apr_msg, 12, 576, void)
+#define CM_APR_PRIVATE_DATA CM_FIELD_MLOC(struct cm_apr_msg, 84, 1184, void)
+CM_STRUCT(struct cm_apr_msg, 84 * 8 + 1184);
+
+/* Table 119 SIDR_REQ Message Contents */
+#define CM_SIDR_REQ_REQUESTID CM_FIELD32_LOC(struct cm_sidr_req_msg, 0, 32)
+#define CM_SIDR_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_sidr_req_msg, 4, 16)
+#define CM_SIDR_REQ_SERVICEID CM_FIELD64_LOC(struct cm_sidr_req_msg, 8)
+#define CM_SIDR_REQ_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_req_msg, 16, 1728, void)
+CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
+
+/* Table 120 SIDR_REP Message Contents */
+#define CM_SIDR_REP_REQUESTID CM_FIELD32_LOC(struct cm_sidr_rep_msg, 0, 32)
+#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
+#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 24, 576, void)
+#define CM_SIDR_REP_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 96, 1088, void)
+CM_STRUCT(struct cm_sidr_rep_msg, 96 * 8 + 1088);
+
+#endif /* _IBTA_VOL1_C12_H_ */
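The chapter-12 field locations are consumed through the same accessors; a hedged sketch of touching a REQ message (MAD allocation and validation are omitted, and the helper name is illustrative):

#include <rdma/ib_mad.h>
#include <rdma/ibta_vol1_c12.h>

/* Hypothetical: stamp the local communication ID and read back the
 * responder resources field from a REQ message. */
static u8 example_fill_req(struct cm_req_msg *req, u32 local_id)
{
	IBA_SET(CM_REQ_LOCAL_COMM_ID, req, local_id);
	return IBA_GET(CM_REQ_RESPONDER_RESOURCES, req);
}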
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b550ae89bf85..0d5c70e2d8ab 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -640,34 +640,14 @@ static inline int rvt_cmp_msn(u32 a, u32 b)
return (((int)a) - ((int)b)) << 8;
}
-/**
- * rvt_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
__be32 rvt_compute_aeth(struct rvt_qp *qp);
-/**
- * rvt_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
-/**
- * rvt_restart_sge - rewind the sge state for a wqe
- * @ss: the sge state pointer
- * @wqe: the wqe to rewind
- * @len: the data length from the start of the wqe in bytes
- *
- * Returns the remaining data length.
- */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
/**
+ * rvt_div_round_up_mtu - round up divide
* @qp - the qp pair
* @len - the length
*
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 3447bfe356d6..6ae6cf8e4c2e 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -76,7 +76,7 @@
#define DECLARE_UVERBS_NAMED_OBJECT(_object_id, _type_attrs, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.type_attrs = &_type_attrs, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
@@ -88,10 +88,10 @@
* identify all uapi methods with a (object,method) tuple. However, they have
* no type pointer.
*/
-#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
+#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
.methods = &UVERBS_OBJECT_METHODS(_object_id) \
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 05eabfd5d0d3..1b28ce1aba07 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -104,16 +104,6 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check
-uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
-{
- int ret = rdma_alloc_commit_uobject(uobj, attrs);
-
- if (ret)
- return ret;
- return 0;
-}
-
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
@@ -124,8 +114,7 @@ static inline struct ib_uobject *
__uobj_alloc(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
{
- struct ib_uobject *uobj =
- rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+ struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs);
if (!IS_ERR(uobj))
*ib_dev = attrs->context->device;
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index d57a5ba00c74..f1cbdae67250 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -83,9 +83,9 @@ enum rdma_lookup_mode {
*/
struct uverbs_obj_type_class {
struct ib_uobject *(*alloc_begin)(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile);
+ struct uverbs_attr_bundle *attrs);
/* This consumes the kref on uobj */
- int (*alloc_commit)(struct ib_uobject *uobj);
+ void (*alloc_commit)(struct ib_uobject *uobj);
/* This does not consume the kref on uobj */
void (*alloc_abort)(struct ib_uobject *uobj);
@@ -98,7 +98,6 @@ struct uverbs_obj_type_class {
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
- u8 needs_kfree_rcu;
};
struct uverbs_obj_type {
@@ -138,23 +137,34 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs);
+
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's memory
+ * alive, regardless if this uobject is still alive in the context's objects
+ * repository. Objects are put via uverbs_uobject_put.
+ */
+static inline void uverbs_uobject_get(struct ib_uobject *uobject)
+{
+ kref_get(&uobject->ref);
+}
+void uverbs_uobject_put(struct ib_uobject *uobject);
struct uverbs_obj_fd_type {
/*
* In fd based objects, uverbs_obj_type_ops points to generic
* fd operations. In order to specialize the underlying types (e.g.
* completion_channel), we use fops, name and flags for fd creation.
- * context_closed is called when the context is closed either when
- * the driver is removed or the process terminated.
+ * destroy_object is called when the uobject is to be destroyed,
+ * because the driver is removed or the FD is closed.
*/
struct uverbs_obj_type type;
- int (*context_closed)(struct ib_uobject *uobj,
+ int (*destroy_object)(struct ib_uobject *uobj,
enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
@@ -163,11 +173,11 @@ struct uverbs_obj_fd_type {
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
-void uverbs_close_fd(struct file *f);
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _destroy_object, _fops, _name, _flags) \
((&((const struct uverbs_obj_fd_type) \
{.type = { \
.type_class = &uverbs_fd_class, \
@@ -175,7 +185,7 @@ void uverbs_close_fd(struct file *f);
UVERBS_BUILD_BUG_ON((_obj_size) < \
sizeof(struct ib_uobject)), \
}, \
- .context_closed = _context_closed, \
+ .destroy_object = _destroy_object, \
.fops = _fops, \
.name = _name, \
.flags = _flags}))->type)
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3ed836db5306..f8312a3e5b42 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -172,6 +172,7 @@ struct scsi_device {
* because we did a bus reset. */
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index 5101e987c0ef..4fe69d863b5d 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -44,6 +44,7 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
+extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index f91bcca604e4..29c7ad04d2e2 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -68,6 +68,36 @@ typedef struct sg_io_hdr
unsigned int info; /* [o] auxiliary information */
} sg_io_hdr_t; /* 64 bytes long (on i386) */
+#if defined(__KERNEL__)
+#include <linux/compat.h>
+
+struct compat_sg_io_hdr {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+};
+#endif
+
#define SG_INTERFACE_ID_ORIG 'S'
/* Use negative values to flag difference from original sg_header structure */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 182c9fe9c0e9..19c87661eeec 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -48,6 +48,16 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
{ EXT4_GET_BLOCKS_ZERO, "ZERO" })
+/*
+ * __print_flags() requires that all enum values be wrapped in the
+ * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
+ * ring buffer.
+ */
+TRACE_DEFINE_ENUM(BH_New);
+TRACE_DEFINE_ENUM(BH_Mapped);
+TRACE_DEFINE_ENUM(BH_Unwritten);
+TRACE_DEFINE_ENUM(BH_Boundary);
+
#define show_mflags(flags) __print_flags(flags, "", \
{ EXT4_MAP_NEW, "N" }, \
{ EXT4_MAP_MAPPED, "M" }, \
@@ -62,11 +72,18 @@ struct partial_cluster;
{ EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
+TRACE_DEFINE_ENUM(ES_WRITTEN_B);
+TRACE_DEFINE_ENUM(ES_UNWRITTEN_B);
+TRACE_DEFINE_ENUM(ES_DELAYED_B);
+TRACE_DEFINE_ENUM(ES_HOLE_B);
+TRACE_DEFINE_ENUM(ES_REFERENCED_B);
+
#define show_extent_status(status) __print_flags(status, "", \
{ EXTENT_STATUS_WRITTEN, "W" }, \
{ EXTENT_STATUS_UNWRITTEN, "U" }, \
{ EXTENT_STATUS_DELAYED, "D" }, \
- { EXTENT_STATUS_HOLE, "H" })
+ { EXTENT_STATUS_HOLE, "H" }, \
+ { EXTENT_STATUS_REFERENCED, "R" })
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
@@ -2265,7 +2282,7 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
),
@@ -2354,7 +2371,7 @@ TRACE_EVENT(ext4_es_find_extent_range_exit,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
),
@@ -2408,7 +2425,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
__entry->found = found;
),
@@ -2576,7 +2593,7 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
__entry->allocated = allocated;
),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 1796ff99c3e9..67a97838c2a0 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -49,6 +49,7 @@ TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
+TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -124,13 +125,14 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_SYNC, "Sync" }, \
{ CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" }, \
- { CP_UMOUNT, "Umount" }, \
+ { CP_PAUSE, "Pause" }, \
{ CP_TRIMMED, "Trimmed" })
#define show_fsync_cpreason(type) \
__print_symbolic(type, \
{ CP_NO_NEEDED, "no needed" }, \
{ CP_NON_REGULAR, "non regular" }, \
+ { CP_COMPRESSED, "compressed" }, \
{ CP_HARDLINK, "hardlink" }, \
{ CP_SB_NEED_CP, "sb needs cp" }, \
{ CP_WRONG_PINO, "wrong pino" }, \
@@ -148,6 +150,11 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ F2FS_GOING_DOWN_METAFLUSH, "meta flush" }, \
{ F2FS_GOING_DOWN_NEED_FSCK, "need fsck" })
+#define show_compress_algorithm(type) \
+ __print_symbolic(type, \
+ { COMPRESS_LZO, "LZO" }, \
+ { COMPRESS_LZ4, "LZ4" })
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -1710,6 +1717,100 @@ TRACE_EVENT(f2fs_shutdown,
__entry->ret)
);
+DECLARE_EVENT_CLASS(f2fs_zip_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, idx)
+ __field(unsigned int, size)
+ __field(unsigned int, algtype)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->idx = cluster_idx;
+ __entry->size = cluster_size;
+ __entry->algtype = algtype;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ "cluster_size = %u, algorithm = %s",
+ show_dev_ino(__entry),
+ __entry->idx,
+ __entry->size,
+ show_compress_algorithm(__entry->algtype))
+);
+
+DECLARE_EVENT_CLASS(f2fs_zip_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, idx)
+ __field(unsigned int, size)
+ __field(unsigned int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->idx = cluster_idx;
+ __entry->size = compressed_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ "compressed_size = %u, ret = %d",
+ show_dev_ino(__entry),
+ __entry->idx,
+ __entry->size,
+ __entry->ret)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_compress_pages_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_decompress_pages_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_compress_pages_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index b352d66b5d51..27bd9e4f927b 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -320,6 +320,7 @@ TRACE_EVENT(io_uring_complete,
* io_uring_submit_sqe - called before submitting one SQE
*
* @ctx: pointer to a ring context structure
+ * @opcode: opcode of request
* @user_data: user data associated with the request
* @force_nonblock: whether a context blocking or not
* @sq_thread: true if sq_thread has submitted this SQE
@@ -329,12 +330,14 @@ TRACE_EVENT(io_uring_complete,
*/
TRACE_EVENT(io_uring_submit_sqe,
- TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
+ bool sq_thread),
- TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
+ TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
TP_STRUCT__entry (
__field( void *, ctx )
+ __field( u8, opcode )
__field( u64, user_data )
__field( bool, force_nonblock )
__field( bool, sq_thread )
@@ -342,13 +345,15 @@ TRACE_EVENT(io_uring_submit_sqe,
TP_fast_assign(
__entry->ctx = ctx;
+ __entry->opcode = opcode;
__entry->user_data = user_data;
__entry->force_nonblock = force_nonblock;
__entry->sq_thread = sq_thread;
),
- TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
- __entry->ctx, (unsigned long long) __entry->user_data,
+ TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data,
__entry->force_nonblock, __entry->sq_thread)
);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ad7e642bd497..f65b1f6db22d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -88,8 +88,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->node = node;
),
- TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
- __entry->call_site,
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ (void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
diff --git a/include/trace/events/rdma_core.h b/include/trace/events/rdma_core.h
new file mode 100644
index 000000000000..17642aa54437
--- /dev/null
+++ b/include/trace/events/rdma_core.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_core
+
+#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RDMA_CORE_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_verbs.h>
+
+/*
+ * enum ib_poll_context, from include/rdma/ib_verbs.h
+ */
+#define IB_POLL_CTX_LIST \
+ ib_poll_ctx(DIRECT) \
+ ib_poll_ctx(SOFTIRQ) \
+ ib_poll_ctx(WORKQUEUE) \
+ ib_poll_ctx_end(UNBOUND_WORKQUEUE)
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+#define ib_poll_ctx_end(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+
+IB_POLL_CTX_LIST
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) { IB_POLL_##x, #x },
+#define ib_poll_ctx_end(x) { IB_POLL_##x, #x }
+
+#define rdma_show_ib_poll_ctx(x) \
+ __print_symbolic(x, IB_POLL_CTX_LIST)
+
+/**
+ ** Completion Queue events
+ **/
+
+TRACE_EVENT(cq_schedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = true;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_reschedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = false;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_process,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(bool, interrupt)
+ __field(s64, latency)
+ ),
+
+ TP_fast_assign(
+ ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);
+
+ __entry->cq_id = cq->res.id;
+ __entry->latency = ktime_to_us(latency);
+ __entry->interrupt = cq->interrupt;
+ ),
+
+ TP_printk("cq.id=%u wake-up took %lld [us] from %s",
+ __entry->cq_id, __entry->latency,
+ __entry->interrupt ? "interrupt" : "reschedule"
+ )
+);
+
+TRACE_EVENT(cq_poll,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int requested,
+ int rc
+ ),
+
+ TP_ARGS(cq, requested, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, requested)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->requested = requested;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("cq.id=%u requested %d, returned %d",
+ __entry->cq_id, __entry->requested, __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_drain_complete,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u",
+ __entry->cq_id
+ )
+);
+
+
+TRACE_EVENT(cq_modify,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ u16 comps,
+ u16 usec
+ ),
+
+ TP_ARGS(cq, comps, usec),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(unsigned int, comps)
+ __field(unsigned int, usec)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->comps = comps;
+ __entry->usec = usec;
+ ),
+
+ TP_printk("cq.id=%u comps=%u usec=%u",
+ __entry->cq_id, __entry->comps, __entry->usec
+ )
+);
+
+TRACE_EVENT(cq_alloc,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx
+ ),
+
+ TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
+ __entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx)
+ )
+);
+
+TRACE_EVENT(cq_alloc_error,
+ TP_PROTO(
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx,
+ int rc
+ ),
+
+ TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),
+
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
+ __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_free,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+/**
+ ** Memory Region events
+ **/
+
+/*
+ * enum ib_mr_type, from include/rdma/ib_verbs.h
+ */
+#define IB_MR_TYPE_LIST \
+ ib_mr_type_item(MEM_REG) \
+ ib_mr_type_item(SG_GAPS) \
+ ib_mr_type_item(DM) \
+ ib_mr_type_item(USER) \
+ ib_mr_type_item(DMA) \
+ ib_mr_type_end(INTEGRITY)
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+#define ib_mr_type_end(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+
+IB_MR_TYPE_LIST
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) { IB_MR_TYPE_##x, #x },
+#define ib_mr_type_end(x) { IB_MR_TYPE_##x, #x }
+
+#define rdma_show_ib_mr_type(x) \
+ __print_symbolic(x, IB_MR_TYPE_LIST)
+
+TRACE_EVENT(mr_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, mr_type, max_num_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_sg)
+ __field(int, rc)
+ __field(unsigned long, mr_type)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_sg = max_num_sg;
+ __entry->mr_type = mr_type;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id,
+ rdma_show_ib_mr_type(__entry->mr_type),
+ __entry->max_num_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_integ_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_data_sg)
+ __field(u32, max_num_meta_sg)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_data_sg = max_num_data_sg;
+ __entry->max_num_meta_sg = max_num_meta_sg;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
+ __entry->max_num_meta_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_dereg,
+ TP_PROTO(
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->id = mr->res.id;
+ ),
+
+ TP_printk("mr.id=%u", __entry->id)
+);
+
+#endif /* _TRACE_RDMA_CORE_H */
+
+#include <trace/define_trace.h>
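
As a rough usage sketch, the core CQ allocator would pair the two allocation events along these lines (variable names are assumed for illustration; the real call sites are in drivers/infiniband/core):

        if (ret) {
                trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
                return ERR_PTR(ret);
        }
        trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
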
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 83860de120e3..248bc09bfc99 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(v4l2_event_class,
__entry->bytesused = buf->bytesused;
__entry->flags = buf->flags;
__entry->field = buf->field;
- __entry->timestamp = timeval_to_ns(&buf->timestamp);
+ __entry->timestamp = v4l2_buffer_get_timestamp(buf);
__entry->timecode_type = buf->timecode.type;
__entry->timecode_flags = buf->timecode.flags;
__entry->timecode_frames = buf->timecode.frames;
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index ef50be4e5e6c..d94def25e4dc 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_template,
TP_fast_assign(
strscpy_pad(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
- 32);
+ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+ NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strscpy_pad(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
@@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
struct address_space *mapping = page_mapping(page);
struct inode *inode = mapping ? mapping->host : NULL;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
@@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
@@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) :
- "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class,
__field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: cgroup_ino=%lu",
@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class,
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io,
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
),
TP_fast_assign(
- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
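
Every call site above now goes through bdi_dev_name(); a minimal sketch of that helper, inferred from the open-coded pattern it replaces (one caller passes NULL, so it must tolerate a missing bdi or device), rather than copied from its real definition:

        static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
        {
                if (!bdi || !bdi->dev)
                        return "(unknown)";
                return dev_name(bdi->dev);
        }
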
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1fc8faa6e973..3a3201e4618e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -851,8 +851,13 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_openat2 437
+__SYSCALL(__NR_openat2, sys_openat2)
+#define __NR_pidfd_getfd 438
+__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+
#undef __NR_syscalls
-#define __NR_syscalls 436
+#define __NR_syscalls 439
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index bbdad866e3fe..ac3879829bb5 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -703,6 +703,9 @@ struct drm_amdgpu_cs_chunk_data {
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
#define AMDGPU_INFO_FW_TA 0x13
+ /* Subquery id: Query DMCUB firmware version */
+ #define AMDGPU_INFO_FW_DMCUB 0x14
+
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8caaaf7ff91b..8bc0b31597d8 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -411,6 +411,30 @@ extern "C" {
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 media compression
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 45c6582b3df3..a51aa1c618c1 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -394,7 +394,7 @@ struct drm_exynos_ioctl_ipp_commit {
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
-/* EXYNOS specific events */
+/* Exynos specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
#define DRM_EXYNOS_IPP_EVENT 0x80000002
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 5400d7e057f1..829c0a48577f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -395,6 +395,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -793,6 +794,37 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+struct drm_i915_gem_mmap_offset {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * One of the MMAP_OFFSET types (GTT, WC, WB, UC, etc.)
+ * must be included.
+ */
+ __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+
+ /*
+ * Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
+
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
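
A hypothetical userspace fragment driving the new ioctl (fd, handle and size are assumed to come from earlier GEM calls; drmIoctl() is the usual libdrm wrapper):

        struct drm_i915_gem_mmap_offset arg = {
                .handle = handle,
                .flags  = I915_MMAP_OFFSET_WC,  /* write-combined mapping */
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
                ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                           fd, arg.offset);
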
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 9459a6e3bc1f..853a327433d3 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -110,6 +110,7 @@ struct drm_nouveau_gem_pushbuf {
__u64 push;
__u32 suffix0;
__u32 suffix1;
+#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
__u64 vram_available;
__u64 gart_available;
};
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 02cab33f2f25..fcb741e3068f 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -71,6 +71,7 @@ extern "C" {
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
+#define DRM_VMW_MSG 29
/*************************************************************************/
/**
@@ -1213,6 +1214,22 @@ union drm_vmw_gb_surface_reference_ext_arg {
struct drm_vmw_surface_arg req;
};
+/**
+ * struct drm_vmw_msg_arg
+ *
+ * @send: Pointer to user-space msg string (null terminated).
+ * @receive: Pointer to user-space receive buffer.
+ * @send_only: Boolean; nonzero if the message is send-only (no receive).
+ *
+ * Argument to the DRM_VMW_MSG ioctl.
+ */
+struct drm_vmw_msg_arg {
+ __u64 send;
+ __u64 receive;
+ __s32 send_only;
+ __u32 receive_len;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 0e72172cd23a..985b89068591 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -49,6 +49,7 @@ struct acct
__u16 ac_uid16; /* LSB of Real User ID */
__u16 ac_gid16; /* LSB of Real Group ID */
__u16 ac_tty; /* Control Terminal */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Process Creation Time */
comp_t ac_utime; /* User Time */
comp_t ac_stime; /* System Time */
@@ -81,6 +82,7 @@ struct acct_v3
__u32 ac_gid; /* Real Group ID */
__u32 ac_pid; /* Process ID */
__u32 ac_ppid; /* Parent Process ID */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Process Creation Time */
#ifdef __KERNEL__
__u32 ac_etime; /* Elapsed Time */
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 240fdb9a60f6..272dc69fa080 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -301,6 +301,7 @@ struct vfs_ns_cap_data {
/* Allow more than 64hz interrupts from the real-time clock */
/* Override max number of consoles on console allocation */
/* Override max number of keymaps */
+/* Control memory reclaim behavior */
#define CAP_SYS_RESOURCE 24
diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h
new file mode 100644
index 000000000000..6f84fa08e074
--- /dev/null
+++ b/include/uapi/linux/dma-heap.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * DMABUF Heaps Userspace API
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+#ifndef _UAPI_LINUX_DMABUF_POOL_H
+#define _UAPI_LINUX_DMABUF_POOL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * DOC: DMABUF Heaps Userspace API
+ */
+
+/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */
+#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
+
+/* Currently no heap flags */
+#define DMA_HEAP_VALID_HEAP_FLAGS (0)
+
+/**
+ * struct dma_heap_allocation_data - metadata passed from userspace for
+ * allocations
+ * @len: size of the allocation
+ * @fd: will be populated with a fd which provides the
+ * handle to the allocated dma-buf
+ * @fd_flags: file descriptor flags used when allocating
+ * @heap_flags: flags passed to heap
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct dma_heap_allocation_data {
+ __u64 len;
+ __u32 fd;
+ __u32 fd_flags;
+ __u64 heap_flags;
+};
+
+#define DMA_HEAP_IOC_MAGIC 'H'
+
+/**
+ * DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool
+ *
+ * Takes a dma_heap_allocation_data struct and returns it with the fd field
+ * populated with the dmabuf handle of the allocation.
+ */
+#define DMA_HEAP_IOCTL_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,\
+ struct dma_heap_allocation_data)
+
+#endif /* _UAPI_LINUX_DMABUF_POOL_H */
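
A minimal, self-contained userspace sketch of the allocation ioctl; the /dev/dma_heap/system path is an assumption about how a heap named "system" would be exposed:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/dma-heap.h>

        int main(void)
        {
                struct dma_heap_allocation_data data = {
                        .len = 4096,
                        .fd_flags = O_RDWR | O_CLOEXEC,
                        .heap_flags = 0,
                };
                int heap = open("/dev/dma_heap/system", O_RDWR);

                if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
                        perror("dma-heap alloc");
                        return 1;
                }
                printf("dmabuf fd: %u\n", data.fd); /* fd now owns the buffer */
                close(data.fd);
                close(heap);
                return 0;
        }
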
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 1f97b33c840e..ca88b7bce553 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -3,6 +3,7 @@
#define _UAPI_LINUX_FCNTL_H
#include <asm/fcntl.h>
+#include <linux/openat2.h>
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
@@ -100,5 +101,4 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
-
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 55cfcb71606d..3f7961c1c243 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -34,21 +34,43 @@ struct io_uring_sqe {
__u32 timeout_flags;
__u32 accept_flags;
__u32 cancel_flags;
+ __u32 open_flags;
+ __u32 statx_flags;
+ __u32 fadvise_advice;
};
__u64 user_data; /* data to be passed back at completion time */
union {
- __u16 buf_index; /* index into fixed buffers, if used */
+ struct {
+ /* index into fixed buffers, if used */
+ __u16 buf_index;
+ /* personality to use, if used */
+ __u16 personality;
+ };
__u64 __pad2[3];
};
};
+enum {
+ IOSQE_FIXED_FILE_BIT,
+ IOSQE_IO_DRAIN_BIT,
+ IOSQE_IO_LINK_BIT,
+ IOSQE_IO_HARDLINK_BIT,
+ IOSQE_ASYNC_BIT,
+};
+
/*
* sqe->flags
*/
-#define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */
-#define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */
-#define IOSQE_IO_LINK (1U << 2) /* links next sqe */
-#define IOSQE_IO_HARDLINK (1U << 3) /* like LINK, but stronger */
+/* use fixed fileset */
+#define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT)
+/* issue after inflight IO */
+#define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT)
+/* links next sqe */
+#define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT)
+/* like LINK, but stronger */
+#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
+/* always go async */
+#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
/*
* io_uring_setup() flags
@@ -57,6 +79,8 @@ struct io_uring_sqe {
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
+#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
+#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
enum {
IORING_OP_NOP,
@@ -76,6 +100,19 @@ enum {
IORING_OP_ASYNC_CANCEL,
IORING_OP_LINK_TIMEOUT,
IORING_OP_CONNECT,
+ IORING_OP_FALLOCATE,
+ IORING_OP_OPENAT,
+ IORING_OP_CLOSE,
+ IORING_OP_FILES_UPDATE,
+ IORING_OP_STATX,
+ IORING_OP_READ,
+ IORING_OP_WRITE,
+ IORING_OP_FADVISE,
+ IORING_OP_MADVISE,
+ IORING_OP_SEND,
+ IORING_OP_RECV,
+ IORING_OP_OPENAT2,
+ IORING_OP_EPOLL_CTL,
/* this goes last, obviously */
IORING_OP_LAST,
@@ -153,7 +190,8 @@ struct io_uring_params {
__u32 sq_thread_cpu;
__u32 sq_thread_idle;
__u32 features;
- __u32 resv[4];
+ __u32 wq_fd;
+ __u32 resv[3];
struct io_sqring_offsets sq_off;
struct io_cqring_offsets cq_off;
};
@@ -164,6 +202,8 @@ struct io_uring_params {
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
#define IORING_FEAT_NODROP (1U << 1)
#define IORING_FEAT_SUBMIT_STABLE (1U << 2)
+#define IORING_FEAT_RW_CUR_POS (1U << 3)
+#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
/*
* io_uring_register(2) opcodes and arguments
@@ -175,6 +215,10 @@ struct io_uring_params {
#define IORING_REGISTER_EVENTFD 4
#define IORING_UNREGISTER_EVENTFD 5
#define IORING_REGISTER_FILES_UPDATE 6
+#define IORING_REGISTER_EVENTFD_ASYNC 7
+#define IORING_REGISTER_PROBE 8
+#define IORING_REGISTER_PERSONALITY 9
+#define IORING_UNREGISTER_PERSONALITY 10
struct io_uring_files_update {
__u32 offset;
@@ -182,4 +226,21 @@ struct io_uring_files_update {
__aligned_u64 /* __s32 * */ fds;
};
+#define IO_URING_OP_SUPPORTED (1U << 0)
+
+struct io_uring_probe_op {
+ __u8 op;
+ __u8 resv;
+ __u16 flags; /* IO_URING_OP_* flags */
+ __u32 resv2;
+};
+
+struct io_uring_probe {
+ __u8 last_op; /* last opcode supported */
+ __u8 ops_len; /* length of ops[] array below */
+ __u16 resv;
+ __u32 resv2[3];
+ struct io_uring_probe_op ops[0];
+};
+
#endif
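
A minimal sketch of the new IORING_REGISTER_PROBE opcode from userspace; the raw syscall numbers used as fallbacks (425 for io_uring_setup, 427 for io_uring_register) are taken from the asm-generic table and are an assumption for other architectures:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/io_uring.h>

        #ifndef __NR_io_uring_setup
        #define __NR_io_uring_setup 425
        #endif
        #ifndef __NR_io_uring_register
        #define __NR_io_uring_register 427
        #endif

        int main(void)
        {
                struct io_uring_params p;
                memset(&p, 0, sizeof(p));

                int ring = syscall(__NR_io_uring_setup, 4, &p);
                if (ring < 0) {
                        perror("io_uring_setup");
                        return 1;
                }

                /* room for 256 per-opcode entries after the fixed header */
                size_t len = sizeof(struct io_uring_probe) +
                             256 * sizeof(struct io_uring_probe_op);
                struct io_uring_probe *probe = calloc(1, len);

                if (syscall(__NR_io_uring_register, ring,
                            IORING_REGISTER_PROBE, probe, 256) < 0) {
                        perror("IORING_REGISTER_PROBE");
                        return 1;
                }
                for (unsigned int i = 0; i < probe->ops_len; i++)
                        if (probe->ops[i].flags & IO_URING_OP_SUPPORTED)
                                printf("opcode %u supported\n", probe->ops[i].op);

                free(probe);
                close(ring);
                return 0;
        }
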
diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h
new file mode 100644
index 000000000000..58b1eb711360
--- /dev/null
+++ b/include/uapi/linux/openat2.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_OPENAT2_H
+#define _UAPI_LINUX_OPENAT2_H
+
+#include <linux/types.h>
+
+/*
+ * Arguments for how openat2(2) should open the target path. If only @flags and
+ * @mode are non-zero, then openat2(2) operates very similarly to openat(2).
+ *
+ * However, unlike openat(2), unknown or invalid bits in @flags result in
+ * -EINVAL rather than being silently ignored. @mode must be zero unless one of
+ * {O_CREAT, O_TMPFILE} is set.
+ *
+ * @flags: O_* flags.
+ * @mode: O_CREAT/O_TMPFILE file mode.
+ * @resolve: RESOLVE_* flags.
+ */
+struct open_how {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+};
+
+/* how->resolve flags for openat2(2). */
+#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
+ (includes bind-mounts). */
+#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
+ "magic-links". */
+#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
+ (implies RESOLVE_NO_MAGICLINKS) */
+#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
+ "..", symlinks, and absolute
+ paths which escape the dirfd. */
+#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
+ be scoped inside the dirfd
+ (similar to chroot(2)). */
+
+#endif /* _UAPI_LINUX_OPENAT2_H */
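
A minimal sketch of invoking the new syscall directly; __NR_openat2 falls back to 437 as added to the asm-generic table above, and the path used is purely illustrative:

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/openat2.h>

        #ifndef __NR_openat2
        #define __NR_openat2 437
        #endif

        int main(void)
        {
                struct open_how how;
                memset(&how, 0, sizeof(how));
                how.flags = O_RDONLY;
                how.resolve = RESOLVE_IN_ROOT; /* ".." and "/" stay inside dirfd */

                int root = open("/", O_PATH);
                int fd = syscall(__NR_openat2, root, "etc/hostname",
                                 &how, sizeof(how));
                if (fd < 0) {
                        perror("openat2");
                        return 1;
                }
                close(fd);
                close(root);
                return 0;
        }
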
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index acb7d2bdb419..5437690483cd 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -676,6 +676,7 @@
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
+#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 7da1b37b27aa..07b4f8131e36 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -234,4 +234,8 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* Control reclaim behavior when allocating memory */
+#define PR_SET_IO_FLUSHER 57
+#define PR_GET_IO_FLUSHER 58
+
#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 26ee91300e3e..dcc1b3e6106f 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -48,9 +48,11 @@ struct rand_pool_info {
* Flags for getrandom(2)
*
* GRND_NONBLOCK Don't block and return EAGAIN instead
- * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom
+ * GRND_RANDOM No effect
+ * GRND_INSECURE Return non-cryptographic random bytes
*/
#define GRND_NONBLOCK 0x0001
#define GRND_RANDOM 0x0002
+#define GRND_INSECURE 0x0004
#endif /* _UAPI_LINUX_RANDOM_H */
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 23cd84868cc3..fa7f97da5b76 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <asm/bitsperlong.h>
#include <asm/swab.h>
/*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
__fswab64(x))
#endif
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+ return __swab64(y);
+#else /* BITS_PER_LONG == 32 */
+ return __swab32(y);
+#endif
+}
+
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
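
The new __swab() simply selects the width-appropriate helper at compile time, so on a 64-bit build:

        unsigned long x = 0x0123456789abcdefUL;
        unsigned long y = __swab(x);    /* 0xefcdab8967452301 */
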
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index c912b5a678e4..2c661a3557e5 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -32,7 +32,18 @@
#define SWITCHTEC_IOCTL_PART_VENDOR5 10
#define SWITCHTEC_IOCTL_PART_VENDOR6 11
#define SWITCHTEC_IOCTL_PART_VENDOR7 12
-#define SWITCHTEC_IOCTL_NUM_PARTITIONS 13
+#define SWITCHTEC_IOCTL_PART_BL2_0 13
+#define SWITCHTEC_IOCTL_PART_BL2_1 14
+#define SWITCHTEC_IOCTL_PART_MAP_0 15
+#define SWITCHTEC_IOCTL_PART_MAP_1 16
+#define SWITCHTEC_IOCTL_PART_KEY_0 17
+#define SWITCHTEC_IOCTL_PART_KEY_1 18
+
+#define SWITCHTEC_NUM_PARTITIONS_GEN3 13
+#define SWITCHTEC_NUM_PARTITIONS_GEN4 19
+
+/* obsolete: for compatibility with old userspace software */
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS SWITCHTEC_NUM_PARTITIONS_GEN3
struct switchtec_ioctl_flash_info {
__u64 flash_length;
@@ -98,7 +109,9 @@ struct switchtec_ioctl_event_summary {
#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
#define SWITCHTEC_IOCTL_EVENT_GFMS 29
-#define SWITCHTEC_IOCTL_MAX_EVENTS 30
+#define SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY 30
+#define SWITCHTEC_IOCTL_EVENT_UEC 31
+#define SWITCHTEC_IOCTL_MAX_EVENTS 32
#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 87aa2a6d9125..27c1ed2822e6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -195,7 +195,7 @@ enum
VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
- VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */
+ VM_MIN_SLAB=35, /* Percent pages ignored by node reclaim */
};
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 5e8ca16a9079..ccbd08709321 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 9
+#define TASKSTATS_VERSION 10
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -112,6 +112,7 @@ struct taskstats {
__u32 ac_gid; /* Group ID */
__u32 ac_pid; /* Process ID */
__u32 ac_ppid; /* Parent process ID */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Begin time [sec since 1970] */
__u64 ac_etime __attribute__((aligned(8)));
/* Elapsed time [usec] */
@@ -168,6 +169,9 @@ struct taskstats {
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
+
+ /* v10: 64-bit btime to avoid overflow */
+ __u64 ac_btime64; /* 64-bit begin time */
};
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
index 074e391d73a1..bcc0002115d3 100644
--- a/include/uapi/linux/time_types.h
+++ b/include/uapi/linux/time_types.h
@@ -33,6 +33,11 @@ struct __kernel_old_timespec {
long tv_nsec; /* nanoseconds */
};
+struct __kernel_old_itimerval {
+ struct __kernel_old_timeval it_interval;/* timer interval */
+ struct __kernel_old_timeval it_value; /* current value */
+};
+
struct __kernel_sock_timeval {
__s64 tv_sec;
__s64 tv_usec;
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index 9f517f9010bb..bd627c368d09 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -57,6 +57,7 @@
#define NTP_API 4 /* NTP API version */
+#ifndef __KERNEL__
/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
@@ -91,6 +92,7 @@ struct timex {
int :32; int :32; int :32; int :32;
int :32; int :32; int :32;
};
+#endif
struct __kernel_timex_timeval {
__kernel_time64_t tv_sec;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 04481c717fee..5f9357dcb060 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -912,6 +912,25 @@ struct v4l2_jpegcompression {
/*
* M E M O R Y - M A P P I N G B U F F E R S
*/
+
+#ifdef __KERNEL__
+/*
+ * This corresponds to the user space version of timeval
+ * for 64-bit time_t. sparc64 is different from everyone
+ * else, using the microseconds in the wrong half of the
+ * second 64-bit word.
+ */
+struct __kernel_v4l2_timeval {
+ long long tv_sec;
+#if defined(__sparc__) && defined(__arch64__)
+ int tv_usec;
+ int __pad;
+#else
+ long long tv_usec;
+#endif
+};
+#endif
+
struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
@@ -997,7 +1016,11 @@ struct v4l2_buffer {
__u32 bytesused;
__u32 flags;
__u32 field;
+#ifdef __KERNEL__
+ struct __kernel_v4l2_timeval timestamp;
+#else
struct timeval timestamp;
+#endif
struct v4l2_timecode timecode;
__u32 sequence;
@@ -1017,6 +1040,7 @@ struct v4l2_buffer {
};
};
+#ifndef __KERNEL__
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
* @ts: pointer to the timeval variable to be converted
@@ -1028,6 +1052,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
{
return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
}
+#endif
/* Flags for 'flags' field */
/* Buffer is mapped (flag) */
@@ -2339,7 +2364,11 @@ struct v4l2_event {
} u;
__u32 pending;
__u32 sequence;
+#ifdef __KERNEL__
+ struct __kernel_timespec timestamp;
+#else
struct timespec timestamp;
+#endif
__u32 id;
__u32 reserved[8];
};
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 64f0e3aacd3f..d4ddbe4e696c 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_OBJECT_DM,
UVERBS_OBJECT_COUNTERS,
+ UVERBS_OBJECT_ASYNC_EVENT,
};
enum {
@@ -67,6 +68,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_INVOKE_WRITE,
UVERBS_METHOD_INFO_HANDLES,
UVERBS_METHOD_QUERY_PORT,
+ UVERBS_METHOD_GET_CONTEXT,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -80,6 +82,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_RESP,
};
+enum uverbs_attrs_get_context_attr_ids {
+ UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -241,4 +248,12 @@ enum uverbs_attrs_flow_destroy_ids {
UVERBS_ATTR_DESTROY_FLOW_HANDLE,
};
+enum uverbs_method_async_event {
+ UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+};
+
+enum uverbs_attrs_async_event_create {
+ UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 9019b2d906ea..a640bb814be0 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -41,6 +41,13 @@
#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
#endif
+#define IB_UVERBS_ACCESS_OPTIONAL_FIRST (1 << 20)
+#define IB_UVERBS_ACCESS_OPTIONAL_LAST (1 << 29)
+
+enum ib_uverbs_core_support {
+ IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS = 1 << 0,
+};
+
enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
@@ -50,6 +57,11 @@ enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+
+ IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST,
+ IB_UVERBS_ACCESS_OPTIONAL_RANGE =
+ ((IB_UVERBS_ACCESS_OPTIONAL_LAST << 1) - 1) &
+ ~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
};
enum ib_uverbs_query_port_cap_flags {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 20d88307f75f..afe7da6f2b8e 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -115,6 +115,22 @@ enum mlx5_ib_devx_obj_methods {
MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
};
+enum mlx5_ib_var_alloc_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_var_obj_destroy_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_var_obj_methods {
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+};
+
enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
@@ -156,6 +172,7 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_FLOW_MATCHER,
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ MLX5_IB_OBJECT_VAR,
};
enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index c022ee26089b..a0b83c9d4498 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -48,6 +48,18 @@ struct qedr_alloc_ucontext_req {
__u32 reserved;
};
+#define QEDR_LDPM_MAX_SIZE (8192)
+#define QEDR_EDPM_TRANS_SIZE (64)
+
+enum qedr_rdma_dpm_type {
+ QEDR_DPM_TYPE_NONE = 0,
+ QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0,
+ QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1,
+ QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2,
+ QEDR_DPM_TYPE_RESERVED = 1 << 3,
+ QEDR_DPM_SIZES_SET = 1 << 4,
+};
+
struct qedr_alloc_ucontext_resp {
__aligned_u64 db_pa;
__u32 db_size;
@@ -59,10 +71,12 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr;
__u32 sges_per_srq_wr;
__u32 max_cqes;
- __u8 dpm_enabled;
+ __u8 dpm_flags;
__u8 wids_enabled;
__u16 wid_count;
- __u32 reserved;
+ __u16 ldpm_limit_size;
+ __u8 edpm_trans_size;
+ __u8 reserved;
};
struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 9988db6ad244..d55f2176dfd4 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -68,14 +68,13 @@ struct utp_upiu_cmd {
* @header:UPIU header structure DW-0 to DW-2
* @sc: fields structure for scsi command DW-3 to DW-7
* @qr: fields structure for query request DW-3 to DW-7
+ * @uc: use utp_upiu_query to host the 4 dwords of uic command
*/
struct utp_upiu_req {
struct utp_upiu_header header;
union {
struct utp_upiu_cmd sc;
struct utp_upiu_query qr;
- struct utp_upiu_query tr;
- /* use utp_upiu_query to host the 4 dwords of uic command */
struct utp_upiu_query uc;
};
};
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index cba57a678daf..b6d8b874233f 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -17,6 +17,9 @@ enum {
MIPI_DSI_H_SYNC_START = 0x21,
MIPI_DSI_H_SYNC_END = 0x31,
+ MIPI_DSI_COMPRESSION_MODE = 0x07,
+ MIPI_DSI_END_OF_TRANSMISSION = 0x08,
+
MIPI_DSI_COLOR_MODE_OFF = 0x02,
MIPI_DSI_COLOR_MODE_ON = 0x12,
MIPI_DSI_SHUTDOWN_PERIPHERAL = 0x22,
@@ -34,19 +37,18 @@ enum {
MIPI_DSI_DCS_SHORT_WRITE_PARAM = 0x15,
MIPI_DSI_DCS_READ = 0x06,
-
- MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
- MIPI_DSI_PPS_LONG_WRITE = 0x0A,
+ MIPI_DSI_EXECUTE_QUEUE = 0x16,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
- MIPI_DSI_END_OF_TRANSMISSION = 0x08,
-
MIPI_DSI_NULL_PACKET = 0x09,
MIPI_DSI_BLANKING_PACKET = 0x19,
MIPI_DSI_GENERIC_LONG_WRITE = 0x29,
MIPI_DSI_DCS_LONG_WRITE = 0x39,
+ MIPI_DSI_PICTURE_PARAMETER_SET = 0x0a,
+ MIPI_DSI_COMPRESSED_PIXEL_STREAM = 0x0b,
+
MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 0x0c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 0x1c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 0x2c,
@@ -77,7 +79,9 @@ enum {
enum {
MIPI_DCS_NOP = 0x00,
MIPI_DCS_SOFT_RESET = 0x01,
+ MIPI_DCS_GET_COMPRESSION_MODE = 0x03,
MIPI_DCS_GET_DISPLAY_ID = 0x04,
+ MIPI_DCS_GET_ERROR_COUNT_ON_DSI = 0x05,
MIPI_DCS_GET_RED_CHANNEL = 0x06,
MIPI_DCS_GET_GREEN_CHANNEL = 0x07,
MIPI_DCS_GET_BLUE_CHANNEL = 0x08,
@@ -92,6 +96,8 @@ enum {
MIPI_DCS_EXIT_SLEEP_MODE = 0x11,
MIPI_DCS_ENTER_PARTIAL_MODE = 0x12,
MIPI_DCS_ENTER_NORMAL_MODE = 0x13,
+ MIPI_DCS_GET_IMAGE_CHECKSUM_RGB = 0x14,
+ MIPI_DCS_GET_IMAGE_CHECKSUM_CT = 0x15,
MIPI_DCS_EXIT_INVERT_MODE = 0x20,
MIPI_DCS_ENTER_INVERT_MODE = 0x21,
MIPI_DCS_SET_GAMMA_CURVE = 0x26,
@@ -102,7 +108,8 @@ enum {
MIPI_DCS_WRITE_MEMORY_START = 0x2C,
MIPI_DCS_WRITE_LUT = 0x2D,
MIPI_DCS_READ_MEMORY_START = 0x2E,
- MIPI_DCS_SET_PARTIAL_AREA = 0x30,
+ MIPI_DCS_SET_PARTIAL_ROWS = 0x30, /* MIPI DCS 1.02 - MIPI_DCS_SET_PARTIAL_AREA before that */
+ MIPI_DCS_SET_PARTIAL_COLUMNS = 0x31,
MIPI_DCS_SET_SCROLL_AREA = 0x33,
MIPI_DCS_SET_TEAR_OFF = 0x34,
MIPI_DCS_SET_TEAR_ON = 0x35,
@@ -112,7 +119,10 @@ enum {
MIPI_DCS_ENTER_IDLE_MODE = 0x39,
MIPI_DCS_SET_PIXEL_FORMAT = 0x3A,
MIPI_DCS_WRITE_MEMORY_CONTINUE = 0x3C,
+ MIPI_DCS_SET_3D_CONTROL = 0x3D,
MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E,
+ MIPI_DCS_GET_3D_CONTROL = 0x3F,
+ MIPI_DCS_SET_VSYNC_TIMING = 0x40,
MIPI_DCS_SET_TEAR_SCANLINE = 0x44,
MIPI_DCS_GET_SCANLINE = 0x45,
MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51, /* MIPI DCS 1.3 */
@@ -124,7 +134,9 @@ enum {
MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E, /* MIPI DCS 1.3 */
MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F, /* MIPI DCS 1.3 */
MIPI_DCS_READ_DDB_START = 0xA1,
+ MIPI_DCS_READ_PPS_START = 0xA2,
MIPI_DCS_READ_DDB_CONTINUE = 0xA8,
+ MIPI_DCS_READ_PPS_CONTINUE = 0xA9,
};
/* MIPI DCS pixel formats */