Diffstat (limited to 'include')
50 files changed, 1914 insertions, 925 deletions
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index f7a19c2a7a80..05350424a4d3 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -110,7 +110,4 @@ static inline bool drm_can_sleep(void) return true; } -/* helper for handling conditionals in various for_each macros */ -#define for_each_if(condition) if (!(condition)) {} else - #endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 1e713154f00e..1e810e0b7664 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -29,6 +29,7 @@ #define DRM_ATOMIC_H_ #include <drm/drm_crtc.h> +#include <drm/drm_util.h> /** * struct drm_crtc_commit - track modeset commits on a CRTC @@ -384,9 +385,6 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state); struct drm_crtc_state * __must_check drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc); -int drm_atomic_crtc_set_property(struct drm_crtc *crtc, - struct drm_crtc_state *state, struct drm_property *property, - uint64_t val); struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); @@ -598,25 +596,6 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state, } int __must_check -drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, - const struct drm_display_mode *mode); -int __must_check -drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, - struct drm_property_blob *blob); -int __must_check -drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, - struct drm_crtc *crtc); -void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, - struct drm_framebuffer *fb); -void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, - struct dma_fence *fence); -int __must_check -drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, - struct drm_crtc *crtc); -int drm_atomic_set_writeback_fb_for_connector( - struct drm_connector_state *conn_state, - struct drm_framebuffer *fb); -int __must_check drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc); int __must_check diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 99e2a5297c69..657af7b39379 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -31,6 +31,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_util.h> struct drm_atomic_state; struct drm_private_obj; @@ -156,6 +157,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *state); void drm_atomic_helper_plane_reset(struct drm_plane *plane); void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, struct drm_plane_state *state); diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h new file mode 100644 index 000000000000..8cec52ad1277 --- /dev/null +++ b/include/drm/drm_atomic_uapi.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * Copyright (C) 2018 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_UAPI_H_ +#define DRM_ATOMIC_UAPI_H_ + +struct drm_crtc_state; +struct drm_display_mode; +struct drm_property_blob; +struct drm_plane_state; +struct drm_crtc; +struct drm_connector_state; +struct dma_fence; +struct drm_framebuffer; + +int __must_check +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + const struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, + struct dma_fence *fence); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); + +#endif diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 330c561c4c11..88bdfec3bd88 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -27,6 +27,10 @@ #include <linux/ctype.h> #include <drm/drm_mode.h> +#define DRM_MODE_BLEND_PREMULTI 0 +#define DRM_MODE_BLEND_COVERAGE 1 +#define DRM_MODE_BLEND_PIXEL_NONE 2 + struct drm_device; struct drm_atomic_state; struct drm_plane; @@ -52,4 +56,6 @@ int drm_plane_create_zpos_immutable_property(struct drm_plane *plane, unsigned int zpos); int drm_atomic_normalize_zpos(struct drm_device *dev, struct drm_atomic_state *state); +int drm_plane_create_blend_mode_property(struct drm_plane *plane, + unsigned int supported_modes); #endif diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h index 44f04233e3db..90ef9996d9a4 100644 --- a/include/drm/drm_color_mgmt.h +++ b/include/drm/drm_color_mgmt.h @@ -24,6 +24,7 @@ #define __DRM_COLOR_MGMT_H__ #include <linux/ctype.h> +#include <drm/drm_property.h> struct drm_crtc; struct drm_plane; diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 97ea41dc678f..91a877fa00cb 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -28,6 +28,7 @@ #include <linux/ctype.h> #include <linux/hdmi.h> #include <drm/drm_mode_object.h> +#include <drm/drm_util.h> #include <uapi/drm/drm_mode.h> diff --git a/include/drm/drm_crtc.h 
b/include/drm/drm_crtc.h index 92e7fc7f05a4..b21437bc95bf 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -744,8 +744,45 @@ struct drm_crtc_funcs { * * 0 on success or a negative error code on failure. */ - int (*set_crc_source)(struct drm_crtc *crtc, const char *source, - size_t *values_cnt); + int (*set_crc_source)(struct drm_crtc *crtc, const char *source); + /** + * @verify_crc_source: + * + * verifies the source of CRC checksums of frames before setting the + * source for CRC and during crc open. Source parameter can be NULL + * while disabling crc source. + * + * This callback is optional if the driver does not support any CRC + * generation functionality. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*verify_crc_source)(struct drm_crtc *crtc, const char *source, + size_t *values_cnt); + /** + * @get_crc_sources: + * + * Driver callback for getting a list of all the available sources for + * CRC generation. This callback depends upon verify_crc_source, So + * verify_crc_source callback should be implemented before implementing + * this. Driver can pass full list of available crc sources, this + * callback does the verification on each crc-source before passing it + * to userspace. + * + * This callback is optional if the driver does not support exporting of + * possible CRC sources list. + * + * RETURNS: + * + * a constant character pointer to the list of all the available CRC + * sources. On failure driver should return NULL. count should be + * updated with number of sources in list. if zero we don't process any + * source from the list. + */ + const char *const *(*get_crc_sources)(struct drm_crtc *crtc, + size_t *count); /** * @atomic_print_state: diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index f9c6e0e3aec7..42411b3ea0c8 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -46,6 +46,16 @@ struct drm_device { struct drm_master *master; /** + * @driver_features: per-device driver features + * + * Drivers can clear specific flags here to disallow + * certain features on a per-device basis while still + * sharing a single &struct drm_driver instance across + * all devices. + */ + u32 driver_features; + + /** * @unplugged: * * Flag to tell if the device has been unplugged. diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 05cc31b5db16..2a3843f248cf 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -123,8 +123,9 @@ # define DP_FRAMING_CHANGE_CAP (1 << 1) # define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ -#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ -# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */ +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ +# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */ +# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */ #define DP_ADAPTER_CAP 0x00f /* 1.2 */ # define DP_FORCE_LOAD_SENSE_CAP (1 << 0) @@ -1260,12 +1261,12 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, */ enum drm_dp_quirk { /** - * @DP_DPCD_QUIRK_LIMITED_M_N: + * @DP_DPCD_QUIRK_CONSTANT_N: * * The device requires main link attributes Mvid and Nvid to be limited - * to 16 bits. + * to 16 bits. So will give a constant value (0x8000) for compatability. 
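A minimal sketch (not part of this diff) of how a driver might honour the renamed quirk: read the branch/sink descriptor and substitute the constant N value when DP_DPCD_QUIRK_CONSTANT_N is set. drm_dp_read_desc() and drm_dp_has_quirk() are existing helpers from drm_dp_helper.h; example_pick_n() is a hypothetical name.

#include <drm/drm_dp_helper.h>

/* Returns 0x8000 when the sink wants a constant N, 0 to compute N normally. */
static u32 example_pick_n(struct drm_dp_aux *aux, bool is_branch)
{
        struct drm_dp_desc desc;

        if (drm_dp_read_desc(aux, &desc, is_branch))
                return 0;       /* descriptor read failed, keep the computed N */

        if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N))
                return 0x8000;

        return 0;
}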
*/ - DP_DPCD_QUIRK_LIMITED_M_N, + DP_DPCD_QUIRK_CONSTANT_N, }; /** diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 152b3055e9e1..3199ef70c007 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -56,7 +56,6 @@ struct drm_printer; #define DRIVER_ATOMIC 0x10000 #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 #define DRIVER_SYNCOBJ 0x40000 -#define DRIVER_PREFER_XBGR_30BPP 0x80000 /** * struct drm_driver - DRM driver structure @@ -654,14 +653,14 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev) * @dev: DRM device to check * @feature: feature flag * - * This checks @dev for driver features, see &drm_driver.driver_features and the - * various DRIVER_\* flags. + * This checks @dev for driver features, see &drm_driver.driver_features, + * &drm_device.driver_features, and the various DRIVER_\* flags. * * Returns true if the @feature is supported, false otherwise. */ -static inline bool drm_core_check_feature(struct drm_device *dev, int feature) +static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature) { - return dev->driver->driver_features & feature; + return dev->driver->driver_features & dev->driver_features & feature; } /** diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 4f597c0730b4..70cfca03d812 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -28,6 +28,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_mode.h> #include <drm/drm_mode_object.h> +#include <drm/drm_util.h> struct drm_encoder; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 96e26e3b9a0c..4a65f0d155b0 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -26,7 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); -void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state); void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, bool state); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 5db08c8f1d25..bb9acea61369 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -604,6 +604,16 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) #endif +/** + * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers + * @a: memory range, users of which are to be removed + * @name: requesting driver name + * @primary: also kick vga16fb if present + * + * This function removes framebuffer devices (initialized by firmware/bootloader) + * which use memory range described by @a. If @a is NULL all such devices are + * removed. + */ static inline int drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary) @@ -615,4 +625,28 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a, #endif } +/** + * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices + * @pdev: PCI device + * @resource_id: index of PCI BAR configuring framebuffer memory + * @name: requesting driver name + * + * This function removes framebuffer devices (eg. initialized by firmware) + * using memory range configured for @pdev's BAR @resource_id. + * + * The function assumes that PCI device with shadowed ROM drives a primary + * display and so kicks out vga16fb. 
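As a usage sketch for the new wrapper (the probe function, driver name, and BAR index are made up), a PCI DRM driver would evict firmware framebuffers before registering its own device:

#include <linux/pci.h>
#include <drm/drm_fb_helper.h>

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        int ret;

        /* Kick out firmware fbdev drivers using BAR 0 of this device. */
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
                                                                "exampledrmfb");
        if (ret)
                return ret;

        /* ... allocate and register the drm_device as usual ... */
        return 0;
}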
+ */ +static inline int +drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, + int resource_id, + const char *name) +{ +#if IS_REACHABLE(CONFIG_FB) + return remove_conflicting_pci_framebuffers(pdev, resource_id, name); +#else + return 0; +#endif +} + #endif diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index f9c15845f465..865ef60c17af 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -25,6 +25,28 @@ #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> +/* + * DRM formats are little endian. Define host endian variants for the + * most common formats here, to reduce the #ifdefs needed in drivers. + * + * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in + * case the format can't be specified otherwise, so we don't end up + * with two values describing the same format. + */ +#ifdef __BIG_ENDIAN +# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ + DRM_FORMAT_BIG_ENDIAN) +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 +#else +# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 +# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 +# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 +# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 +#endif + struct drm_device; struct drm_mode_fb_cmd2; @@ -66,6 +88,8 @@ const struct drm_format_info * drm_get_format_info(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd); uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); +uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, + uint32_t bpp, uint32_t depth); int drm_format_num_planes(uint32_t format); int drm_format_plane_cpp(uint32_t format, int plane); int drm_format_horz_chroma_subsampling(uint32_t format); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index a0b202e1d69a..928e4172a0bb 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -809,6 +809,21 @@ struct drm_mode_config { /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; + bool quirk_addfb_prefer_xbgr_30bpp; + + /** + * @quirk_addfb_prefer_host_byte_order: + * + * When set to true drm_mode_addfb() will pick host byte order + * pixel_format when calling drm_mode_addfb2(). This is how + * drm_mode_addfb() should have worked from day one. It + * didn't though, so we ended up with quirks in both kernel + * and userspace drivers to deal with the broken behavior. + * Simply fixing drm_mode_addfb() unconditionally would break + * these drivers, so add a quirk bit here to allow drivers + * opt-in. 
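A short sketch of the opt-in described here, assuming a hypothetical driver init path: the quirk bit is simply set on the mode config after drm_mode_config_init().

#include <drm/drm_mode_config.h>

static void example_mode_config_init(struct drm_device *dev)
{
        drm_mode_config_init(dev);

        /* Opt in: let drm_mode_addfb() pick host byte order pixel formats. */
        dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
}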
+ */ + bool quirk_addfb_prefer_host_byte_order; /** * @async_page_flip: Does this device support async flips on the primary diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 777814755fa6..8c738c0e6e9f 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -82,6 +82,7 @@ struct drm_panel_funcs { * @drm: DRM device owning the panel * @connector: DRM connector that the panel is attached to * @dev: parent device of the panel + * @link: link from panel device (supplier) to DRM device (consumer) * @funcs: operations that can be performed on the panel * @list: panel entry in registry */ diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 683742826511..b7e899ce44f0 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ #define radeon_PCI_IDS \ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 8a152dc16ea5..0a0834bef8bd 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -27,6 +27,9 @@ #include <linux/ctype.h> #include <drm/drm_mode_object.h> #include <drm/drm_color_mgmt.h> +#include <drm/drm_rect.h> +#include <drm/drm_modeset_lock.h> +#include <drm/drm_util.h> struct drm_crtc; struct drm_printer; @@ -119,6 +122,14 @@ struct drm_plane_state { u16 alpha; /** + * @pixel_blend_mode: + * The alpha blending equation selection, describing how the pixels from + * the current plane are composited with the background. Value can be + * one of DRM_MODE_BLEND_* + */ + uint16_t pixel_blend_mode; + + /** * @rotation: * Rotation of the plane. See drm_plane_create_rotation_property() for * more details. @@ -659,6 +670,14 @@ struct drm_plane { * drm_plane_create_rotation_property(). */ struct drm_property *rotation_property; + /** + * @blend_mode_property: + * Optional "pixel blend mode" enum property for this plane. + * Blend mode property represents the alpha blending equation selection, + * describing how the pixels from the current plane are composited with + * the background. + */ + struct drm_property *blend_mode_property; /** * @color_encoding_property: diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index f3e6eed3e79c..afbc3beef089 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -381,7 +381,7 @@ void drm_err(const char *format, ...); #define DRM_DEV_DEBUG_DP(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__) -#define DRM_DEBUG_DP(dev, fmt, ...) \ +#define DRM_DEBUG_DP(fmt, ...) \ drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) #define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) 
\ diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index c030f6ccab99..5b9efff35d6d 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -27,6 +27,8 @@ #include <linux/ctype.h> #include <drm/drm_mode_object.h> +#include <uapi/drm/drm_mode.h> + /** * struct drm_property_enum - symbolic values for enumerations * @value: numeric property value for this enum entry diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 3980602472c0..425432b85a87 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -131,15 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj) struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); -void drm_syncobj_add_callback(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb, - drm_syncobj_func_t func); -void drm_syncobj_remove_callback(struct drm_syncobj *syncobj, - struct drm_syncobj_cb *cb); -void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point, struct dma_fence *fence); int drm_syncobj_find_fence(struct drm_file *file_private, - u32 handle, + u32 handle, u64 point, struct dma_fence **fence); void drm_syncobj_free(struct kref *kref); int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h new file mode 100644 index 000000000000..88abdca89baa --- /dev/null +++ b/include/drm/drm_util.h @@ -0,0 +1,32 @@ +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DRM_UTIL_H_ +#define _DRM_UTIL_H_ + +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + +#endif diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 21c648b0b2a1..d87b268f1781 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -50,7 +50,10 @@ enum drm_sched_priority { * * @list: used to append this struct to the list of entities in the * runqueue. - * @rq: runqueue to which this entity belongs. + * @rq: runqueue on which this entity is currently scheduled. 
+ * @rq_list: a list of run queues on which jobs from this entity can + * be scheduled + * @num_rq_list: number of run queues in the rq_list * @rq_lock: lock to modify the runqueue to which this entity belongs. * @job_queue: the list of jobs of this entity. * @fence_seq: a linearly increasing seqno incremented with each @@ -67,6 +70,7 @@ enum drm_sched_priority { * @fini_status: contains the exit status in case the process was signalled. * @last_scheduled: points to the finished fence of the last scheduled job. * @last_user: last group leader pushing a job into the entity. + * @stopped: Marks the enity as removed from rq and destined for termination. * * Entities will emit jobs in order to their corresponding hardware * ring, and the scheduler will alternate between entities based on @@ -75,6 +79,8 @@ enum drm_sched_priority { struct drm_sched_entity { struct list_head list; struct drm_sched_rq *rq; + struct drm_sched_rq **rq_list; + unsigned int num_rq_list; spinlock_t rq_lock; struct spsc_queue job_queue; @@ -87,6 +93,7 @@ struct drm_sched_entity { atomic_t *guilty; struct dma_fence *last_scheduled; struct task_struct *last_user; + bool stopped; }; /** @@ -168,8 +175,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * finished to remove the job from the * @drm_gpu_scheduler.ring_mirror_list. * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list. - * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout - * interval is over. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job. If this exceeds the hang * limit of the scheduler then the job is marked guilty and will not @@ -188,7 +193,6 @@ struct drm_sched_job { struct dma_fence_cb finish_cb; struct work_struct finish_work; struct list_head node; - struct delayed_work work_tdr; uint64_t id; atomic_t karma; enum drm_sched_priority s_priority; @@ -252,11 +256,14 @@ struct drm_sched_backend_ops { * finished. * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. + * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the + * timeout interval is over. * @thread: the kthread on which the scheduler which run. * @ring_mirror_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the ring_mirror_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. + * @num_jobs: the number of jobs in queue in the scheduler * * One scheduler is implemented for each hardware ring. 
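To make the submission flow concrete, a minimal sketch using the two job-submission entry points this header exports; example_submit() and its trimmed error handling are illustrative only.

#include <drm/gpu_scheduler.h>

static int example_submit(struct drm_sched_job *job,
                          struct drm_sched_entity *entity, void *owner)
{
        int ret;

        /* Ties the job to the entity and assigns it a unique id. */
        ret = drm_sched_job_init(job, entity, owner);
        if (ret)
                return ret;

        /* Queue it; the scheduler thread pops jobs in entity order. */
        drm_sched_entity_push_job(job, entity);
        return 0;
}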
*/ @@ -270,10 +277,12 @@ struct drm_gpu_scheduler { wait_queue_head_t job_scheduled; atomic_t hw_rq_count; atomic64_t job_id_count; + struct delayed_work work_tdr; struct task_struct *thread; struct list_head ring_mirror_list; spinlock_t job_list_lock; int hang_limit; + atomic_t num_jobs; }; int drm_sched_init(struct drm_gpu_scheduler *sched, @@ -281,6 +290,21 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); int drm_sched_entity_init(struct drm_sched_entity *entity, struct drm_sched_rq **rq_list, @@ -289,23 +313,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); -void drm_sched_entity_set_rq(struct drm_sched_entity *entity, - struct drm_sched_rq *rq); +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority); +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); struct drm_sched_fence *drm_sched_fence_create( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_scheduled(struct drm_sched_fence *fence); void drm_sched_fence_finished(struct drm_sched_fence *fence); -int drm_sched_job_init(struct drm_sched_job *job, - struct drm_sched_entity *entity, - void *owner); -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, - struct drm_sched_job *job); -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); -bool drm_sched_dependency_optimized(struct dma_fence* fence, - struct drm_sched_entity *entity); -void drm_sched_job_kickout(struct drm_sched_job *s_job); #endif diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index fbf5cfc9b352..fd965ffbb92e 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -386,6 +386,7 @@ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index a01ba2032f0e..3fc4854dce49 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -51,6 +51,8 @@ struct ttm_placement; struct ttm_place; +struct ttm_lru_bulk_move; + /** * struct ttm_bus_placement * @@ -311,6 +313,24 @@ ttm_bo_reference(struct ttm_buffer_object *bo) } /** + * 
ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless + * its refcount has already reached zero. + * @bo: The buffer object. + * + * Used to reference a TTM buffer object in lookups where the object is removed + * from the lookup structure during the destructor and for RCU lookups. + * + * Returns: @bo if the referencing was successful, NULL otherwise. + */ +static inline __must_check struct ttm_buffer_object * +ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) +{ + if (!kref_get_unless_zero(&bo->kref)) + return NULL; + return bo; +} + +/** * ttm_bo_wait - wait for buffer idle. * * @bo: The buffer object. @@ -405,12 +425,24 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * ttm_bo_move_to_lru_tail * * @bo: The buffer object. + * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); + +/** + * ttm_bo_bulk_move_lru_tail + * + * @bulk: bulk move structure + * + * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that + * BO order never changes. Should be called with ttm_bo_global::lru_lock held. + */ +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); /** * ttm_bo_lock_delayed_workqueue diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3234cc322e70..e4fee8e02559 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -491,6 +491,34 @@ struct ttm_bo_device { }; /** + * struct ttm_lru_bulk_move_pos + * + * @first: first BO in the bulk move range + * @last: last BO in the bulk move range + * + * Positions for a lru bulk move. + */ +struct ttm_lru_bulk_move_pos { + struct ttm_buffer_object *first; + struct ttm_buffer_object *last; +}; + +/** + * struct ttm_lru_bulk_move + * + * @tt: first/last lru entry for BOs in the TT domain + * @vram: first/last lru entry for BOs in the VRAM domain + * @swap: first/last lru entry for BOs on the swap list + * + * Helper structure for bulk moves on the LRU list. + */ +struct ttm_lru_bulk_move { + struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; +}; + +/** * ttm_flag_masked * * @old: Pointer to the result and original value. diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h deleted file mode 100644 index 0c3af9836863..000000000000 --- a/include/drm/ttm/ttm_lock.h +++ /dev/null @@ -1,248 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. 
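A small sketch of the lookup pattern ttm_bo_get_unless_zero() is meant for, assuming some driver-private, RCU-protected lookup structure; example_lookup() is hypothetical.

#include <linux/rcupdate.h>
#include <drm/ttm/ttm_bo_api.h>

static struct ttm_buffer_object *
example_lookup(struct ttm_buffer_object *candidate)
{
        struct ttm_buffer_object *bo;

        rcu_read_lock();
        /* NULL if the refcount already dropped to zero. */
        bo = ttm_bo_get_unless_zero(candidate);
        rcu_read_unlock();

        return bo;
}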
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> - */ - -/** @file ttm_lock.h - * This file implements a simple replacement for the buffer manager use - * of the DRM heavyweight hardware lock. - * The lock is a read-write lock. Taking it in read mode and write mode - * is relatively fast, and intended for in-kernel use only. - * - * The vt mode is used only when there is a need to block all - * user-space processes from validating buffers. - * It's allowed to leave kernel space with the vt lock held. - * If a user-space process dies while having the vt-lock, - * it will be released during the file descriptor release. The vt lock - * excludes write lock and read lock. - * - * The suspend mode is used to lock out all TTM users when preparing for - * and executing suspend operations. - * - */ - -#ifndef _TTM_LOCK_H_ -#define _TTM_LOCK_H_ - -#include <linux/wait.h> -#include <linux/atomic.h> - -#include "ttm_object.h" - -/** - * struct ttm_lock - * - * @base: ttm base object used solely to release the lock if the client - * holding the lock dies. - * @queue: Queue for processes waiting for lock change-of-status. - * @lock: Spinlock protecting some lock members. - * @rw: Read-write lock counter. Protected by @lock. - * @flags: Lock state. Protected by @lock. - * @kill_takers: Boolean whether to kill takers of the lock. - * @signal: Signal to send when kill_takers is true. - */ - -struct ttm_lock { - struct ttm_base_object base; - wait_queue_head_t queue; - spinlock_t lock; - int32_t rw; - uint32_t flags; - bool kill_takers; - int signal; - struct ttm_object_file *vt_holder; -}; - - -/** - * ttm_lock_init - * - * @lock: Pointer to a struct ttm_lock - * Initializes the lock. - */ -extern void ttm_lock_init(struct ttm_lock *lock); - -/** - * ttm_read_unlock - * - * @lock: Pointer to a struct ttm_lock - * - * Releases a read lock. - */ -extern void ttm_read_unlock(struct ttm_lock *lock); - -/** - * ttm_read_lock - * - * @lock: Pointer to a struct ttm_lock - * @interruptible: Interruptible sleeping while waiting for a lock. - * - * Takes the lock in read mode. - * Returns: - * -ERESTARTSYS If interrupted by a signal and interruptible is true. 
- */ -extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible); - -/** - * ttm_read_trylock - * - * @lock: Pointer to a struct ttm_lock - * @interruptible: Interruptible sleeping while waiting for a lock. - * - * Tries to take the lock in read mode. If the lock is already held - * in write mode, the function will return -EBUSY. If the lock is held - * in vt or suspend mode, the function will sleep until these modes - * are unlocked. - * - * Returns: - * -EBUSY The lock was already held in write mode. - * -ERESTARTSYS If interrupted by a signal and interruptible is true. - */ -extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible); - -/** - * ttm_write_unlock - * - * @lock: Pointer to a struct ttm_lock - * - * Releases a write lock. - */ -extern void ttm_write_unlock(struct ttm_lock *lock); - -/** - * ttm_write_lock - * - * @lock: Pointer to a struct ttm_lock - * @interruptible: Interruptible sleeping while waiting for a lock. - * - * Takes the lock in write mode. - * Returns: - * -ERESTARTSYS If interrupted by a signal and interruptible is true. - */ -extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); - -/** - * ttm_lock_downgrade - * - * @lock: Pointer to a struct ttm_lock - * - * Downgrades a write lock to a read lock. - */ -extern void ttm_lock_downgrade(struct ttm_lock *lock); - -/** - * ttm_suspend_lock - * - * @lock: Pointer to a struct ttm_lock - * - * Takes the lock in suspend mode. Excludes read and write mode. - */ -extern void ttm_suspend_lock(struct ttm_lock *lock); - -/** - * ttm_suspend_unlock - * - * @lock: Pointer to a struct ttm_lock - * - * Releases a suspend lock - */ -extern void ttm_suspend_unlock(struct ttm_lock *lock); - -/** - * ttm_vt_lock - * - * @lock: Pointer to a struct ttm_lock - * @interruptible: Interruptible sleeping while waiting for a lock. - * @tfile: Pointer to a struct ttm_object_file to register the lock with. - * - * Takes the lock in vt mode. - * Returns: - * -ERESTARTSYS If interrupted by a signal and interruptible is true. - * -ENOMEM: Out of memory when locking. - */ -extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, - struct ttm_object_file *tfile); - -/** - * ttm_vt_unlock - * - * @lock: Pointer to a struct ttm_lock - * - * Releases a vt lock. - * Returns: - * -EINVAL If the lock was not held. - */ -extern int ttm_vt_unlock(struct ttm_lock *lock); - -/** - * ttm_write_unlock - * - * @lock: Pointer to a struct ttm_lock - * - * Releases a write lock. - */ -extern void ttm_write_unlock(struct ttm_lock *lock); - -/** - * ttm_write_lock - * - * @lock: Pointer to a struct ttm_lock - * @interruptible: Interruptible sleeping while waiting for a lock. - * - * Takes the lock in write mode. - * Returns: - * -ERESTARTSYS If interrupted by a signal and interruptible is true. - */ -extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); - -/** - * ttm_lock_set_kill - * - * @lock: Pointer to a struct ttm_lock - * @val: Boolean whether to kill processes taking the lock. - * @signal: Signal to send to the process taking the lock. - * - * The kill-when-taking-lock functionality is used to kill processes that keep - * on using the TTM functionality when its resources has been taken down, for - * example when the X server exits. A typical sequence would look like this: - * - X server takes lock in write mode. - * - ttm_lock_set_kill() is called with @val set to true. - * - As part of X server exit, TTM resources are taken down. - * - X server releases the lock on file release. 
- * - Another dri client wants to render, takes the lock and is killed. - * - */ -static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, - int signal) -{ - lock->kill_takers = val; - if (val) - lock->signal = signal; -} - -#endif diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h deleted file mode 100644 index a98bfeb4239e..000000000000 --- a/include/drm/ttm/ttm_object.h +++ /dev/null @@ -1,354 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ -/* - * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> - */ -/** @file ttm_object.h - * - * Base- and reference object implementation for the various - * ttm objects. Implements reference counting, minimal security checks - * and release on file close. - */ - -#ifndef _TTM_OBJECT_H_ -#define _TTM_OBJECT_H_ - -#include <linux/list.h> -#include <drm/drm_hashtab.h> -#include <linux/kref.h> -#include <linux/rcupdate.h> -#include <linux/dma-buf.h> - -#include "ttm_memory.h" - -/** - * enum ttm_ref_type - * - * Describes what type of reference a ref object holds. - * - * TTM_REF_USAGE is a simple refcount on a base object. - * - * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a - * buffer object. - * - * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a - * buffer object. - * - */ - -enum ttm_ref_type { - TTM_REF_USAGE, - TTM_REF_SYNCCPU_READ, - TTM_REF_SYNCCPU_WRITE, - TTM_REF_NUM -}; - -/** - * enum ttm_object_type - * - * One entry per ttm object type. - * Device-specific types should use the - * ttm_driver_typex types. - */ - -enum ttm_object_type { - ttm_fence_type, - ttm_buffer_type, - ttm_lock_type, - ttm_prime_type, - ttm_driver_type0 = 256, - ttm_driver_type1, - ttm_driver_type2, - ttm_driver_type3, - ttm_driver_type4, - ttm_driver_type5 -}; - -struct ttm_object_file; -struct ttm_object_device; - -/** - * struct ttm_base_object - * - * @hash: hash entry for the per-device object hash. - * @type: derived type this object is base class for. - * @shareable: Other ttm_object_files can access this object. - * - * @tfile: Pointer to ttm_object_file of the creator. - * NULL if the object was not created by a user request. - * (kernel object). 
- * - * @refcount: Number of references to this object, not - * including the hash entry. A reference to a base object can - * only be held by a ref object. - * - * @refcount_release: A function to be called when there are - * no more references to this object. This function should - * destroy the object (or make sure destruction eventually happens), - * and when it is called, the object has - * already been taken out of the per-device hash. The parameter - * "base" should be set to NULL by the function. - * - * @ref_obj_release: A function to be called when a reference object - * with another ttm_ref_type than TTM_REF_USAGE is deleted. - * This function may, for example, release a lock held by a user-space - * process. - * - * This struct is intended to be used as a base struct for objects that - * are visible to user-space. It provides a global name, race-safe - * access and refcounting, minimal access contol and hooks for unref actions. - */ - -struct ttm_base_object { - struct rcu_head rhead; - struct drm_hash_item hash; - enum ttm_object_type object_type; - bool shareable; - struct ttm_object_file *tfile; - struct kref refcount; - void (*refcount_release) (struct ttm_base_object **base); - void (*ref_obj_release) (struct ttm_base_object *base, - enum ttm_ref_type ref_type); -}; - - -/** - * struct ttm_prime_object - Modified base object that is prime-aware - * - * @base: struct ttm_base_object that we derive from - * @mutex: Mutex protecting the @dma_buf member. - * @size: Size of the dma_buf associated with this object - * @real_type: Type of the underlying object. Needed since we're setting - * the value of @base::object_type to ttm_prime_type - * @dma_buf: Non ref-coutned pointer to a struct dma_buf created from this - * object. - * @refcount_release: The underlying object's release method. Needed since - * we set @base::refcount_release to our own release method. - */ - -struct ttm_prime_object { - struct ttm_base_object base; - struct mutex mutex; - size_t size; - enum ttm_object_type real_type; - struct dma_buf *dma_buf; - void (*refcount_release) (struct ttm_base_object **); -}; - -/** - * ttm_base_object_init - * - * @tfile: Pointer to a struct ttm_object_file. - * @base: The struct ttm_base_object to initialize. - * @shareable: This object is shareable with other applcations. - * (different @tfile pointers.) - * @type: The object type. - * @refcount_release: See the struct ttm_base_object description. - * @ref_obj_release: See the struct ttm_base_object description. - * - * Initializes a struct ttm_base_object. - */ - -extern int ttm_base_object_init(struct ttm_object_file *tfile, - struct ttm_base_object *base, - bool shareable, - enum ttm_object_type type, - void (*refcount_release) (struct ttm_base_object - **), - void (*ref_obj_release) (struct ttm_base_object - *, - enum ttm_ref_type - ref_type)); - -/** - * ttm_base_object_lookup - * - * @tfile: Pointer to a struct ttm_object_file. - * @key: Hash key - * - * Looks up a struct ttm_base_object with the key @key. - */ - -extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file - *tfile, uint32_t key); - -/** - * ttm_base_object_lookup_for_ref - * - * @tdev: Pointer to a struct ttm_object_device. - * @key: Hash key - * - * Looks up a struct ttm_base_object with the key @key. - * This function should only be used when the struct tfile associated with the - * caller doesn't yet have a reference to the base object. 
- */ - -extern struct ttm_base_object * -ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key); - -/** - * ttm_base_object_unref - * - * @p_base: Pointer to a pointer referencing a struct ttm_base_object. - * - * Decrements the base object refcount and clears the pointer pointed to by - * p_base. - */ - -extern void ttm_base_object_unref(struct ttm_base_object **p_base); - -/** - * ttm_ref_object_add. - * - * @tfile: A struct ttm_object_file representing the application owning the - * ref_object. - * @base: The base object to reference. - * @ref_type: The type of reference. - * @existed: Upon completion, indicates that an identical reference object - * already existed, and the refcount was upped on that object instead. - * @require_existed: Fail with -EPERM if an identical ref object didn't - * already exist. - * - * Checks that the base object is shareable and adds a ref object to it. - * - * Adding a ref object to a base object is basically like referencing the - * base object, but a user-space application holds the reference. When the - * file corresponding to @tfile is closed, all its reference objects are - * deleted. A reference object can have different types depending on what - * it's intended for. It can be refcounting to prevent object destruction, - * When user-space takes a lock, it can add a ref object to that lock to - * make sure the lock is released if the application dies. A ref object - * will hold a single reference on a base object. - */ -extern int ttm_ref_object_add(struct ttm_object_file *tfile, - struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed, - bool require_existed); - -extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, - struct ttm_base_object *base); - -/** - * ttm_ref_object_base_unref - * - * @key: Key representing the base object. - * @ref_type: Ref type of the ref object to be dereferenced. - * - * Unreference a ref object with type @ref_type - * on the base object identified by @key. If there are no duplicate - * references, the ref object will be destroyed and the base object - * will be unreferenced. - */ -extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, - unsigned long key, - enum ttm_ref_type ref_type); - -/** - * ttm_object_file_init - initialize a struct ttm_object file - * - * @tdev: A struct ttm_object device this file is initialized on. - * @hash_order: Order of the hash table used to hold the reference objects. - * - * This is typically called by the file_ops::open function. - */ - -extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device - *tdev, - unsigned int hash_order); - -/** - * ttm_object_file_release - release data held by a ttm_object_file - * - * @p_tfile: Pointer to pointer to the ttm_object_file object to release. - * *p_tfile will be set to NULL by this function. - * - * Releases all data associated by a ttm_object_file. - * Typically called from file_ops::release. The caller must - * ensure that there are no concurrent users of tfile. - */ - -extern void ttm_object_file_release(struct ttm_object_file **p_tfile); - -/** - * ttm_object device init - initialize a struct ttm_object_device - * - * @mem_glob: struct ttm_mem_global for memory accounting. - * @hash_order: Order of hash table used to hash the base objects. - * @ops: DMA buf ops for prime objects of this device. - * - * This function is typically called on device initialization to prepare - * data structures needed for ttm base and ref objects. 
- */ - -extern struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, - const struct dma_buf_ops *ops); - -/** - * ttm_object_device_release - release data held by a ttm_object_device - * - * @p_tdev: Pointer to pointer to the ttm_object_device object to release. - * *p_tdev will be set to NULL by this function. - * - * Releases all data associated by a ttm_object_device. - * Typically called from driver::unload before the destruction of the - * device private data structure. - */ - -extern void ttm_object_device_release(struct ttm_object_device **p_tdev); - -#define ttm_base_object_kfree(__object, __base)\ - kfree_rcu(__object, __base.rhead) - -extern int ttm_prime_object_init(struct ttm_object_file *tfile, - size_t size, - struct ttm_prime_object *prime, - bool shareable, - enum ttm_object_type type, - void (*refcount_release) - (struct ttm_base_object **), - void (*ref_obj_release) - (struct ttm_base_object *, - enum ttm_ref_type ref_type)); - -static inline enum ttm_object_type -ttm_base_object_type(struct ttm_base_object *base) -{ - return (base->object_type == ttm_prime_type) ? - container_of(base, struct ttm_prime_object, base)->real_type : - base->object_type; -} -extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, - int fd, u32 *handle); -extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, - uint32_t handle, uint32_t flags, - int *prime_fd); - -#define ttm_prime_object_kfree(__obj, __prime) \ - kfree_rcu(__obj, __prime.base.rhead) -#endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 1921545c6351..4170fcee5adb 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -344,29 +344,14 @@ static inline void *offset_to_ptr(const int *off) #endif #ifndef __compiletime_error # define __compiletime_error(message) -/* - * Sparse complains of variable sized arrays due to the temporary variable in - * __compiletime_assert. Unfortunately we can't just expand it out to make - * sparse see a constant array size without breaking compiletime_assert on old - * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. - */ -# ifndef __CHECKER__ -# define __compiletime_error_fallback(condition) \ - do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -# endif -#endif -#ifndef __compiletime_error_fallback -# define __compiletime_error_fallback(condition) do { } while (0) #endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ - int __cond = !(condition); \ extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (__cond) \ + if (!(condition)) \ prefix ## suffix(); \ - __compiletime_error_fallback(__cond); \ } while (0) #else # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index fea64f2692a0..ab137f97ecbd 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -141,7 +141,6 @@ struct vc_data { struct uni_pagedir *vc_uni_pagedir; struct uni_pagedir **vc_uni_pagedir_loc; /* [!] 
Location of uni_pagedir variable for this console */ struct uni_screen *vc_uni_screen; /* unicode screen content */ - bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ /* additional information is in vt_kern.h */ }; diff --git a/include/linux/fb.h b/include/linux/fb.h index 3e7e75383d32..a3cab6dc9b44 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -456,10 +456,13 @@ struct fb_tile_ops { * and host endianness. Drivers should not use this flag. */ #define FBINFO_BE_MATH 0x100000 +/* + * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM + * drivers to stop userspace from trying to share buffers behind the kernel's + * back. Instead dma-buf based buffer sharing should be used. + */ +#define FBINFO_HIDE_SMEM_START 0x200000 -/* report to the VT layer that this fb driver can accept forced console - output like oopses */ -#define FBINFO_CAN_FORCE_OUTPUT 0x200000 struct fb_info { atomic_t count; @@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, extern int register_framebuffer(struct fb_info *fb_info); extern int unregister_framebuffer(struct fb_info *fb_info); extern int unlink_framebuffer(struct fb_info *fb_info); +extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, + const char *name); extern int remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary); extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); diff --git a/include/linux/fs.h b/include/linux/fs.h index 897eae8faee1..771341470bce 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); +/** + * struct address_space - Contents of a cacheable, mappable object. + * @host: Owner, either the inode or the block_device. + * @i_pages: Cached pages. + * @gfp_mask: Memory allocation flags to use for allocating pages. + * @i_mmap_writable: Number of VM_SHARED mappings. + * @i_mmap: Tree of private and shared mappings. + * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. + * @nrpages: Number of page entries, protected by the i_pages lock. + * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock. + * @writeback_index: Writeback starts here. + * @a_ops: Methods. + * @flags: Error bits and flags (AS_*). + * @wb_err: The most recent error which has occurred. + * @private_lock: For use by the owner of the address_space. + * @private_list: For use by the owner of the address_space. + * @private_data: For use by the owner of the address_space. 
+ */ struct address_space { - struct inode *host; /* owner: inode, block_device */ - struct radix_tree_root i_pages; /* cached pages */ - atomic_t i_mmap_writable;/* count VM_SHARED mappings */ - struct rb_root_cached i_mmap; /* tree of private and shared mappings */ - struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ - /* Protected by the i_pages lock */ - unsigned long nrpages; /* number of total pages */ - /* number of shadow or DAX exceptional entries */ + struct inode *host; + struct xarray i_pages; + gfp_t gfp_mask; + atomic_t i_mmap_writable; + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + unsigned long nrpages; unsigned long nrexceptional; - pgoff_t writeback_index;/* writeback starts here */ - const struct address_space_operations *a_ops; /* methods */ - unsigned long flags; /* error bits */ - spinlock_t private_lock; /* for use by the address_space */ - gfp_t gfp_mask; /* implicit gfp mask for allocations */ - struct list_head private_list; /* for use by the address_space */ - void *private_data; /* ditto */ + pgoff_t writeback_index; + const struct address_space_operations *a_ops; + unsigned long flags; errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -467,15 +483,18 @@ struct block_device { struct mutex bd_fsfreeze_mutex; } __randomize_layout; +/* XArray tags, for tagging dirty and writeback pages in the pagecache. */ +#define PAGECACHE_TAG_DIRTY XA_MARK_0 +#define PAGECACHE_TAG_WRITEBACK XA_MARK_1 +#define PAGECACHE_TAG_TOWRITE XA_MARK_2 + /* - * Radix-tree tags, for tagging dirty and writeback pages within the pagecache - * radix trees + * Returns true if any of the pages in the mapping are marked with the tag. */ -#define PAGECACHE_TAG_DIRTY 0 -#define PAGECACHE_TAG_WRITEBACK 1 -#define PAGECACHE_TAG_TOWRITE 2 - -int mapping_tagged(struct address_space *mapping, int tag); +static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) +{ + return xa_marked(&mapping->i_pages, tag); +} static inline void i_mmap_lock_write(struct address_space *mapping) { diff --git a/include/linux/idr.h b/include/linux/idr.h index 3ec8628ce17f..60daf34b625d 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -214,8 +214,7 @@ static inline void idr_preload_end(void) ++id, (entry) = idr_get_next((idr), &(id))) /* - * IDA - IDR based id allocator, use when translation from id to - * pointer isn't necessary. + * IDA - ID Allocator, use when translation from id to pointer isn't necessary. 
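For reference, a minimal sketch of the id-only allocation pattern described here, using ida_alloc_max() and ida_free() from this header; the example_* names are made up.

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
        /* Lowest free id in [0, 1023], or a negative errno. */
        return ida_alloc_max(&example_ida, 1023, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_free(&example_ida, id);
}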
*/ #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long)) @@ -225,14 +224,14 @@ struct ida_bitmap { unsigned long bitmap[IDA_BITMAP_LONGS]; }; -DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap); - struct ida { - struct radix_tree_root ida_rt; + struct xarray xa; }; +#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC) + #define IDA_INIT(name) { \ - .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ + .xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \ } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) @@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) static inline void ida_init(struct ida *ida) { - INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); + xa_init_flags(&ida->xa, IDA_INIT_FLAGS); } #define ida_simple_get(ida, start, end, gfp) \ @@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida) static inline bool ida_is_empty(const struct ida *ida) { - return radix_tree_empty(&ida->ida_rt); + return xa_empty(&ida->xa); } - -/* in lib/radix-tree.c */ -int ida_pre_get(struct ida *ida, gfp_t gfp_mask); #endif /* __IDR_H__ */ diff --git a/include/linux/list.h b/include/linux/list.h index de04cc5ed536..edb7628e46ed 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -184,6 +184,29 @@ static inline void list_move_tail(struct list_head *list, } /** + * list_bulk_move_tail - move a subsection of a list to its tail + * @head: the head that will follow our entry + * @first: first entry to move + * @last: last entry to move, can be the same as first + * + * Move all entries between @first and including @last before @head. + * All three entries must belong to the same linked list. + */ +static inline void list_bulk_move_tail(struct list_head *head, + struct list_head *first, + struct list_head *last) +{ + first->prev->next = last->next; + last->next->prev = first->prev; + + head->prev->next = first; + first->prev = head->prev; + + last->next = head; + head->prev = last; +} + +/** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b1bd2186e6d2..226f96f0dee0 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x) typedef int filler_t(void *, struct page *); -pgoff_t page_cache_next_hole(struct address_space *mapping, +pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); -pgoff_t page_cache_prev_hole(struct address_space *mapping, +pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); #define FGP_ACCESSED 0x00000001 @@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping, unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, - pgoff_t end, int tag, unsigned int nr_pages, + pgoff_t end, xa_mark_t tag, unsigned int nr_pages, struct page **pages); static inline unsigned find_get_pages_tag(struct address_space *mapping, - pgoff_t *index, int tag, unsigned int nr_pages, + pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, struct page **pages) { return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, nr_pages, pages); } unsigned 
find_get_entries_tag(struct address_space *mapping, pgoff_t start, - int tag, unsigned int nr_entries, + xa_mark_t tag, unsigned int nr_entries, struct page **entries, pgoff_t *indices); struct page *grab_cache_page_write_begin(struct address_space *mapping, diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 6dc456ac6136..081d934eda64 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -9,6 +9,8 @@ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H +#include <linux/xarray.h> + /* 15 pointers + header align the pagevec structure to a power of two */ #define PAGEVEC_SIZE 15 @@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec, unsigned pagevec_lookup_range_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, - int tag); + xa_mark_t tag); unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, - int tag, unsigned max_pages); + xa_mark_t tag, unsigned max_pages); static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, - struct address_space *mapping, pgoff_t *index, int tag) + struct address_space *mapping, pgoff_t *index, xa_mark_t tag) { return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); } diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h index ee495d707f17..fe815d7d9f58 100644 --- a/include/linux/platform_data/shmob_drm.h +++ b/include/linux/platform_data/shmob_drm.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * shmob_drm.h -- SH Mobile DRM driver * * Copyright (C) 2012 Renesas Corporation * * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __SHMOB_DRM_H__ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 34149e8b5f73..06c4c7a6c09c 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -28,34 +28,30 @@ #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <linux/xarray.h> + +/* Keep unconverted code working */ +#define radix_tree_root xarray +#define radix_tree_node xa_node /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: * * 00 - data pointer - * 01 - internal entry - * 10 - exceptional entry - * 11 - this bit combination is currently unused/reserved + * 10 - internal entry + * x1 - value entry * * The internal entry may be a pointer to the next level in the tree, a * sibling entry, or an indicator that the entry in this slot has been moved * to another location in the tree and the lookup should be restarted. While * NULL fits the 'data pointer' pattern, it means that there is no entry in * the tree for this index (no matter what level of the tree it is found at). - * This means that you cannot store NULL in the tree as a value for the index. + * This means that storing a NULL entry in the tree is the same as deleting + * the entry from the tree. */ #define RADIX_TREE_ENTRY_MASK 3UL -#define RADIX_TREE_INTERNAL_NODE 1UL - -/* - * Most users of the radix tree store pointers but shmem/tmpfs stores swap - * entries in the same tree. They are marked as exceptional entries to - * distinguish them from pointers to struct page. 
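/*
 * Illustrative sketch, not part of this series: the tag parameters above
 * change from int to xa_mark_t, but callers keep the familiar
 * writeback-style loop.  A minimal walk that counts dirty pages, assuming
 * the usual pagevec helpers; the function name is hypothetical.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static unsigned long example_count_dirty_pages(struct address_space *mapping,
					       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long count = 0;
	unsigned int nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
					      PAGECACHE_TAG_DIRTY))) {
		count += nr;
		pagevec_release(&pvec);
		cond_resched();
	}
	return count;
}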
- * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it. - */ -#define RADIX_TREE_EXCEPTIONAL_ENTRY 2 -#define RADIX_TREE_EXCEPTIONAL_SHIFT 2 +#define RADIX_TREE_INTERNAL_NODE 2UL static inline bool radix_tree_is_internal_node(void *ptr) { @@ -65,75 +61,32 @@ static inline bool radix_tree_is_internal_node(void *ptr) /*** radix-tree API starts here ***/ -#define RADIX_TREE_MAX_TAGS 3 - -#ifndef RADIX_TREE_MAP_SHIFT -#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) -#endif - +#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) -#define RADIX_TREE_TAG_LONGS \ - ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG) +#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS +#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) -/* - * @count is the count of every non-NULL element in the ->slots array - * whether that is an exceptional entry, a retry entry, a user pointer, - * a sibling entry or a pointer to the next level of the tree. - * @exceptional is the count of every element in ->slots which is - * either radix_tree_exceptional_entry() or is a sibling entry for an - * exceptional entry. - */ -struct radix_tree_node { - unsigned char shift; /* Bits remaining in each slot */ - unsigned char offset; /* Slot offset in parent */ - unsigned char count; /* Total entry count */ - unsigned char exceptional; /* Exceptional entry count */ - struct radix_tree_node *parent; /* Used when ascending tree */ - struct radix_tree_root *root; /* The tree we belong to */ - union { - struct list_head private_list; /* For tree user */ - struct rcu_head rcu_head; /* Used when freeing node */ - }; - void __rcu *slots[RADIX_TREE_MAP_SIZE]; - unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; -}; - -/* The IDR tag is stored in the low bits of the GFP flags */ +/* The IDR tag is stored in the low bits of xa_flags */ #define ROOT_IS_IDR ((__force gfp_t)4) -/* The top bits of gfp_mask are used to store the root tags */ +/* The top bits of xa_flags are used to store the root tags */ #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) -struct radix_tree_root { - spinlock_t xa_lock; - gfp_t gfp_mask; - struct radix_tree_node __rcu *rnode; -}; - -#define RADIX_TREE_INIT(name, mask) { \ - .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ - .gfp_mask = (mask), \ - .rnode = NULL, \ -} +#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask) #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(name, mask) -#define INIT_RADIX_TREE(root, mask) \ -do { \ - spin_lock_init(&(root)->xa_lock); \ - (root)->gfp_mask = (mask); \ - (root)->rnode = NULL; \ -} while (0) +#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask) static inline bool radix_tree_empty(const struct radix_tree_root *root) { - return root->rnode == NULL; + return root->xa_head == NULL; } /** @@ -143,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root) * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating * @node: node that contains current slot - * @shift: shift for the node that holds our slots * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. 
It is @@ -157,20 +109,8 @@ struct radix_tree_iter { unsigned long next_index; unsigned long tags; struct radix_tree_node *node; -#ifdef CONFIG_RADIX_TREE_MULTIORDER - unsigned int shift; -#endif }; -static inline unsigned int iter_shift(const struct radix_tree_iter *iter) -{ -#ifdef CONFIG_RADIX_TREE_MULTIORDER - return iter->shift; -#else - return 0; -#endif -} - /** * Radix-tree synchronization * @@ -194,12 +134,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter) * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup - * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * - * The first 8 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -269,17 +208,6 @@ static inline int radix_tree_deref_retry(void *arg) } /** - * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry? - * @arg: value returned by radix_tree_deref_slot - * Returns: 0 if well-aligned pointer, non-0 if exceptional entry. - */ -static inline int radix_tree_exceptional_entry(void *arg) -{ - /* Not unlikely because radix_tree_exception often tested first */ - return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY; -} - -/** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. @@ -289,47 +217,28 @@ static inline int radix_tree_exception(void *arg) return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } -int __radix_tree_create(struct radix_tree_root *, unsigned long index, - unsigned order, struct radix_tree_node **nodep, - void __rcu ***slotp); -int __radix_tree_insert(struct radix_tree_root *, unsigned long index, - unsigned order, void *); -static inline int radix_tree_insert(struct radix_tree_root *root, - unsigned long index, void *entry) -{ - return __radix_tree_insert(root, index, 0, entry); -} +int radix_tree_insert(struct radix_tree_root *, unsigned long index, + void *); void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp); void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, unsigned long index); -typedef void (*radix_tree_update_node_t)(struct radix_tree_node *); void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, - void __rcu **slot, void *entry, - radix_tree_update_node_t update_node); + void __rcu **slot, void *entry); void radix_tree_iter_replace(struct radix_tree_root *, const struct radix_tree_iter *, void __rcu **slot, void *entry); void radix_tree_replace_slot(struct radix_tree_root *, void __rcu **slot, void *entry); -void __radix_tree_delete_node(struct radix_tree_root *, - struct radix_tree_node *, - radix_tree_update_node_t update_node); void radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); -void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *, - void __rcu **slot); 
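/*
 * Illustrative sketch, not part of this series: the RCU lookup pattern for
 * code still on the radix-tree API is unchanged.  A lockless lookup pairs
 * radix_tree_deref_slot() with radix_tree_deref_retry(); the caller must
 * hold rcu_read_lock().  The function name is hypothetical.
 */
#include <linux/radix-tree.h>

static void *example_lockless_lookup(struct radix_tree_root *root,
				     unsigned long index)
{
	void __rcu **slot;
	void *item;

repeat:
	slot = radix_tree_lookup_slot(root, index);
	if (!slot)
		return NULL;
	item = radix_tree_deref_slot(slot);
	if (radix_tree_deref_retry(item))
		goto repeat;
	return item;
}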
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); -unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *, - void __rcu ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); -int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); @@ -337,8 +246,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); -void radix_tree_iter_tag_set(struct radix_tree_root *, - const struct radix_tree_iter *iter, unsigned int tag); void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, @@ -354,12 +261,6 @@ static inline void radix_tree_preload_end(void) preempt_enable(); } -int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t); -int radix_tree_split(struct radix_tree_root *, unsigned long index, - unsigned new_order); -int radix_tree_join(struct radix_tree_root *, unsigned long index, - unsigned new_order, void *); - void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max); @@ -465,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) static inline unsigned long __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) { - return iter->index + (slots << iter_shift(iter)); + return iter->index + slots; } /** @@ -490,21 +391,9 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { - return (iter->next_index - iter->index) >> iter_shift(iter); + return iter->next_index - iter->index; } -#ifdef CONFIG_RADIX_TREE_MULTIORDER -void __rcu **__radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags); -#else -/* Can't happen without sibling entries, but the compiler can't tell that */ -static inline void __rcu **__radix_tree_next_slot(void __rcu **slot, - struct radix_tree_iter *iter, unsigned flags) -{ - return slot; -} -#endif - /** * radix_tree_next_slot - find next slot in chunk * @@ -563,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, return NULL; found: - if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot)))) - return __radix_tree_next_slot(slot, iter, flags); return slot; } @@ -584,23 +471,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, slot = radix_tree_next_slot(slot, iter, 0)) /** - * radix_tree_for_each_contig - iterate over contiguous slots - * - * @slot: the void** variable for pointer to slot - * @root: the struct radix_tree_root pointer - * @iter: the struct radix_tree_iter pointer - * @start: iteration starting index - * - * @slot points to radix tree slot, @iter->index contains its index. 
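/*
 * Illustrative sketch, not part of this series: the preload pattern also
 * survives the conversion.  Memory is reserved up front with caller-chosen
 * GFP flags, then the insert runs under the tree's own lock; the lock and
 * function names are hypothetical.
 */
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int example_insert_item(struct radix_tree_root *root, spinlock_t *lock,
			       unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();
	return err;
}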
- */ -#define radix_tree_for_each_contig(slot, root, iter, start) \ - for (slot = radix_tree_iter_init(iter, start) ; \ - slot || (slot = radix_tree_next_chunk(root, iter, \ - RADIX_TREE_ITER_CONTIG)) ; \ - slot = radix_tree_next_slot(slot, iter, \ - RADIX_TREE_ITER_CONTIG)) - -/** * radix_tree_for_each_tagged - iterate over tagged slots * * @slot: the void** variable for pointer to slot diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 6aedc30003e7..c8bb4a2b48c3 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -167,17 +167,12 @@ struct rtc_device { #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ -extern struct rtc_device *rtc_device_register(const char *name, - struct device *dev, - const struct rtc_class_ops *ops, - struct module *owner); extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __rtc_register_device(struct module *owner, struct rtc_device *rtc); -extern void rtc_device_unregister(struct rtc_device *rtc); extern void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc); @@ -277,4 +272,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc, static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} #endif +#ifdef CONFIG_RTC_INTF_SYSFS +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); +#else +static inline +int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) +{ + return 0; +} + +static inline +int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) +{ + return 0; +} +#endif #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 38195f5c96b1..d8a07a4f171d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -300,17 +300,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page); void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); -/* Do not use directly, use workingset_lookup_update */ -void workingset_update_node(struct radix_tree_node *node); - -/* Returns workingset_update_node() if the mapping has shadow entries. 
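/*
 * Illustrative sketch, not part of this series: rtc_add_group() lets a
 * driver attach extra sysfs attributes before registration instead of
 * poking at the device's kobject after the fact.  The attribute, stub
 * show routine and registration helper below are hypothetical.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/rtc.h>

static ssize_t foo_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ok\n");	/* placeholder */
}
static DEVICE_ATTR_RO(foo_status);

static struct attribute *foo_rtc_attrs[] = {
	&dev_attr_foo_status.attr,
	NULL,
};

static const struct attribute_group foo_rtc_attr_group = {
	.attrs = foo_rtc_attrs,
};

static int example_foo_rtc_register(struct rtc_device *rtc)
{
	int err;

	err = rtc_add_group(rtc, &foo_rtc_attr_group);
	if (err)
		return err;

	return __rtc_register_device(THIS_MODULE, rtc);
}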
*/ -#define workingset_lookup_update(mapping) \ -({ \ - radix_tree_update_node_t __helper = workingset_update_node; \ - if (dax_mapping(mapping) || shmem_mapping(mapping)) \ - __helper = NULL; \ - __helper; \ -}) +/* Only track the nodes of mappings with shadow entries */ +void workingset_update_node(struct xa_node *node); +#define mapping_set_update(xas, mapping) do { \ + if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ + xas_set_update(xas, workingset_update_node); \ +} while (0) /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; @@ -409,7 +404,7 @@ extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); extern int __add_to_swap_cache(struct page *page, swp_entry_t entry); -extern void __delete_from_swap_cache(struct page *); +extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); extern void delete_from_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); @@ -563,7 +558,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, return -1; } -static inline void __delete_from_swap_cache(struct page *page) +static inline void __delete_from_swap_cache(struct page *page, + swp_entry_t entry) { } diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 22af9d8a84ae..4d961668e5fc 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -18,9 +18,8 @@ * * swp_entry_t's are *never* stored anywhere in their arch-dependent format. */ -#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \ - (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT)) -#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) +#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT) +#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) /* * Store a type+offset into a swp_entry_t in an arch-independent format @@ -29,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) { swp_entry_t ret; - ret.val = (type << SWP_TYPE_SHIFT(ret)) | - (offset & SWP_OFFSET_MASK(ret)); + ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK); return ret; } @@ -40,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset) */ static inline unsigned swp_type(swp_entry_t entry) { - return (entry.val >> SWP_TYPE_SHIFT(entry)); + return (entry.val >> SWP_TYPE_SHIFT); } /* @@ -49,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry) */ static inline pgoff_t swp_offset(swp_entry_t entry) { - return entry.val & SWP_OFFSET_MASK(entry); + return entry.val & SWP_OFFSET_MASK; } #ifdef CONFIG_MMU @@ -90,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg) { swp_entry_t entry; - entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT; + entry.val = xa_to_value(arg); return entry; } static inline void *swp_to_radix_entry(swp_entry_t entry) { - unsigned long value; - - value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT; - return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY); + return xa_mk_value(entry.val); } #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 3fd07912909c..8dc77e40bc03 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt); int vty_init(const struct file_operations *console_fops); -static inline bool 
vt_force_oops_output(struct vc_data *vc) -{ - if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0) - return true; - return false; -} - extern char vt_dont_switch; extern int default_utf8; extern int global_cursor_default; diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 2dfc8006fe64..d9514928ddac 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -4,10 +4,432 @@ /* * eXtensible Arrays * Copyright (c) 2017 Microsoft Corporation - * Author: Matthew Wilcox <mawilcox@microsoft.com> + * Author: Matthew Wilcox <willy@infradead.org> + * + * See Documentation/core-api/xarray.rst for how to use the XArray. */ +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/gfp.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/rcupdate.h> #include <linux/spinlock.h> +#include <linux/types.h> + +/* + * The bottom two bits of the entry determine how the XArray interprets + * the contents: + * + * 00: Pointer entry + * 10: Internal entry + * x1: Value entry or tagged pointer + * + * Attempting to store internal entries in the XArray is a bug. + * + * Most internal entries are pointers to the next node in the tree. + * The following internal entries have a special meaning: + * + * 0-62: Sibling entries + * 256: Zero entry + * 257: Retry entry + * + * Errors are also represented as internal entries, but use the negative + * space (-4094 to -2). They're never stored in the slots array; only + * returned by the normal API. + */ + +#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1) + +/** + * xa_mk_value() - Create an XArray entry from an integer. + * @v: Value to store in XArray. + * + * Context: Any context. + * Return: An entry suitable for storing in the XArray. + */ +static inline void *xa_mk_value(unsigned long v) +{ + WARN_ON((long)v < 0); + return (void *)((v << 1) | 1); +} + +/** + * xa_to_value() - Get value stored in an XArray entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value stored in the XArray entry. + */ +static inline unsigned long xa_to_value(const void *entry) +{ + return (unsigned long)entry >> 1; +} + +/** + * xa_is_value() - Determine if an entry is a value. + * @entry: XArray entry. + * + * Context: Any context. + * Return: True if the entry is a value, false if it is a pointer. + */ +static inline bool xa_is_value(const void *entry) +{ + return (unsigned long)entry & 1; +} + +/** + * xa_tag_pointer() - Create an XArray entry for a tagged pointer. + * @p: Plain pointer. + * @tag: Tag value (0, 1 or 3). + * + * If the user of the XArray prefers, they can tag their pointers instead + * of storing value entries. Three tags are available (0, 1 and 3). + * These are distinct from the xa_mark_t as they are not replicated up + * through the array and cannot be searched for. + * + * Context: Any context. + * Return: An XArray entry. + */ +static inline void *xa_tag_pointer(void *p, unsigned long tag) +{ + return (void *)((unsigned long)p | tag); +} + +/** + * xa_untag_pointer() - Turn an XArray entry into a plain pointer. + * @entry: XArray entry. + * + * If you have stored a tagged pointer in the XArray, call this function + * to get the untagged version of the pointer. + * + * Context: Any context. + * Return: A pointer. + */ +static inline void *xa_untag_pointer(void *entry) +{ + return (void *)((unsigned long)entry & ~3UL); +} + +/** + * xa_pointer_tag() - Get the tag stored in an XArray entry. + * @entry: XArray entry. 
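/*
 * Illustrative sketch, not part of this series: value entries replace the
 * old "exceptional" entries.  Any integer that fits in BITS_PER_XA_VALUE
 * bits round-trips through xa_mk_value()/xa_to_value(), which is exactly
 * how swp_to_radix_entry()/radix_to_swp_entry() are rewritten above.
 * The function name is hypothetical.
 */
#include <linux/xarray.h>

static void example_value_entry(void)
{
	unsigned long cookie = 42;
	void *entry = xa_mk_value(cookie);

	/* The low bit distinguishes value entries from pointers. */
	WARN_ON(!xa_is_value(entry));
	WARN_ON(xa_to_value(entry) != cookie);
}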
+ * + * If you have stored a tagged pointer in the XArray, call this function + * to get the tag of that pointer. + * + * Context: Any context. + * Return: A tag. + */ +static inline unsigned int xa_pointer_tag(void *entry) +{ + return (unsigned long)entry & 3UL; +} + +/* + * xa_mk_internal() - Create an internal entry. + * @v: Value to turn into an internal entry. + * + * Context: Any context. + * Return: An XArray internal entry corresponding to this value. + */ +static inline void *xa_mk_internal(unsigned long v) +{ + return (void *)((v << 2) | 2); +} + +/* + * xa_to_internal() - Extract the value from an internal entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value which was stored in the internal entry. + */ +static inline unsigned long xa_to_internal(const void *entry) +{ + return (unsigned long)entry >> 2; +} + +/* + * xa_is_internal() - Is the entry an internal entry? + * @entry: XArray entry. + * + * Context: Any context. + * Return: %true if the entry is an internal entry. + */ +static inline bool xa_is_internal(const void *entry) +{ + return ((unsigned long)entry & 3) == 2; +} + +/** + * xa_is_err() - Report whether an XArray operation returned an error + * @entry: Result from calling an XArray function + * + * If an XArray operation cannot complete an operation, it will return + * a special value indicating an error. This function tells you + * whether an error occurred; xa_err() tells you which error occurred. + * + * Context: Any context. + * Return: %true if the entry indicates an error. + */ +static inline bool xa_is_err(const void *entry) +{ + return unlikely(xa_is_internal(entry)); +} + +/** + * xa_err() - Turn an XArray result into an errno. + * @entry: Result from calling an XArray function. + * + * If an XArray operation cannot complete an operation, it will return + * a special pointer value which encodes an errno. This function extracts + * the errno from the pointer value, or returns 0 if the pointer does not + * represent an errno. + * + * Context: Any context. + * Return: A negative errno or 0. + */ +static inline int xa_err(void *entry) +{ + /* xa_to_internal() would not do sign extension. */ + if (xa_is_err(entry)) + return (long)entry >> 2; + return 0; +} + +typedef unsigned __bitwise xa_mark_t; +#define XA_MARK_0 ((__force xa_mark_t)0U) +#define XA_MARK_1 ((__force xa_mark_t)1U) +#define XA_MARK_2 ((__force xa_mark_t)2U) +#define XA_PRESENT ((__force xa_mark_t)8U) +#define XA_MARK_MAX XA_MARK_2 +#define XA_FREE_MARK XA_MARK_0 + +enum xa_lock_type { + XA_LOCK_IRQ = 1, + XA_LOCK_BH = 2, +}; + +/* + * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags, + * and we remain compatible with that. + */ +#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) +#define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) +#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ + (__force unsigned)(mark))) + +#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) + +/** + * struct xarray - The anchor of the XArray. + * @xa_lock: Lock that protects the contents of the XArray. + * + * To use the xarray, define it statically or embed it in your data structure. + * It is a very small data structure, so it does not usually make sense to + * allocate it separately and keep a pointer to it in your data structure. + * + * You may use the xa_lock to protect your own data structures as well. 
+ */ +/* + * If all of the entries in the array are NULL, @xa_head is a NULL pointer. + * If the only non-NULL entry in the array is at index 0, @xa_head is that + * entry. If any other entry in the array is non-NULL, @xa_head points + * to an @xa_node. + */ +struct xarray { + spinlock_t xa_lock; +/* private: The rest of the data structure is not to be used directly. */ + gfp_t xa_flags; + void __rcu * xa_head; +}; + +#define XARRAY_INIT(name, flags) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ + .xa_flags = flags, \ + .xa_head = NULL, \ +} + +/** + * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags. + * @name: A string that names your XArray. + * @flags: XA_FLAG values. + * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name and flags. It is + * equivalent to calling xa_init_flags() on the array, but it does the + * initialisation at compiletime instead of runtime. + */ +#define DEFINE_XARRAY_FLAGS(name, flags) \ + struct xarray name = XARRAY_INIT(name, flags) + +/** + * DEFINE_XARRAY() - Define an XArray. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name. It is equivalent + * to calling xa_init() on the array, but it does the initialisation at + * compiletime instead of runtime. + */ +#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) + +/** + * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) + +void xa_init_flags(struct xarray *, gfp_t flags); +void *xa_load(struct xarray *, unsigned long index); +void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *xa_cmpxchg(struct xarray *, unsigned long index, + void *old, void *entry, gfp_t); +int xa_reserve(struct xarray *, unsigned long index, gfp_t); +void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, + void *entry, gfp_t); +bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); +void *xa_find(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +void *xa_find_after(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, + unsigned long max, unsigned int n, xa_mark_t); +void xa_destroy(struct xarray *); + +/** + * xa_init() - Initialise an empty XArray. + * @xa: XArray. + * + * An empty XArray is full of NULL entries. + * + * Context: Any context. + */ +static inline void xa_init(struct xarray *xa) +{ + xa_init_flags(xa, 0); +} + +/** + * xa_empty() - Determine if an array has any present entries. + * @xa: XArray. + * + * Context: Any context. + * Return: %true if the array contains only NULL pointers. + */ +static inline bool xa_empty(const struct xarray *xa) +{ + return xa->xa_head == NULL; +} + +/** + * xa_marked() - Inquire whether any entry in this array has a mark set + * @xa: Array + * @mark: Mark value + * + * Context: Any context. + * Return: %true if any entry has this mark set. 
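/*
 * Illustrative sketch, not part of this series: the normal API in its
 * simplest form.  A file-scope array defined with DEFINE_XARRAY() needs no
 * runtime initialisation, and storing NULL removes an entry.  The array
 * and function names are hypothetical.
 */
#include <linux/xarray.h>

static DEFINE_XARRAY(example_array);

static int example_basic_use(void *item)
{
	void *old;

	old = xa_store(&example_array, 5, item, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	WARN_ON(xa_load(&example_array, 5) != item);

	/* Storing NULL is equivalent to erasing the entry. */
	xa_store(&example_array, 5, NULL, 0);
	return 0;
}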
+ */ +static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) +{ + return xa->xa_flags & XA_FLAGS_MARK(mark); +} + +/** + * xa_erase() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase(struct xarray *xa, unsigned long index) +{ + return xa_store(xa, index, NULL, 0); +} + +/** + * xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * If you would rather see the existing entry in the array, use xa_cmpxchg(). + * This function is for users who don't care what the entry is, only that + * one is present. + * + * Context: Process context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); + if (!curr) + return 0; + if (xa_is_err(curr)) + return xa_err(curr); + return -EEXIST; +} + +/** + * xa_release() - Release a reserved entry. + * @xa: XArray. + * @index: Index of entry. + * + * After calling xa_reserve(), you can call this function to release the + * reservation. If the entry at @index has been stored to, this function + * will do nothing. + */ +static inline void xa_release(struct xarray *xa, unsigned long index) +{ + xa_cmpxchg(xa, index, NULL, NULL, 0); +} + +/** + * xa_for_each() - Iterate over a portion of an XArray. + * @xa: XArray. + * @entry: Entry retrieved from array. + * @index: Index of @entry. + * @max: Maximum index to retrieve from array. + * @filter: Selection criterion. + * + * Initialise @index to the lowest index you want to retrieve from the + * array. During the iteration, @entry will have the value of the entry + * stored in @xa at @index. The iteration will skip all entries in the + * array which do not match @filter. You may modify @index during the + * iteration if you want to skip or reprocess indices. It is safe to modify + * the array during the iteration. At the end of the iteration, @entry will + * be set to NULL and @index will have a value less than or equal to max. + * + * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). xa_for_each() + * will spin if it hits a retry entry; if you intend to see retry entries, + * you should use the xas_for_each() iterator instead. The xas_for_each() + * iterator will expand into more inline code than xa_for_each(). + * + * Context: Any context. Takes and releases the RCU lock. 
+ */ +#define xa_for_each(xa, entry, index, max, filter) \ + for (entry = xa_find(xa, &index, max, filter); entry; \ + entry = xa_find_after(xa, &index, max, filter)) #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) #define xa_lock(xa) spin_lock(&(xa)->xa_lock) @@ -21,4 +443,873 @@ #define xa_unlock_irqrestore(xa, flags) \ spin_unlock_irqrestore(&(xa)->xa_lock, flags) +/* + * Versions of the normal API which require the caller to hold the + * xa_lock. If the GFP flags allow it, they will drop the lock to + * allocate memory, then reacquire it afterwards. These functions + * may also re-enable interrupts if the XArray flags indicate the + * locking should be interrupt safe. + */ +void *__xa_erase(struct xarray *, unsigned long index); +void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, + void *entry, gfp_t); +int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); + +/** + * __xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * If you would rather see the existing entry in the array, use __xa_cmpxchg(). + * This function is for users who don't care what the entry is, only that + * one is present. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int __xa_insert(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); + if (!curr) + return 0; + if (xa_is_err(curr)) + return xa_err(curr); + return -EEXIST; +} + +/** + * xa_erase_bh() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_bh(xa); + entry = __xa_erase(xa, index); + xa_unlock_bh(xa); + + return entry; +} + +/** + * xa_erase_irq() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * This function is the equivalent of calling xa_store() with %NULL as + * the third argument. The XArray does not need to allocate memory, so + * the user does not need to provide GFP flags. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_irq(xa); + entry = __xa_erase(xa, index); + xa_unlock_irq(xa); + + return entry; +} + +/** + * xa_alloc() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. 
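/*
 * Illustrative sketch, not part of this series: typical use of xa_insert()
 * and the xa_for_each() iterator defined above.  XA_PRESENT selects every
 * non-NULL entry; the array and function names are hypothetical.
 */
#include <linux/xarray.h>

static DEFINE_XARRAY(example_objects);

static int example_add_object(unsigned long index, void *obj)
{
	/* Fails with -EEXIST instead of silently replacing an entry. */
	return xa_insert(&example_objects, index, obj, GFP_KERNEL);
}

static unsigned long example_count_objects(void)
{
	unsigned long index = 0;
	unsigned long count = 0;
	void *entry;

	xa_for_each(&example_objects, entry, index, ULONG_MAX, XA_PRESENT)
		count++;

	return count;
}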
+ * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. + */ +static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. + */ +static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @max: Maximum ID to allocate (inclusive). + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Allocates an unused ID in the range specified by @id and @max. + * Updates the @id pointer with the index, then stores the entry at that + * index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if + * there is no more space in the XArray. + */ +static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, + gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc(xa, id, max, entry, gfp); + xa_unlock_irq(xa); + + return err; +} + +/* Everything below here is the Advanced API. Proceed with caution. */ + +/* + * The xarray is constructed out of a set of 'chunks' of pointers. Choosing + * the best chunk size requires some tradeoffs. A power of two recommends + * itself so that we can walk the tree based purely on shifts and masks. + * Generally, the larger the better; as the number of slots per level of the + * tree increases, the less tall the tree needs to be. But that needs to be + * balanced against the memory consumption of each node. On a 64-bit system, + * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we + * doubled the number of slots per node, we'd get only 3 nodes per 4kB page. + */ +#ifndef XA_CHUNK_SHIFT +#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 
4 : 6) +#endif +#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT) +#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1) +#define XA_MAX_MARKS 3 +#define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG) + +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is a value entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @nr_values is the count of every element in ->slots which is + * either a value entry or a sibling of a value entry. + */ +struct xa_node { + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char nr_values; /* Value entry count */ + struct xa_node __rcu *parent; /* NULL at top of tree */ + struct xarray *array; /* The array we belong to */ + union { + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ + }; + void __rcu *slots[XA_CHUNK_SIZE]; + union { + unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS]; + unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS]; + }; +}; + +void xa_dump(const struct xarray *); +void xa_dump_node(const struct xa_node *); + +#ifdef XA_DEBUG +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + xa_dump(xa); \ + BUG(); \ + } \ + } while (0) +#define XA_NODE_BUG_ON(node, x) do { \ + if (x) { \ + if (node) xa_dump_node(node); \ + BUG(); \ + } \ + } while (0) +#else +#define XA_BUG_ON(xa, x) do { } while (0) +#define XA_NODE_BUG_ON(node, x) do { } while (0) +#endif + +/* Private */ +static inline void *xa_head(const struct xarray *xa) +{ + return rcu_dereference_check(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_head_locked(const struct xarray *xa) +{ + return rcu_dereference_protected(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_check(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry_locked(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_protected(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent(const struct xarray *xa, + const struct xa_node *node) +{ + return rcu_dereference_check(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent_locked(const struct xarray *xa, + const struct xa_node *node) +{ + return rcu_dereference_protected(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_mk_node(const struct xa_node *node) +{ + return (void *)((unsigned long)node | 2); +} + +/* Private */ +static inline struct xa_node *xa_to_node(const void *entry) +{ + return (struct xa_node *)((unsigned long)entry - 2); +} + +/* Private */ +static inline bool xa_is_node(const void *entry) +{ + return xa_is_internal(entry) && (unsigned long)entry > 4096; +} + +/* Private */ +static inline void *xa_mk_sibling(unsigned int offset) +{ + return xa_mk_internal(offset); +} + +/* Private */ +static inline unsigned long xa_to_sibling(const void *entry) +{ + return xa_to_internal(entry); +} + +/** + * xa_is_sibling() - Is the entry a sibling entry? 
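/*
 * Illustrative sketch, not part of this series: an allocating XArray hands
 * out IDs much like an IDR.  The array must be defined with
 * DEFINE_XARRAY_ALLOC() (or XA_FLAGS_ALLOC) so the free mark is tracked;
 * the array and function names are hypothetical.
 */
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_ids);

static int example_assign_id(void *obj, u32 *out_id)
{
	*out_id = 0;	/* lower bound of the range to allocate from */

	/* Finds the lowest free index up to 1023 and stores @obj there. */
	return xa_alloc(&example_ids, out_id, 1023, obj, GFP_KERNEL);
}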
+ * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a sibling entry. + */ +static inline bool xa_is_sibling(const void *entry) +{ + return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) && + (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); +} + +#define XA_ZERO_ENTRY xa_mk_internal(256) +#define XA_RETRY_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a zero entry. + */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + +/** + * xa_is_retry() - Is the entry a retry entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a retry entry. + */ +static inline bool xa_is_retry(const void *entry) +{ + return unlikely(entry == XA_RETRY_ENTRY); +} + +/** + * typedef xa_update_node_t - A callback function from the XArray. + * @node: The node which is being processed + * + * This function is called every time the XArray updates the count of + * present and value entries in a node. It allows advanced users to + * maintain the private_list in the node. + * + * Context: The xa_lock is held and interrupts may be disabled. + * Implementations should not drop the xa_lock, nor re-enable + * interrupts. + */ +typedef void (*xa_update_node_t)(struct xa_node *node); + +/* + * The xa_state is opaque to its users. It contains various different pieces + * of state involved in the current operation on the XArray. It should be + * declared on the stack and passed between the various internal routines. + * The various elements in it should not be accessed directly, but only + * through the provided accessor functions. The below documentation is for + * the benefit of those working on the code, not for users of the XArray. + * + * @xa_node usually points to the xa_node containing the slot we're operating + * on (and @xa_offset is the offset in the slots array). If there is a + * single entry in the array at index 0, there are no allocated xa_nodes to + * point to, and so we store %NULL in @xa_node. @xa_node is set to + * the value %XAS_RESTART if the xa_state is not walked to the correct + * position in the tree of nodes for this operation. If an error occurs + * during an operation, it is set to an %XAS_ERROR value. If we run off the + * end of the allocated nodes, it is set to %XAS_BOUNDS. + */ +struct xa_state { + struct xarray *xa; + unsigned long xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; /* Helps gcc generate better code */ + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; +}; + +/* + * We encode errnos in the xas->xa_node. If an error has happened, we need to + * drop the lock to fix it, and once we've done so the xa_state is invalid. + */ +#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL)) +#define XAS_BOUNDS ((struct xa_node *)1UL) +#define XAS_RESTART ((struct xa_node *)3UL) + +#define __XA_STATE(array, index, shift, sibs) { \ + .xa = array, \ + .xa_index = index, \ + .xa_shift = shift, \ + .xa_sibs = sibs, \ + .xa_offset = 0, \ + .xa_pad = 0, \ + .xa_node = XAS_RESTART, \ + .xa_alloc = NULL, \ + .xa_update = NULL \ +} + +/** + * XA_STATE() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. 
+ * + * Declare and initialise an xa_state on the stack. + */ +#define XA_STATE(name, array, index) \ + struct xa_state name = __XA_STATE(array, index, 0, 0) + +/** + * XA_STATE_ORDER() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. + * @order: Order of entry. + * + * Declare and initialise an xa_state on the stack. This variant of + * XA_STATE() allows you to specify the 'order' of the element you + * want to operate on.` + */ +#define XA_STATE_ORDER(name, array, index, order) \ + struct xa_state name = __XA_STATE(array, \ + (index >> order) << order, \ + order - (order % XA_CHUNK_SHIFT), \ + (1U << (order % XA_CHUNK_SHIFT)) - 1) + +#define xas_marked(xas, mark) xa_marked((xas)->xa, (mark)) +#define xas_trylock(xas) xa_trylock((xas)->xa) +#define xas_lock(xas) xa_lock((xas)->xa) +#define xas_unlock(xas) xa_unlock((xas)->xa) +#define xas_lock_bh(xas) xa_lock_bh((xas)->xa) +#define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa) +#define xas_lock_irq(xas) xa_lock_irq((xas)->xa) +#define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa) +#define xas_lock_irqsave(xas, flags) \ + xa_lock_irqsave((xas)->xa, flags) +#define xas_unlock_irqrestore(xas, flags) \ + xa_unlock_irqrestore((xas)->xa, flags) + +/** + * xas_error() - Return an errno stored in the xa_state. + * @xas: XArray operation state. + * + * Return: 0 if no error has been noted. A negative errno if one has. + */ +static inline int xas_error(const struct xa_state *xas) +{ + return xa_err(xas->xa_node); +} + +/** + * xas_set_err() - Note an error in the xa_state. + * @xas: XArray operation state. + * @err: Negative error number. + * + * Only call this function with a negative @err; zero or positive errors + * will probably not behave the way you think they should. If you want + * to clear the error from an xa_state, use xas_reset(). + */ +static inline void xas_set_err(struct xa_state *xas, long err) +{ + xas->xa_node = XA_ERROR(err); +} + +/** + * xas_invalid() - Is the xas in a retry or error state? + * @xas: XArray operation state. + * + * Return: %true if the xas cannot be used for operations. + */ +static inline bool xas_invalid(const struct xa_state *xas) +{ + return (unsigned long)xas->xa_node & 3; +} + +/** + * xas_valid() - Is the xas a valid cursor into the array? + * @xas: XArray operation state. + * + * Return: %true if the xas can be used for operations. + */ +static inline bool xas_valid(const struct xa_state *xas) +{ + return !xas_invalid(xas); +} + +/** + * xas_is_node() - Does the xas point to a node? + * @xas: XArray operation state. + * + * Return: %true if the xas currently references a node. + */ +static inline bool xas_is_node(const struct xa_state *xas) +{ + return xas_valid(xas) && xas->xa_node; +} + +/* True if the pointer is something other than a node */ +static inline bool xas_not_node(struct xa_node *node) +{ + return ((unsigned long)node & 3) || !node; +} + +/* True if the node represents RESTART or an error */ +static inline bool xas_frozen(struct xa_node *node) +{ + return (unsigned long)node & 2; +} + +/* True if the node represents head-of-tree, RESTART or BOUNDS */ +static inline bool xas_top(struct xa_node *node) +{ + return node <= XAS_RESTART; +} + +/** + * xas_reset() - Reset an XArray operation state. + * @xas: XArray operation state. + * + * Resets the error or walk state of the @xas so future walks of the + * array will start from the root. 
Use this if you have dropped the + * xarray lock and want to reuse the xa_state. + * + * Context: Any context. + */ +static inline void xas_reset(struct xa_state *xas) +{ + xas->xa_node = XAS_RESTART; +} + +/** + * xas_retry() - Retry the operation if appropriate. + * @xas: XArray operation state. + * @entry: Entry from xarray. + * + * The advanced functions may sometimes return an internal entry, such as + * a retry entry or a zero entry. This function sets up the @xas to restart + * the walk from the head of the array if needed. + * + * Context: Any context. + * Return: true if the operation needs to be retried. + */ +static inline bool xas_retry(struct xa_state *xas, const void *entry) +{ + if (xa_is_zero(entry)) + return true; + if (!xa_is_retry(entry)) + return false; + xas_reset(xas); + return true; +} + +void *xas_load(struct xa_state *); +void *xas_store(struct xa_state *, void *entry); +void *xas_find(struct xa_state *, unsigned long max); +void *xas_find_conflict(struct xa_state *); + +bool xas_get_mark(const struct xa_state *, xa_mark_t); +void xas_set_mark(const struct xa_state *, xa_mark_t); +void xas_clear_mark(const struct xa_state *, xa_mark_t); +void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t); +void xas_init_marks(const struct xa_state *); + +bool xas_nomem(struct xa_state *, gfp_t); +void xas_pause(struct xa_state *); + +void xas_create_range(struct xa_state *); + +/** + * xas_reload() - Refetch an entry from the xarray. + * @xas: XArray operation state. + * + * Use this function to check that a previously loaded entry still has + * the same value. This is useful for the lockless pagecache lookup where + * we walk the array with only the RCU lock to protect us, lock the page, + * then check that the page hasn't moved since we looked it up. + * + * The caller guarantees that @xas is still valid. If it may be in an + * error or restart state, call xas_load() instead. + * + * Return: The entry at this location in the xarray. + */ +static inline void *xas_reload(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (node) + return xa_entry(xas->xa, node, xas->xa_offset); + return xa_head(xas->xa); +} + +/** + * xas_set() - Set up XArray operation state for a different index. + * @xas: XArray operation state. + * @index: New index into the XArray. + * + * Move the operation state to refer to a different index. This will + * have the effect of starting a walk from the top; see xas_next() + * to move to an adjacent index. + */ +static inline void xas_set(struct xa_state *xas, unsigned long index) +{ + xas->xa_index = index; + xas->xa_node = XAS_RESTART; +} + +/** + * xas_set_order() - Set up XArray operation state for a multislot entry. + * @xas: XArray operation state. + * @index: Target of the operation. + * @order: Entry occupies 2^@order indices. + */ +static inline void xas_set_order(struct xa_state *xas, unsigned long index, + unsigned int order) +{ +#ifdef CONFIG_XARRAY_MULTI + xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; + xas->xa_shift = order - (order % XA_CHUNK_SHIFT); + xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + xas->xa_node = XAS_RESTART; +#else + BUG_ON(order > 0); + xas_set(xas, index); +#endif +} + +/** + * xas_set_update() - Set up XArray operation state for a callback. + * @xas: XArray operation state. + * @update: Function to call when updating a node. + * + * The XArray can notify a caller after it has updated an xa_node. 
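/*
 * Illustrative sketch, not part of this series: the canonical advanced-API
 * store loop.  xas_nomem() allocates memory outside the lock and requests
 * a retry if the store needed a node it did not have; the function name is
 * hypothetical.
 */
#include <linux/xarray.h>

static int example_xas_store(struct xarray *xa, unsigned long index,
			     void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}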
+ * This is advanced functionality and is only needed by the page cache. + */ +static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update) +{ + xas->xa_update = update; +} + +/** + * xas_next_entry() - Advance iterator to next present entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * + * xas_next_entry() is an inline function to optimise xarray traversal for + * speed. It is equivalent to calling xas_find(), and will call xas_find() + * for all the hard cases. + * + * Return: The next present entry after the one currently referred to by @xas. + */ +static inline void *xas_next_entry(struct xa_state *xas, unsigned long max) +{ + struct xa_node *node = xas->xa_node; + void *entry; + + if (unlikely(xas_not_node(node) || node->shift || + xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK))) + return xas_find(xas, max); + + do { + if (unlikely(xas->xa_index >= max)) + return xas_find(xas, max); + if (unlikely(xas->xa_offset == XA_CHUNK_MASK)) + return xas_find(xas, max); + entry = xa_entry(xas->xa, node, xas->xa_offset + 1); + if (unlikely(xa_is_internal(entry))) + return xas_find(xas, max); + xas->xa_offset++; + xas->xa_index++; + } while (!entry); + + return entry; +} + +/* Private */ +static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance, + xa_mark_t mark) +{ + unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark]; + unsigned int offset = xas->xa_offset; + + if (advance) + offset++; + if (XA_CHUNK_SIZE == BITS_PER_LONG) { + if (offset < XA_CHUNK_SIZE) { + unsigned long data = *addr & (~0UL << offset); + if (data) + return __ffs(data); + } + return XA_CHUNK_SIZE; + } + + return find_next_bit(addr, XA_CHUNK_SIZE, offset); +} + +/** + * xas_next_marked() - Advance iterator to next marked entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * @mark: Mark to search for. + * + * xas_next_marked() is an inline function to optimise xarray traversal for + * speed. It is equivalent to calling xas_find_marked(), and will call + * xas_find_marked() for all the hard cases. + * + * Return: The next marked entry after the one currently referred to by @xas. + */ +static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, + xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset; + + if (unlikely(xas_not_node(node) || node->shift)) + return xas_find_marked(xas, max, mark); + offset = xas_find_chunk(xas, true, mark); + xas->xa_offset = offset; + xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset; + if (xas->xa_index > max) + return NULL; + if (offset == XA_CHUNK_SIZE) + return xas_find_marked(xas, max, mark); + return xa_entry(xas->xa, node, offset); +} + +/* + * If iterating while holding a lock, drop the lock and reschedule + * every %XA_CHECK_SCHED loops. + */ +enum { + XA_CHECK_SCHED = 4096, +}; + +/** + * xas_for_each() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * @max: Maximum index to retrieve from array. + * + * The loop body will be executed for each entry present in the xarray + * between the current xas position and @max. @entry will be set to + * the entry retrieved from the xarray. It is safe to delete entries + * from the array in the loop body. You should hold either the RCU lock + * or the xa_lock while iterating. If you need to drop the lock, call + * xas_pause() first. 
+ */
+#define xas_for_each(xas, entry, max) \
+	for (entry = xas_find(xas, max); entry; \
+	     entry = xas_next_entry(xas, max))
+
+/**
+ * xas_for_each_marked() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ * @max: Maximum index to retrieve from array.
+ * @mark: Mark to search for.
+ *
+ * The loop body will be executed for each marked entry in the xarray
+ * between the current xas position and @max. @entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each_marked(xas, entry, max, mark) \
+	for (entry = xas_find_marked(xas, max, mark); entry; \
+	     entry = xas_next_marked(xas, max, mark))
+
+/**
+ * xas_for_each_conflict() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ *
+ * The loop body will be executed for each entry in the XArray that lies
+ * within the range specified by @xas. If the loop completes successfully,
+ * any entries that lie in this range will be replaced by @entry. The caller
+ * may break out of the loop; if they do so, the contents of the XArray will
+ * be unchanged. The operation may fail due to an out of memory condition.
+ * The caller may also call xas_set_err() to exit the loop while setting an
+ * error to record the reason.
+ */
+#define xas_for_each_conflict(xas, entry) \
+	while ((entry = xas_find_conflict(xas)))
+
+void *__xas_next(struct xa_state *);
+void *__xas_prev(struct xa_state *);
+
+/**
+ * xas_prev() - Move iterator to previous index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * subtracted from the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index 0, this function wraps
+ * around to %ULONG_MAX.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_prev(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (unlikely(xas_not_node(node) || node->shift ||
+				xas->xa_offset == 0))
+		return __xas_prev(xas);
+
+	xas->xa_index--;
+	xas->xa_offset--;
+	return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
+/**
+ * xas_next() - Move state to next index.
+ * @xas: XArray operation state.
+ *
+ * If the @xas was in an error state, it will remain in an error state
+ * and this function will return %NULL. If the @xas has never been walked,
+ * it will have the effect of calling xas_load(). Otherwise one will be
+ * added to the index and the state will be walked to the correct
+ * location in the array for the next operation.
+ *
+ * If the iterator was referencing index %ULONG_MAX, this function wraps
+ * around to 0.
+ *
+ * Return: The entry at the new index. This may be %NULL or an internal
+ * entry.
+ */
+static inline void *xas_next(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (unlikely(xas_not_node(node) || node->shift ||
+				xas->xa_offset == XA_CHUNK_MASK))
+		return __xas_next(xas);
+
+	xas->xa_index++;
+	xas->xa_offset++;
+	return xa_entry(xas->xa, node, xas->xa_offset);
+}
+
 #endif /* _LINUX_XARRAY_H */
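A short usage sketch may make the advanced xa_state API above easier to follow. The helper below is illustrative only and is not part of this patch: it assumes an already-initialised struct xarray, picks XA_MARK_0 arbitrarily, and shows how XA_STATE(), xas_for_each_marked(), xas_clear_mark() and xas_pause() combine with the XA_CHECK_SCHED convention when a long iteration has to drop the lock periodically.

#include <linux/sched.h>
#include <linux/xarray.h>

/* Illustrative sketch, not part of the patch: clear XA_MARK_0 from every
 * marked entry, pausing every XA_CHECK_SCHED iterations so the lock is not
 * held for too long. Assumes @xa has already been initialised.
 */
static void clear_all_marks(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int i = 0;

	xas_lock(&xas);
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
		xas_clear_mark(&xas, XA_MARK_0);
		if (++i % XA_CHECK_SCHED)
			continue;
		/* Remember our place, let others take the lock, resume. */
		xas_pause(&xas);
		xas_unlock(&xas);
		cond_resched();
		xas_lock(&xas);
	}
	xas_unlock(&xas);
}

The same pause-and-resume pattern applies to xas_for_each(); the key point, as the kernel-doc above notes, is that xas_pause() must be called before the lock is dropped.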
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 1ceec56de015..370e9a5536ef 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -665,6 +665,8 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
 	/* Subquery id: Query GFX RLC SRLS firmware version */
 	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
+	/* Subquery id: Query DMCU firmware version */
+	#define AMDGPU_INFO_FW_DMCU 0x12
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
 /* the used VRAM size */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 721ab7e54d96..0cd40ebfa1b1 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -30,11 +30,50 @@ extern "C" {
 #endif
 
+/**
+ * DOC: overview
+ *
+ * In the DRM subsystem, framebuffer pixel formats are described using the
+ * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
+ * fourcc code, a Format Modifier may optionally be provided, in order to
+ * further describe the buffer's format - for example tiling or compression.
+ *
+ * Format Modifiers
+ * ----------------
+ *
+ * Format modifiers are used in conjunction with a fourcc code, forming a
+ * unique fourcc:modifier pair. This format:modifier pair must fully define the
+ * format and data layout of the buffer, and should be the only way to describe
+ * that particular buffer.
+ *
+ * Having multiple fourcc:modifier pairs which describe the same layout should
+ * be avoided, as such aliases run the risk of different drivers exposing
+ * different names for the same data format, forcing userspace to understand
+ * that they are aliases.
+ *
+ * Format modifiers may change any property of the buffer, including the number
+ * of planes and/or the required allocation size. Format modifiers are
+ * vendor-namespaced, and as such the relationship between a fourcc code and a
+ * modifier is specific to the modifier being used. For example, some modifiers
+ * may preserve meaning - such as number of planes - from the fourcc code,
+ * whereas others may not.
+ *
+ * Vendors should document their modifier usage in as much detail as
+ * possible, to ensure maximum compatibility across devices, drivers and
+ * applications.
+ *
+ * The authoritative list of format modifier codes is found in
+ * `include/uapi/drm/drm_fourcc.h`
+ */
+
 #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
 				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
 
 #define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
 
+/* Reserve 0 for the invalid format specifier */
+#define DRM_FORMAT_INVALID	0
+
 /* color index */
 #define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
@@ -300,6 +339,15 @@ extern "C" {
 #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE	fourcc_mod_code(SAMSUNG, 1)
 
 /*
+ * Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
+ *
+ * This is a simple tiled layout using tiles of 16x16 pixels in a row-major
+ * layout. For YCbCr formats Cb/Cr components are taken in such a way that
+ * they correspond to their 16x16 luma block.
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE	fourcc_mod_code(SAMSUNG, 2)
+
+/*
  * Qualcomm Compressed Format
  *
  * Refers to a compressed variant of the base format that is compressed.
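To make the fourcc:modifier pairing above concrete, here is a hypothetical userspace sketch (not part of this patch) that attaches an explicit modifier when creating a framebuffer through libdrm's drmModeAddFB2WithModifiers(). The NV12 format, the single contiguous buffer layout and the choice of the new Samsung 16x16 tile modifier are illustrative assumptions only.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

/* Hypothetical helper: create a DRM framebuffer whose format and layout are
 * fully described by a fourcc:modifier pair. Values are illustrative only. */
static int add_tiled_nv12_fb(int fd, uint32_t width, uint32_t height,
			     uint32_t bo_handle, uint32_t pitch,
			     uint32_t *fb_id)
{
	const uint64_t mod = DRM_FORMAT_MOD_SAMSUNG_16_16_TILE;
	uint32_t handles[4] = { bo_handle, bo_handle };	/* Y and CbCr planes */
	uint32_t pitches[4] = { pitch, pitch };
	uint32_t offsets[4] = { 0, pitch * height };	/* CbCr follows Y */
	uint64_t modifiers[4] = { mod, mod };	/* same modifier for all planes */

	return drmModeAddFB2WithModifiers(fd, width, height, DRM_FORMAT_NV12,
					  handles, pitches, offsets, modifiers,
					  fb_id, DRM_MODE_FB_MODIFIERS);
}

When DRM_MODE_FB_MODIFIERS is set, the kernel expects the same modifier on every plane, which is why both planes carry the tile modifier here.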
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 8d67243952f4..d3e0fe31efc5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -186,8 +186,9 @@ extern "C" {
 /*
  * DRM_MODE_REFLECT_<axis>
  *
- * Signals that the contents of a drm plane is reflected in the <axis> axis,
+ * Signals that the contents of a drm plane are reflected along the <axis> axis,
  * in the same way as mirroring.
+ * See kerneldoc chapter "Plane Composition Properties" for more details.
  *
  * This define is provided as a convenience, looking up the property id
  * using the name->prop id lookup is the preferred method.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 7f5634ce8e88..a4446f452040 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
 
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
 typedef struct drm_i915_getparam {
 	__s32 param;
 	/*
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 01674b56e14f..f5ff8a76e208 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -82,6 +82,14 @@ struct kfd_ioctl_set_cu_mask_args {
 	__u64 cu_mask_ptr; /* to KFD */
 };
 
+struct kfd_ioctl_get_queue_wave_state_args {
+	__u64 ctl_stack_address;	/* to KFD */
+	__u32 ctl_stack_used_size;	/* from KFD */
+	__u32 save_area_used_size;	/* from KFD */
+	__u32 queue_id;			/* to KFD */
+	__u32 pad;
+};
+
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
 #define KFD_IOC_CACHE_POLICY_COHERENT 0
 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -475,7 +483,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 #define AMDKFD_IOC_SET_CU_MASK		\
 		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
 
+#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
+		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+
 #define AMDKFD_COMMAND_START		0x01
-#define AMDKFD_COMMAND_END		0x1B
+#define AMDKFD_COMMAND_END		0x1C
 
 #endif
diff --git a/include/uapi/linux/udmabuf.h b/include/uapi/linux/udmabuf.h
new file mode 100644
index 000000000000..46b6532ed855
--- /dev/null
+++ b/include/uapi/linux/udmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_UDMABUF_H
+#define _UAPI_LINUX_UDMABUF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define UDMABUF_FLAGS_CLOEXEC	0x01
+
+struct udmabuf_create {
+	__u32 memfd;
+	__u32 flags;
+	__u64 offset;
+	__u64 size;
+};
+
+struct udmabuf_create_item {
+	__u32 memfd;
+	__u32 __pad;
+	__u64 offset;
+	__u64 size;
+};
+
+struct udmabuf_create_list {
+	__u32 flags;
+	__u32 count;
+	struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE       _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST  _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _UAPI_LINUX_UDMABUF_H */
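Since udmabuf introduces a brand-new userspace interface, a short usage sketch may help. The helper below is illustrative only and is not part of this patch: it assumes the driver's misc device is exposed as /dev/udmabuf, that the requested size is a multiple of the page size, and it keeps error handling to a minimum.

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

/* Illustrative only: wrap a sealed memfd region in a dma-buf and return the
 * dma-buf fd (or -1 on failure). @size must be page-aligned. */
static int create_udmabuf(size_t size)
{
	struct udmabuf_create create;
	int devfd, memfd, buffd = -1;

	devfd = open("/dev/udmabuf", O_RDWR);
	memfd = memfd_create("udmabuf-backing", MFD_ALLOW_SEALING);
	if (devfd < 0 || memfd < 0)
		goto out;

	/* The backing memfd must not be able to shrink under the driver. */
	if (ftruncate(memfd, size) < 0 ||
	    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
		goto out;

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;

	buffd = ioctl(devfd, UDMABUF_CREATE, &create);
out:
	if (memfd >= 0)
		close(memfd);
	if (devfd >= 0)
		close(devfd);
	return buffd;
}

The driver refuses memfds that can still shrink, hence the F_SEAL_SHRINK seal before UDMABUF_CREATE; UDMABUF_CREATE_LIST works the same way but takes an array of udmabuf_create_item entries so several memfd ranges can be combined into one dma-buf.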