-rw-r--r--   Documentation/DocBook/drm.tmpl         | 274
-rw-r--r--   drivers/gpu/drm/drm_crtc.c             |  10
-rw-r--r--   drivers/gpu/drm/drm_gem.c              |  11
-rw-r--r--   drivers/gpu/drm/drm_gem_cma_helper.c   | 259
-rw-r--r--   drivers/gpu/drm/omapdrm/omap_gem.c     |   3
-rw-r--r--   drivers/gpu/drm/rcar-du/rcar_du_kms.c  |   4
-rw-r--r--   include/drm/drm_gem_cma_helper.h       |  30
7 files changed, 395 insertions, 196 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index b8bfa8d1f289..8c54f9a393cf 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -492,10 +492,10 @@ char *date;</synopsis> <sect2> <title>The Translation Table Manager (TTM)</title> <para> - TTM design background and information belongs here. + TTM design background and information belongs here. </para> <sect3> - <title>TTM initialization</title> + <title>TTM initialization</title> <warning><para>This section is outdated.</para></warning> <para> Drivers wishing to support TTM must fill out a drm_bo_driver @@ -503,42 +503,42 @@ char *date;</synopsis> pointers for initializing the TTM, allocating and freeing memory, waiting for command completion and fence synchronization, and memory migration. See the radeon_ttm.c file for an example of usage. - </para> - <para> - The ttm_global_reference structure is made up of several fields: - </para> - <programlisting> - struct ttm_global_reference { - enum ttm_global_types global_type; - size_t size; - void *object; - int (*init) (struct ttm_global_reference *); - void (*release) (struct ttm_global_reference *); - }; - </programlisting> - <para> - There should be one global reference structure for your memory - manager as a whole, and there will be others for each object - created by the memory manager at runtime. Your global TTM should - have a type of TTM_GLOBAL_TTM_MEM. The size field for the global - object should be sizeof(struct ttm_mem_global), and the init and - release hooks should point at your driver-specific init and - release routines, which probably eventually call - ttm_mem_global_init and ttm_mem_global_release, respectively. - </para> - <para> - Once your global TTM accounting structure is set up and initialized - by calling ttm_global_item_ref() on it, - you need to create a buffer object TTM to - provide a pool for buffer object allocation by clients and the - kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO, - and its size should be sizeof(struct ttm_bo_global). Again, - driver-specific init and release functions may be provided, - likely eventually calling ttm_bo_global_init() and - ttm_bo_global_release(), respectively. Also, like the previous - object, ttm_global_item_ref() is used to create an initial reference - count for the TTM, which will call your initialization function. - </para> + </para> + <para> + The ttm_global_reference structure is made up of several fields: + </para> + <programlisting> + struct ttm_global_reference { + enum ttm_global_types global_type; + size_t size; + void *object; + int (*init) (struct ttm_global_reference *); + void (*release) (struct ttm_global_reference *); + }; + </programlisting> + <para> + There should be one global reference structure for your memory + manager as a whole, and there will be others for each object + created by the memory manager at runtime. Your global TTM should + have a type of TTM_GLOBAL_TTM_MEM. The size field for the global + object should be sizeof(struct ttm_mem_global), and the init and + release hooks should point at your driver-specific init and + release routines, which probably eventually call + ttm_mem_global_init and ttm_mem_global_release, respectively. + </para> + <para> + Once your global TTM accounting structure is set up and initialized + by calling ttm_global_item_ref() on it, + you need to create a buffer object TTM to + provide a pool for buffer object allocation by clients and the + kernel itself. 
The type of this object should be TTM_GLOBAL_TTM_BO, + and its size should be sizeof(struct ttm_bo_global). Again, + driver-specific init and release functions may be provided, + likely eventually calling ttm_bo_global_init() and + ttm_bo_global_release(), respectively. Also, like the previous + object, ttm_global_item_ref() is used to create an initial reference + count for the TTM, which will call your initialization function. + </para> </sect3> </sect2> <sect2 id="drm-gem"> @@ -566,19 +566,19 @@ char *date;</synopsis> using driver-specific ioctls. </para> <para> - On a fundamental level, GEM involves several operations: - <itemizedlist> - <listitem>Memory allocation and freeing</listitem> - <listitem>Command execution</listitem> - <listitem>Aperture management at command execution time</listitem> - </itemizedlist> - Buffer object allocation is relatively straightforward and largely + On a fundamental level, GEM involves several operations: + <itemizedlist> + <listitem>Memory allocation and freeing</listitem> + <listitem>Command execution</listitem> + <listitem>Aperture management at command execution time</listitem> + </itemizedlist> + Buffer object allocation is relatively straightforward and largely provided by Linux's shmem layer, which provides memory to back each object. </para> <para> Device-specific operations, such as command execution, pinning, buffer - read & write, mapping, and domain ownership transfers are left to + read & write, mapping, and domain ownership transfers are left to driver-specific ioctls. </para> <sect3> @@ -738,16 +738,16 @@ char *date;</synopsis> respectively. The conversion is handled by the DRM core without any driver-specific support. </para> - <para> - GEM also supports buffer sharing with dma-buf file descriptors through - PRIME. GEM-based drivers must use the provided helpers functions to - implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />. - Since sharing file descriptors is inherently more secure than the - easily guessable and global GEM names it is the preferred buffer - sharing mechanism. Sharing buffers through GEM names is only supported - for legacy userspace. Furthermore PRIME also allows cross-device - buffer sharing since it is based on dma-bufs. - </para> + <para> + GEM also supports buffer sharing with dma-buf file descriptors through + PRIME. GEM-based drivers must use the provided helpers functions to + implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />. + Since sharing file descriptors is inherently more secure than the + easily guessable and global GEM names it is the preferred buffer + sharing mechanism. Sharing buffers through GEM names is only supported + for legacy userspace. Furthermore PRIME also allows cross-device + buffer sharing since it is based on dma-bufs. + </para> </sect3> <sect3 id="drm-gem-objects-mapping"> <title>GEM Objects Mapping</title> @@ -852,7 +852,7 @@ char *date;</synopsis> <sect3> <title>Command Execution</title> <para> - Perhaps the most important GEM function for GPU devices is providing a + Perhaps the most important GEM function for GPU devices is providing a command execution interface to clients. Client programs construct command buffers containing references to previously allocated memory objects, and then submit them to GEM. 
At that point, GEM takes care to @@ -874,95 +874,101 @@ char *date;</synopsis> <title>GEM Function Reference</title> !Edrivers/gpu/drm/drm_gem.c </sect3> - </sect2> - <sect2> - <title>VMA Offset Manager</title> + </sect2> + <sect2> + <title>VMA Offset Manager</title> !Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager !Edrivers/gpu/drm/drm_vma_manager.c !Iinclude/drm/drm_vma_manager.h - </sect2> - <sect2 id="drm-prime-support"> - <title>PRIME Buffer Sharing</title> - <para> - PRIME is the cross device buffer sharing framework in drm, originally - created for the OPTIMUS range of multi-gpu platforms. To userspace - PRIME buffers are dma-buf based file descriptors. - </para> - <sect3> - <title>Overview and Driver Interface</title> - <para> - Similar to GEM global names, PRIME file descriptors are - also used to share buffer objects across processes. They offer - additional security: as file descriptors must be explicitly sent over - UNIX domain sockets to be shared between applications, they can't be - guessed like the globally unique GEM names. - </para> - <para> - Drivers that support the PRIME - API must set the DRIVER_PRIME bit in the struct - <structname>drm_driver</structname> - <structfield>driver_features</structfield> field, and implement the - <methodname>prime_handle_to_fd</methodname> and - <methodname>prime_fd_to_handle</methodname> operations. - </para> - <para> - <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, - uint32_t flags, int *prime_fd); + </sect2> + <sect2 id="drm-prime-support"> + <title>PRIME Buffer Sharing</title> + <para> + PRIME is the cross device buffer sharing framework in drm, originally + created for the OPTIMUS range of multi-gpu platforms. To userspace + PRIME buffers are dma-buf based file descriptors. + </para> + <sect3> + <title>Overview and Driver Interface</title> + <para> + Similar to GEM global names, PRIME file descriptors are + also used to share buffer objects across processes. They offer + additional security: as file descriptors must be explicitly sent over + UNIX domain sockets to be shared between applications, they can't be + guessed like the globally unique GEM names. + </para> + <para> + Drivers that support the PRIME + API must set the DRIVER_PRIME bit in the struct + <structname>drm_driver</structname> + <structfield>driver_features</structfield> field, and implement the + <methodname>prime_handle_to_fd</methodname> and + <methodname>prime_fd_to_handle</methodname> operations. + </para> + <para> + <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, int *prime_fd); int (*prime_fd_to_handle)(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle);</synopsis> - Those two operations convert a handle to a PRIME file descriptor and - vice versa. Drivers must use the kernel dma-buf buffer sharing framework - to manage the PRIME file descriptors. Similar to the mode setting - API PRIME is agnostic to the underlying buffer object manager, as - long as handles are 32bit unsigned integers. - </para> - <para> - While non-GEM drivers must implement the operations themselves, GEM - drivers must use the <function>drm_gem_prime_handle_to_fd</function> - and <function>drm_gem_prime_fd_to_handle</function> helper functions. 
- Those helpers rely on the driver - <methodname>gem_prime_export</methodname> and - <methodname>gem_prime_import</methodname> operations to create a dma-buf - instance from a GEM object (dma-buf exporter role) and to create a GEM - object from a dma-buf instance (dma-buf importer role). - </para> - <para> - <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev, - struct drm_gem_object *obj, - int flags); + struct drm_file *file_priv, int prime_fd, + uint32_t *handle);</synopsis> + Those two operations convert a handle to a PRIME file descriptor and + vice versa. Drivers must use the kernel dma-buf buffer sharing framework + to manage the PRIME file descriptors. Similar to the mode setting + API PRIME is agnostic to the underlying buffer object manager, as + long as handles are 32bit unsigned integers. + </para> + <para> + While non-GEM drivers must implement the operations themselves, GEM + drivers must use the <function>drm_gem_prime_handle_to_fd</function> + and <function>drm_gem_prime_fd_to_handle</function> helper functions. + Those helpers rely on the driver + <methodname>gem_prime_export</methodname> and + <methodname>gem_prime_import</methodname> operations to create a dma-buf + instance from a GEM object (dma-buf exporter role) and to create a GEM + object from a dma-buf instance (dma-buf importer role). + </para> + <para> + <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, - struct dma_buf *dma_buf);</synopsis> - These two operations are mandatory for GEM drivers that support - PRIME. - </para> - </sect3> - <sect3> - <title>PRIME Helper Functions</title> -!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers + struct dma_buf *dma_buf);</synopsis> + These two operations are mandatory for GEM drivers that support + PRIME. + </para> </sect3> - </sect2> - <sect2> - <title>PRIME Function References</title> + <sect3> + <title>PRIME Helper Functions</title> +!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers + </sect3> + </sect2> + <sect2> + <title>PRIME Function References</title> !Edrivers/gpu/drm/drm_prime.c - </sect2> - <sect2> - <title>DRM MM Range Allocator</title> - <sect3> - <title>Overview</title> + </sect2> + <sect2> + <title>DRM MM Range Allocator</title> + <sect3> + <title>Overview</title> !Pdrivers/gpu/drm/drm_mm.c Overview - </sect3> - <sect3> - <title>LRU Scan/Eviction Support</title> + </sect3> + <sect3> + <title>LRU Scan/Eviction Support</title> !Pdrivers/gpu/drm/drm_mm.c lru scan roaster - </sect3> + </sect3> </sect2> - <sect2> - <title>DRM MM Range Allocator Function References</title> + <sect2> + <title>DRM MM Range Allocator Function References</title> !Edrivers/gpu/drm/drm_mm.c !Iinclude/drm/drm_mm.h - </sect2> + </sect2> + <sect2> + <title>CMA Helper Functions Reference</title> +!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers +!Edrivers/gpu/drm/drm_gem_cma_helper.c +!Iinclude/drm/drm_gem_cma_helper.h + </sect2> </sect1> <!-- Internals: mode setting --> diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 15f8b3bfd1ee..56737e74b59d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4774,6 +4774,16 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev, if (PAGE_ALIGN(size) == 0) return -EINVAL; + /* + * handle, pitch and size are output parameters. Zero them out to + * prevent drivers from accidentally using uninitialized data. 
Since + * not all existing userspace is clearing these fields properly we + * cannot reject IOCTL with garbage in them. + */ + args->handle = 0; + args->pitch = 0; + args->size = 0; + return dev->driver->dumb_create(file_priv, dev, args); } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 973a9b6644d4..16a164770713 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -309,7 +309,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy); * drm_gem_handle_create_tail - internal functions to create a handle * @file_priv: drm file-private structure to register the handle for * @obj: object to register - * @handlep: pionter to return the created handle to the caller + * @handlep: pointer to return the created handle to the caller * * This expects the dev->object_name_lock to be held already and will drop it * before returning. Used to avoid races in establishing new handles when @@ -362,7 +362,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, } /** - * gem_handle_create - create a gem handle for an object + * drm_gem_handle_create - create a gem handle for an object * @file_priv: drm file-private structure to register the handle for * @obj: object to register * @handlep: pionter to return the created handle to the caller @@ -371,10 +371,9 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, * to the object, which includes a regular reference count. Callers * will likely want to dereference the object afterwards. */ -int -drm_gem_handle_create(struct drm_file *file_priv, - struct drm_gem_object *obj, - u32 *handlep) +int drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + u32 *handlep) { mutex_lock(&obj->dev->object_name_lock); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 0316310e2cc4..e419eedf751d 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -29,18 +29,31 @@ #include <drm/drm_gem_cma_helper.h> #include <drm/drm_vma_manager.h> -/* +/** + * DOC: cma helpers + * + * The Contiguous Memory Allocator reserves a pool of memory at early boot + * that is used to service requests for large blocks of contiguous memory. + * + * The DRM GEM/CMA helpers use this allocator as a means to provide buffer + * objects that are physically contiguous in memory. This is useful for + * display drivers that are unable to map scattered buffers via an IOMMU. + */ + +/** * __drm_gem_cma_create - Create a GEM CMA object without allocating memory - * @drm: The drm device - * @size: The GEM object size + * @drm: DRM device + * @size: size of the object to allocate * - * This function creates and initializes a GEM CMA object of the given size, but - * doesn't allocate any memory to back the object. + * This function creates and initializes a GEM CMA object of the given size, + * but doesn't allocate any memory to back the object. * - * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure. + * Returns: + * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative + * error code on failure. 
*/ static struct drm_gem_cma_object * -__drm_gem_cma_create(struct drm_device *drm, unsigned int size) +__drm_gem_cma_create(struct drm_device *drm, size_t size) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *gem_obj; @@ -69,14 +82,21 @@ error: return ERR_PTR(ret); } -/* +/** * drm_gem_cma_create - allocate an object with the given size + * @drm: DRM device + * @size: size of the object to allocate + * + * This function creates a CMA GEM object and allocates a contiguous chunk of + * memory as backing store. The backing memory has the writecombine attribute + * set. * - * returns a struct drm_gem_cma_object* on success or ERR_PTR values - * on failure. + * Returns: + * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative + * error code on failure. */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, - unsigned int size) + size_t size) { struct drm_gem_cma_object *cma_obj; int ret; @@ -104,17 +124,26 @@ error: } EXPORT_SYMBOL_GPL(drm_gem_cma_create); -/* - * drm_gem_cma_create_with_handle - allocate an object with the given - * size and create a gem handle on it +/** + * drm_gem_cma_create_with_handle - allocate an object with the given size and + * return a GEM handle to it + * @file_priv: DRM file-private structure to register the handle for + * @drm: DRM device + * @size: size of the object to allocate + * @handle: return location for the GEM handle + * + * This function creates a CMA GEM object, allocating a physically contiguous + * chunk of memory as backing store. The GEM object is then added to the list + * of object associated with the given file and a handle to it is returned. * - * returns a struct drm_gem_cma_object* on success or ERR_PTR values - * on failure. + * Returns: + * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative + * error code on failure. */ -static struct drm_gem_cma_object *drm_gem_cma_create_with_handle( - struct drm_file *file_priv, - struct drm_device *drm, unsigned int size, - unsigned int *handle) +static struct drm_gem_cma_object * +drm_gem_cma_create_with_handle(struct drm_file *file_priv, + struct drm_device *drm, size_t size, + uint32_t *handle) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *gem_obj; @@ -145,16 +174,19 @@ err_handle_create: return ERR_PTR(ret); } -/* - * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback - * function +/** + * drm_gem_cma_free_object - free resources associated with a CMA GEM object + * @gem_obj: GEM object to free + * + * This function frees the backing memory of the CMA GEM object, cleans up the + * GEM object state and frees the memory used to store the object itself. + * Drivers using the CMA helpers should set this as their DRM driver's + * ->gem_free_object() callback. */ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) { struct drm_gem_cma_object *cma_obj; - drm_gem_free_mmap_offset(gem_obj); - cma_obj = to_drm_gem_cma_obj(gem_obj); if (cma_obj->vaddr) { @@ -170,18 +202,26 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) } EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); -/* - * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback - * function +/** + * drm_gem_cma_dumb_create_internal - create a dumb buffer object + * @file_priv: DRM file-private structure to create the dumb buffer for + * @drm: DRM device + * @args: IOCTL data + * + * This aligns the pitch and size arguments to the minimum required. 
This is + * an internal helper that can be wrapped by a driver to account for hardware + * with more specific alignment requirements. It should not be used directly + * as the ->dumb_create() callback in a DRM driver. * - * This aligns the pitch and size arguments to the minimum required. wrap - * this into your own function if you need bigger alignment. + * Returns: + * 0 on success or a negative error code on failure. */ -int drm_gem_cma_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, struct drm_mode_create_dumb *args) +int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args) { + unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); struct drm_gem_cma_object *cma_obj; - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); if (args->pitch < min_pitch) args->pitch = min_pitch; @@ -189,18 +229,63 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv, if (args->size < args->pitch * args->height) args->size = args->pitch * args->height; - cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, - args->size, &args->handle); + cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size, + &args->handle); + return PTR_ERR_OR_ZERO(cma_obj); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal); + +/** + * drm_gem_cma_dumb_create - create a dumb buffer object + * @file_priv: DRM file-private structure to create the dumb buffer for + * @drm: DRM device + * @args: IOCTL data + * + * This function computes the pitch of the dumb buffer and rounds it up to an + * integer number of bytes per pixel. Drivers for hardware that doesn't have + * any additional restrictions on the pitch can directly use this function as + * their ->dumb_create() callback. + * + * For hardware with additional restrictions, drivers can adjust the fields + * set up by userspace and pass the IOCTL data along to the + * drm_gem_cma_dumb_create_internal() function. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_cma_dumb_create(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_cma_object *cma_obj; + + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + args->size = args->pitch * args->height; + + cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size, + &args->handle); return PTR_ERR_OR_ZERO(cma_obj); } EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); -/* - * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback - * function +/** + * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM + * object + * @file_priv: DRM file-private structure containing the GEM object + * @drm: DRM device + * @handle: GEM object handle + * @offset: return location for the fake mmap offset + * + * This function look up an object by its handle and returns the fake mmap + * offset associated with it. Drivers using the CMA helpers should set this + * as their DRM driver's ->dumb_map_offset() callback. + * + * Returns: + * 0 on success or a negative error code on failure. 
*/ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, uint32_t handle, uint64_t *offset) + struct drm_device *drm, u32 handle, + u64 *offset) { struct drm_gem_object *gem_obj; @@ -208,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, gem_obj = drm_gem_object_lookup(drm, file_priv, handle); if (!gem_obj) { - dev_err(drm->dev, "failed to lookup gem object\n"); + dev_err(drm->dev, "failed to lookup GEM object\n"); mutex_unlock(&drm->struct_mutex); return -EINVAL; } @@ -251,8 +336,20 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj, return ret; } -/* - * drm_gem_cma_mmap - (struct file_operation)->mmap callback function +/** + * drm_gem_cma_mmap - memory-map a CMA GEM object + * @filp: file object + * @vma: VMA for the area to be mapped + * + * This function implements an augmented version of the GEM DRM file mmap + * operation for CMA objects: In addition to the usual GEM VMA setup it + * immediately faults in the entire object instead of using on-demaind + * faulting. Drivers which employ the CMA helpers should use this function + * as their ->mmap() handler in the DRM device file's file_operations + * structure. + * + * Returns: + * 0 on success or a negative error code on failure. */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) { @@ -272,7 +369,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); #ifdef CONFIG_DEBUG_FS -void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) +/** + * drm_gem_cma_describe - describe a CMA GEM object for debugfs + * @cma_obj: CMA GEM object + * @m: debugfs file handle + * + * This function can be used to dump a human-readable representation of the + * CMA GEM object into a synthetic file. + */ +void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, + struct seq_file *m) { struct drm_gem_object *obj = &cma_obj->base; struct drm_device *dev = obj->dev; @@ -291,7 +397,18 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m EXPORT_SYMBOL_GPL(drm_gem_cma_describe); #endif -/* low-level interface prime helpers */ +/** + * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned + * pages for a CMA GEM object + * @obj: GEM object + * + * This function exports a scatter/gather table suitable for PRIME usage by + * calling the standard DMA mapping API. Drivers using the CMA helpers should + * set this as their DRM driver's ->gem_prime_get_sg_table() callback. + * + * Returns: + * A pointer to the scatter/gather table of pinned pages or NULL on failure. + */ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); @@ -315,6 +432,23 @@ out: } EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table); +/** + * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another + * driver's scatter/gather table of pinned pages + * @dev: device to import into + * @attach: DMA-BUF attachment + * @sgt: scatter/gather table of pinned pages + * + * This function imports a scatter/gather table exported via DMA-BUF by + * another driver. Imported buffers must be physically contiguous in memory + * (i.e. the scatter/gather table must contain a single entry). Drivers that + * use the CMA helpers should set this as their DRM driver's + * ->gem_prime_import_sg_table() callback. 
+ * + * Returns: + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative + * error code on failure. + */ struct drm_gem_object * drm_gem_cma_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, @@ -339,6 +473,18 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, } EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); +/** + * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function maps a buffer imported via DRM PRIME into a userspace + * process's address space. Drivers that use the CMA helpers should set this + * as their DRM driver's ->gem_prime_mmap() callback. + * + * Returns: + * 0 on success or a negative error code on failure. + */ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { @@ -357,6 +503,20 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, } EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); +/** + * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual + * address space + * @obj: GEM object + * + * This function maps a buffer exported via DRM PRIME into the kernel's + * virtual address space. Since the CMA buffers are already mapped into the + * kernel virtual address space this simply returns the cached virtual + * address. Drivers using the CMA helpers should set this as their DRM + * driver's ->gem_prime_vmap() callback. + * + * Returns: + * The kernel virtual address of the CMA GEM object's backing store. + */ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); @@ -365,6 +525,17 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) } EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); +/** + * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual + * address space + * @obj: GEM object + * @vaddr: kernel virtual address where the CMA GEM object was mapped + * + * This function removes a buffer exported via DRM PRIME from the kernel's + * virtual address space. This is a no-op because CMA buffers cannot be + * unmapped from kernel space. Drivers using the CMA helpers should set this + * as their DRM driver's ->gem_prime_vunmap() callback. 
+ */ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { /* Nothing to do */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index e4849413ee80..aeb91ed653c9 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -612,8 +612,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, { union omap_gem_size gsize; - /* in case someone tries to feed us a completely bogus stride: */ - args->pitch = align_pitch(args->pitch, args->width, args->bpp); + args->pitch = align_pitch(0, args->width, args->bpp); args->size = PAGE_ALIGN(args->pitch * args->height); gsize = (union omap_gem_size){ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 6c24ad7d03ef..6289e3797bc5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -126,9 +126,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, else align = 16 * args->bpp / 8; - args->pitch = roundup(max(args->pitch, min_pitch), align); + args->pitch = roundup(min_pitch, align); - return drm_gem_cma_dumb_create(file, dev, args); + return drm_gem_cma_dumb_create_internal(file, dev, args); } static struct drm_framebuffer * diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 2ff35f3de9c5..acd6af8a8e67 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -4,6 +4,13 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +/** + * struct drm_gem_cma_object - GEM object backed by CMA memory allocations + * @base: base GEM object + * @paddr: physical address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers + * @vaddr: kernel virtual address of the backing memory + */ struct drm_gem_cma_object { struct drm_gem_object base; dma_addr_t paddr; @@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) return container_of(gem_obj, struct drm_gem_cma_object, base); } -/* free gem object. */ +/* free GEM object */ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); -/* create memory region for drm framebuffer. */ +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* create memory region for DRM framebuffer */ int drm_gem_cma_dumb_create(struct drm_file *file_priv, - struct drm_device *drm, struct drm_mode_create_dumb *args); + struct drm_device *drm, + struct drm_mode_create_dumb *args); -/* map memory region for drm framebuffer to user space. */ +/* map memory region for DRM framebuffer to user space */ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, uint32_t handle, uint64_t *offset); + struct drm_device *drm, u32 handle, + u64 *offset); -/* set vm_flags and we can change the vm attribute to other one at here. */ +/* set vm_flags and we can change the VM attribute to other one at here */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); -/* allocate physical memory. */ +/* allocate physical memory */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, - unsigned int size); + size_t size); extern const struct vm_operations_struct drm_gem_cma_vm_ops; |
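
Editorial note, not part of the patch: to make the newly documented CMA helper API concrete, below is a minimal, hedged sketch of how a KMS driver could wire up the helpers and PRIME callbacks covered by this diff. The "foo" driver name, its 64-byte pitch alignment and the DRIVER_MODESET flag are assumptions for illustration; drm_gem_prime_export()/drm_gem_prime_import() are the stock drm_prime.c helpers rather than functions touched by this patch. The custom dumb_create wrapper mirrors the rcar-du change above, which switches to drm_gem_cma_dumb_create_internal() to apply hardware-specific pitch alignment.

/*
 * Illustrative sketch only -- not part of this patch. "foo" and its
 * 64-byte pitch requirement are hypothetical.
 */
#include <linux/module.h>

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Enforce the (hypothetical) hardware pitch alignment, then let the
	 * internal helper allocate the object and create the GEM handle.
	 */
	args->pitch = roundup(min_pitch, 64);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.mmap = drm_gem_cma_mmap,	/* faults in the whole buffer up front */
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.fops = &foo_fops,

	/* GEM/CMA object lifetime and mapping */
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,

	/* dumb buffers */
	.dumb_create = foo_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	/* PRIME export/import built on top of the CMA helpers */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
};

A driver with no pitch restrictions beyond the minimum could instead point .dumb_create at drm_gem_cma_dumb_create() directly, as the new kernel-doc describes.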
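
For the userspace side, here is a hedged sketch of the dumb-buffer flow that the drm_crtc.c hunk hardens: DRM_IOCTL_MODE_CREATE_DUMB fills in handle, pitch and size (which drm_mode_create_dumb_ioctl() now zeroes beforehand), DRM_IOCTL_MODE_MAP_DUMB returns the fake mmap offset served by drm_gem_cma_dumb_map_offset(), and mmap() on the DRM fd maps the backing memory. The device node, the 1024x768x32 mode and the omission of error handling are assumptions for brevity.

/* Editorial userspace sketch -- not part of this patch. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	uint8_t *fb;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = 768;
	create.bpp = 32;
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	/* on return: pitch >= width * bpp / 8, size >= pitch * height */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	memset(fb, 0xff, create.size);	/* paint the buffer white */

	munmap(fb, create.size);
	close(fd);
	return 0;
}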