Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c        | 24
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c        | 10
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c    |  4
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_context.c       |  6
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c       |  2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c           |  8
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c       |  8
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_fence.c         | 10
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_kms.c           | 35
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_mob.c           |  6
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_msg.c           |  6
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  4
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_shader.c        |  6
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c          |  4
15 files changed, 83 insertions, 52 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 };
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
 };
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
 	.fpfn = 0,
 	.lpfn = 0,
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
 	.busy_placement = &vram_placement_flags
 };
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
 	}
 };
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
 	.busy_placement = &gmr_placement_flags
 };
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
 	.busy_placement = &sys_ne_placement_flags
 };
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
 	{
 		.fpfn = 0,
 		.lpfn = 0,
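
The vmwgfx_buffer.c hunks above only add const to placement tables that are never modified after initialization, letting them live in read-only data. A minimal sketch of the resulting pattern, using a hypothetical vmw_example_* name and assuming the TTM headers of this kernel generation, where struct ttm_placement already takes const struct ttm_place pointers:

#include <drm/ttm/ttm_placement.h>

/* Illustrative only: a placement entry that can be placed in .rodata. */
static const struct ttm_place vmw_example_place = {
	.fpfn = 0,
	.lpfn = 0,
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

/* struct ttm_placement stores const struct ttm_place *, so no other
 * code needs to change when the tables themselves become const.
 */
struct ttm_placement vmw_example_placement = {
	.num_placement = 1,
	.placement = &vmw_example_place,
	.num_busy_placement = 1,
	.busy_placement = &vmw_example_place,
};
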
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..86178796de6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	if (ret)
 		return ret;
-	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
-					   &header->handle);
+	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+					    &header->handle);
 	if (!header->cb_header) {
 		ret = -ENOMEM;
 		goto out_no_cb_header;
@@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 	cb_hdr = header->cb_header;
 	offset = header->node.start << PAGE_SHIFT;
 	header->cmd = man->map + offset;
-	memset(cb_hdr, 0, sizeof(*cb_hdr));
 	if (man->using_mob) {
 		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 		return -ENOMEM;
-	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
-				 &header->handle);
+	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+				  &header->handle);
 	if (!dheader)
 		return -ENOMEM;
@@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 	cb_hdr = &dheader->cb_header;
 	header->cb_header = cb_hdr;
 	header->cmd = dheader->cmd;
-	memset(dheader, 0, sizeof(*dheader));
 	cb_hdr->status = SVGA_CB_STATUS_NONE;
 	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 	cb_hdr->ptr.pa = (u64)header->handle +
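
The two vmwgfx_cmdbuf.c hunks fold the explicit memset() into the allocation by switching from dma_pool_alloc() to dma_pool_zalloc(), which hands back already-zeroed pool memory. A hedged sketch of the general pattern follows; the struct, pool and function names are illustrative and not taken from the driver:

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct example_hdr {
	u32 flags;
	u64 pa;
};

static struct example_hdr *example_hdr_alloc(struct dma_pool *pool,
					     dma_addr_t *handle)
{
	/* Before: dma_pool_alloc() followed by memset(hdr, 0, sizeof(*hdr)). */
	struct example_hdr *hdr = dma_pool_zalloc(pool, GFP_KERNEL, handle);

	if (!hdr)
		return NULL;	/* caller turns this into -ENOMEM */

	/* Memory is already zeroed; only set the fields that differ. */
	return hdr;
}
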
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 	int ret;
 	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
-	if (unlikely(cres == NULL))
+	if (unlikely(!cres))
 		return -ENOMEM;
 	cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
 	int ret;
 	man = kzalloc(sizeof(*man), GFP_KERNEL);
-	if (man == NULL)
+	if (!man)
 		return ERR_PTR(-ENOMEM);
 	man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
 		uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i);
-		if (unlikely(uctx->cotables[i] == NULL)) {
-			ret = -ENOMEM;
+		if (unlikely(IS_ERR(uctx->cotables[i]))) {
+			ret = PTR_ERR(uctx->cotables[i]);
 			goto out_cotables;
 		}
 	}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	}
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (unlikely(ctx == NULL)) {
+	if (unlikely(!ctx)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_context_size);
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
 		return ERR_PTR(ret);
 	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
-	if (unlikely(vcotbl == NULL)) {
+	if (unlikely(!vcotbl)) {
 		ret = -ENOMEM;
 		goto out_no_alloc;
 	}
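
The vmwgfx_context.c hunk above stops treating a failed vmw_cotable_alloc() as a plain NULL: as the vmwgfx_cotable.c path shows, the allocator reports failure through ERR_PTR(), so the caller now tests IS_ERR() and propagates the original error with PTR_ERR() instead of collapsing every failure to -ENOMEM. A minimal sketch of that convention, with hypothetical example_* names:

#include <linux/err.h>
#include <linux/slab.h>

struct example_res {
	int id;
};

/* Illustrative only: failure is encoded in the returned pointer itself. */
static struct example_res *example_alloc(gfp_t gfp)
{
	struct example_res *res = kzalloc(sizeof(*res), gfp);

	if (!res)
		return ERR_PTR(-ENOMEM);
	return res;
}

static int example_caller(void)
{
	struct example_res *res = example_alloc(GFP_KERNEL);

	/* A NULL check would miss ERR_PTR() values entirely. */
	if (IS_ERR(res))
		return PTR_ERR(res);

	kfree(res);
	return 0;
}
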
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..4436d53ae16c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 };
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
 	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 	{0, 0, 0}
 };
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	char host_log[100] = {0};
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (unlikely(dev_priv == NULL)) {
+	if (unlikely(!dev_priv)) {
 		DRM_ERROR("Failed allocating a device private struct.\n");
 		return -ENOMEM;
 	}
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	int ret = -ENOMEM;
 	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
-	if (unlikely(vmw_fp == NULL))
+	if (unlikely(!vmw_fp))
 		return ret;
 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
 	struct vmw_master *vmaster;
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-	if (unlikely(vmaster == NULL))
+	if (unlikely(!vmaster))
 		return -ENOMEM;
 	vmw_master_init(vmaster);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..2cfb3c93f42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 	}
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(node == NULL)) {
+	if (unlikely(!node)) {
 		DRM_ERROR("Failed to allocate a resource validation " "entry.\n");
 		return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 	struct vmw_resource_relocation *rel;
 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
-	if (unlikely(rel == NULL)) {
+	if (unlikely(!rel)) {
 		DRM_ERROR("Failed to allocate a resource relocation.\n");
 		return -ENOMEM;
 	}
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 			   struct vmw_sw_context *sw_context,
 			   SVGA3dCmdHeader *header)
 {
-	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+	return -EINVAL;
 }
 static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
 /**
  * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
  *
  * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
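
The vmw_cmd_invalid() change above is more than a cleanup. The GNU shorthand x ? : y expands to x ? x : y, so capable(CAP_SYS_ADMIN) ? : -EINVAL yields 1 for a privileged caller, a positive value that is neither success (0) nor a -errno code, and -EINVAL otherwise; returning -EINVAL unconditionally makes the invalid-command path report a consistent error. A small standalone illustration of the operator, with hypothetical names and -22 standing in for -EINVAL:

#include <stdio.h>

/* x ? : y is GNU C for x ? x : y; the condition's value is reused. */
static int check(int privileged)
{
	return privileged ? : -22;
}

int main(void)
{
	/* Prints "1 -22": the privileged case returns a bare truth
	 * value, not 0 and not a negative error code.
	 */
	printf("%d %d\n", check(1), check(0));
	return 0;
}
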
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..b8bc5bc7de7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 {
 	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
-	if (unlikely(fman == NULL))
+	if (unlikely(!fman))
 		return NULL;
 	fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 	int ret;
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL))
+	if (unlikely(!fence))
 		return -ENOMEM;
 	ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 		return ret;
 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
-	if (unlikely(ufence == NULL)) {
+	if (unlikely(!ufence)) {
 		ret = -ENOMEM;
 		goto out_no_object;
 	}
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-	if (unlikely(eaction == NULL))
+	if (unlikely(!eaction))
 		return -ENOMEM;
 	eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 	int ret;
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (unlikely(event == NULL)) {
+	if (unlikely(!event)) {
 		DRM_ERROR("Failed to allocate an event.\n");
 		ret = -ENOMEM;
 		goto out_no_space;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..d2b03d4a3c86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 	struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL);
-	if (unlikely(gman == NULL))
+	if (unlikely(!gman))
 		return -ENOMEM;
 	spin_lock_init(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..625ba24f143f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	hotspot_x = du->hotspot_x;
 	hotspot_y = du->hotspot_y;
+
+	if (plane->fb) {
+		hotspot_x += plane->fb->hot_x;
+		hotspot_y += plane->fb->hot_y;
+	}
+
 	du->cursor_surface = vps->surf;
 	du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 		vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, du->cursor_y + hotspot_y);
+
+		du->core_hotspot_x = hotspot_x - du->hotspot_x;
+		du->core_hotspot_y = hotspot_y - du->hotspot_y;
 	} else {
 		DRM_ERROR("Failed to update cursor image\n");
 	}
@@ -1558,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
 }
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+			  struct drm_atomic_state *state,
+			  bool nonblock)
+{
+	return drm_atomic_helper_commit(dev, state, false);
+}
+
+
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 	.atomic_check = vmw_kms_atomic_check_modeset,
-	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_commit = vmw_kms_atomic_commit,
 };
 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
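
The vmwgfx_kms.c change routes .atomic_commit through a wrapper whose only job is to force the nonblock argument to false, because nonblocking commits were causing synchronization issues for vmwgfx at the time. Sketched generically with hypothetical example_* names, the pattern is to keep the drm_mode_config_funcs signature intact while pinning the helper argument inside the driver:

#include <drm/drm_atomic_helper.h>

/* Illustrative only: delegate to the helper, but always commit
 * synchronously regardless of what the caller requested.
 */
static int example_atomic_commit(struct drm_device *dev,
				 struct drm_atomic_state *state,
				 bool nonblock)
{
	return drm_atomic_helper_commit(dev, state, false);
}

static const struct drm_mode_config_funcs example_kms_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = example_atomic_commit,
};

A wrapper is needed, rather than plugging drm_atomic_helper_commit in directly as before, because the flag can only be overridden from driver code that matches the callback signature.
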
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 	if (dev_priv->has_dx) {
 		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;
 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
 	} else {
 		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;
 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
 {
 	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
-	if (unlikely(mob == NULL))
+	if (unlikely(!mob))
 		return NULL;
 	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		reply_len = ebx;
 		reply = kzalloc(reply_len + 1, GFP_KERNEL);
-		if (reply == NULL) {
+		if (!reply) {
 			DRM_ERROR("Cannot allocate memory for reply\n");
 			return -ENOMEM;
 		}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 	msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
 		return -ENOMEM;
 	}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
 	msg_len = strlen(log) + strlen("log ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory for log message\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	int ret;
 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-	if (unlikely(user_bo == NULL)) {
+	if (unlikely(!user_bo)) {
 		DRM_ERROR("Failed to allocate a buffer.\n");
 		return -ENOMEM;
 	}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	}
 	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
-	if (unlikely(backup == NULL))
+	if (unlikely(!backup))
 		return -ENOMEM;
 	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	}
 	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
+	if (unlikely(!ushader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_shader_size);
 		ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	}
 	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
-	if (unlikely(shader == NULL)) {
+	if (unlikely(!shader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_size);
 		ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	/* Allocate and pin a DMA buffer */
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (unlikely(buf == NULL))
+	if (unlikely(!buf))
 		return -ENOMEM;
 	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..5284e8d2f7ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 		 * something arbitrarily large and we will reject any layout
 		 * that doesn't fit prim_bb_mem later
 		 */
-		dev->mode_config.max_width = 16384;
-		dev->mode_config.max_height = 16384;
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
 	}
 	vmw_kms_create_implicit_placement_property(dev_priv, false);
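
The final vmwgfx_stdu.c hunk halves the advertised KMS limits from 16384 to 8192; as the comment retained in the hunk notes, the value is deliberately generous and individual layouts are still rejected later if they do not fit prim_bb_mem. As a generic illustration with a hypothetical function name and values, such bounds are simply fields set on mode_config during display initialization:

#include <drm/drm_mode_config.h>

/* Illustrative only: cap what userspace may request up front; the
 * driver can still veto specific configurations later (for example
 * against available surface memory).
 */
static void example_init_mode_limits(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width  = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width  = 8192;
	dev->mode_config.max_height = 8192;
}
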