Diffstat (limited to 'drivers')
222 files changed, 9395 insertions, 4512 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 5277a0ee5704..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); if (gsi >= 0) { acpi_unregister_gsi(gsi); - dev->irq = 0; dev->irq_managed = 0; } } diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index cb529e9a82dd..d826bf3e62c8 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -106,7 +106,7 @@ struct nvme_queue { dma_addr_t cq_dma_addr; u32 __iomem *q_db; u16 q_depth; - u16 cq_vector; + s16 cq_vector; u16 sq_head; u16 sq_tail; u16 cq_head; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index eb7682dc123b..81bf297f1034 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus, } /* Checks whether the given window number is available */ + +/* On Armada XP, 375 and 38x the MBus window 13 has the remap + * capability, like windows 0 to 7. However, the mvebu-mbus driver + * isn't currently taking into account this special case, which means + * that when window 13 is actually used, the remap registers are left + * to 0, making the device using this MBus window unavailable. The + * quick fix for stable is to not use window 13. A follow up patch + * will correctly handle this window. +*/ static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, const int win) { void __iomem *addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win); u32 ctrl = readl(addr + WIN_CTRL_OFF); + + if (win == 13) + return false; + return !(ctrl & WIN_CTRL_ENABLE); } diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index b709749c8639..4eb1c772ded7 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -219,7 +219,10 @@ struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev); /* generic functions for user-populated AGP memory types */ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type); void agp_alloc_page_array(size_t size, struct agp_memory *mem); -void agp_free_page_array(struct agp_memory *mem); +static inline void agp_free_page_array(struct agp_memory *mem) +{ + kvfree(mem->pages); +} /* generic routines for agp>=3 */ diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 0fbccce1cee9..f002fa5d1887 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -98,17 +98,6 @@ void agp_alloc_page_array(size_t size, struct agp_memory *mem) } EXPORT_SYMBOL(agp_alloc_page_array); -void agp_free_page_array(struct agp_memory *mem) -{ - if (is_vmalloc_addr(mem->pages)) { - vfree(mem->pages); - } else { - kfree(mem->pages); - } -} -EXPORT_SYMBOL(agp_free_page_array); - - static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) { struct agp_memory *new; diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 0595dc6c453e..f1e33d08dd83 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c @@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base) } static void -kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) +kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) { - void __iomem *base = IOMEM(timer_base); int loop_limit = 4; /* @@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, 
uint32_t *msw, uint32_t *lsw) */ while (--loop_limit) { - *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET); - *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET); - if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET)) + *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); + *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); + if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) break; } if (!loop_limit) { diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 9403061a2acc..83564c9cfdbe 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) writel_relaxed(value, reg_base + offset); if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { - stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; - switch (offset & EXYNOS4_MCT_L_MASK) { + stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; + switch (offset & ~EXYNOS4_MCT_L_MASK) { case MCT_L_TCON_OFFSET: mask = 1 << 3; /* L_TCON write status */ break; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 0f665b8f2461..f150ca82bfaf 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; ced->rating = 200; - ced->cpumask = cpumask_of(0); + ced->cpumask = cpu_possible_mask; ced->set_next_event = sh_tmu_clock_event_next; ced->set_mode = sh_tmu_clock_event_mode; ced->suspend = sh_tmu_clock_event_suspend; diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index 70da9eb52a42..e9ed439a5b65 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -1,3 +1,6 @@ -obj-y += drm/ vga/ +# drm/tegra depends on host1x, so if both drivers are built-in care must be +# taken to initialize them in the correct order. Link order is the only way +# to ensure this currently. 
obj-$(CONFIG_TEGRA_HOST1X) += host1x/ +obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 308c104ccdbd..151a050129e7 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -62,12 +62,13 @@ config DRM_TTM config DRM_GEM_CMA_HELPER bool - depends on DRM + depends on DRM && HAVE_DMA_ATTRS help Choose this if you need the GEM CMA helper functions config DRM_KMS_CMA_HELPER bool + depends on DRM && HAVE_DMA_ATTRS select DRM_GEM_CMA_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index cf0eed8208b5..2c239b99de64 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ - drm_modeset_lock.o drm_atomic.o + drm_modeset_lock.o drm_atomic.o drm_bridge.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1ba8332419fa..5bc32c26b989 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -183,16 +183,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->shared_resources = *gpu_resources; /* calculate max size of mqds needed for queues */ - size = max_num_of_processes * - max_num_of_queues_per_process * - kfd->device_info->mqd_size_aligned; + size = max_num_of_queues_per_device * + kfd->device_info->mqd_size_aligned; /* * calculate max size of runlist packet. * There can be only 2 packets at once */ - size += (max_num_of_processes * sizeof(struct pm4_map_process) + - max_num_of_processes * max_num_of_queues_per_process * + size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) + + max_num_of_queues_per_device * sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2; /* Add size of HIQ & DIQ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b189f9791c90..36b95e16eab1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -135,6 +135,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + if (list_empty(&qpd->queues_list)) { retval = allocate_vmid(dqm, qpd, q); if (retval != 0) { @@ -160,9 +167,20 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, } list_add(&q->list, &qpd->queues_list); - dqm->queue_count++; + if (q->properties.is_active) + dqm->queue_count++; + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) dqm->sdma_queue_count++; + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
+ */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; } @@ -296,7 +314,17 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, list_del(&q->list); if (list_empty(&qpd->queues_list)) deallocate_vmid(dqm, qpd, q); - dqm->queue_count--; + if (q->properties.is_active) + dqm->queue_count--; + + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + out: mutex_unlock(&dqm->lock); return retval; @@ -470,10 +498,14 @@ int init_pipelines(struct device_queue_manager *dqm, for (i = 0; i < pipes_num; i++) { inx = i + first_pipe; + /* + * HPD buffer on GTT is allocated by amdkfd, no need to waste + * space in GTT for pipelines we don't initialize + */ pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); /* = log2(bytes/4)-1 */ - kfd2kgd->init_pipeline(dqm->dev->kgd, i, + kfd2kgd->init_pipeline(dqm->dev->kgd, inx, CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); } @@ -488,8 +520,7 @@ static int init_scheduler(struct device_queue_manager *dqm) pr_debug("kfd: In %s\n", __func__); - retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); - + retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); return retval; } @@ -744,6 +775,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, pr_debug("kfd: In func %s\n", __func__); mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); + return -EPERM; + } + + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. + */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + list_add(&kq->list, &qpd->priv_queue_list); dqm->queue_count++; qpd->is_debug = true; @@ -767,6 +813,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, dqm->queue_count--; qpd->is_debug = false; execute_queues_cpsch(dqm, false); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type. + */ + dqm->total_queue_count++; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); mutex_unlock(&dqm->lock); } @@ -793,6 +846,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, mutex_lock(&dqm->lock); + if (dqm->total_queue_count >= max_num_of_queues_per_device) { + pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", + dqm->total_queue_count); + retval = -EPERM; + goto out; + } + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) select_sdma_engine_id(q); @@ -817,6 +877,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) dqm->sdma_queue_count++; + /* + * Unconditionally increment this counter, regardless of the queue's + * type or whether the queue is active. 
+ */ + dqm->total_queue_count++; + + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); out: mutex_unlock(&dqm->lock); @@ -952,12 +1020,21 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm->sdma_queue_count--; list_del(&q->list); - dqm->queue_count--; + if (q->properties.is_active) + dqm->queue_count--; execute_queues_cpsch(dqm, false); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + /* + * Unconditionally decrement this counter, regardless of the queue's + * type + */ + dqm->total_queue_count--; + pr_debug("Total of %d queues are accountable so far\n", + dqm->total_queue_count); + mutex_unlock(&dqm->lock); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e7b17b28330e..d64f86cda34f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -144,6 +144,7 @@ struct device_queue_manager { unsigned int processes_count; unsigned int queue_count; unsigned int sdma_queue_count; + unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; unsigned int sdma_bitmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index ac5445415667..3c6221905bc4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); MODULE_PARM_DESC(sched_policy, "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)"); -int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; -module_param(max_num_of_processes, int, 0444); -MODULE_PARM_DESC(max_num_of_processes, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); - -int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; -module_param(max_num_of_queues_per_process, int, 0444); -MODULE_PARM_DESC(max_num_of_queues_per_process, - "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); +int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; +module_param(max_num_of_queues_per_device, int, 0444); +MODULE_PARM_DESC(max_num_of_queues_per_device, + "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); bool kgd2kfd_init(unsigned interface_version, const struct kfd2kgd_calls *f2g, @@ -100,16 +95,10 @@ static int __init kfd_module_init(void) } /* Verify module parameters */ - if ((max_num_of_processes < 0) || - (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { - pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); - return -1; - } - - if ((max_num_of_queues_per_process < 0) || - (max_num_of_queues_per_process > - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { - pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); + if ((max_num_of_queues_per_device < 0) || + (max_num_of_queues_per_device > + KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { + pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); return -1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index a318743cdcc2..a09e18a339f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ 
-94,6 +94,9 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_pipe_priority = 1; m->cp_hqd_queue_priority = 15; + if (q->format == KFD_QUEUE_FORMAT_AQL) + m->cp_hqd_iq_rptr = AQL_ENABLE; + *mqd = m; if (gart_addr != NULL) *gart_addr = addr; @@ -187,7 +190,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_vmid = q->vmid; if (q->format == KFD_QUEUE_FORMAT_AQL) { - m->cp_hqd_iq_rptr = AQL_ENABLE; m->cp_hqd_pq_control |= NO_UPDATE_RPTR; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); int kfd_pasid_init(void) { - pasid_limit = max_num_of_processes; + pasid_limit = KFD_MAX_NUM_OF_PROCESSES; pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); if (!pasid_bitmap) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1b35a9c87437..5a44f2fecf38 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -52,20 +52,19 @@ #define kfd_alloc_struct(ptr_to_struct) \ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) -/* Kernel module parameter to specify maximum number of supported processes */ -extern int max_num_of_processes; - -#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 #define KFD_MAX_NUM_OF_PROCESSES 512 +#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 /* - * Kernel module parameter to specify maximum number of supported queues - * per process + * Kernel module parameter to specify maximum number of supported queues per + * device */ -extern int max_num_of_queues_per_process; +extern int max_num_of_queues_per_device; -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 -#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ + (KFD_MAX_NUM_OF_PROCESSES * \ + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) #define KFD_KERNEL_QUEUE_SIZE 2048 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 513eeb6e402a..ca93ab0449c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("kfd: in %s\n", __func__); found = find_first_zero_bit(pqm->queue_slot_bitmap, - max_num_of_queues_per_process); + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); pr_debug("kfd: the new slot id %lu\n", found); - if (found >= max_num_of_queues_per_process) { + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { pr_info("amdkfd: Can not open more queues for process with pasid %d\n", pqm->process->pasid); return -ENOMEM; @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) INIT_LIST_HEAD(&pqm->queues); pqm->queue_slot_bitmap = - kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_BYTE), GFP_KERNEL); if (pqm->queue_slot_bitmap == NULL) return -ENOMEM; @@ -206,6 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, pqn->kq = NULL; retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, &q->properties.vmid); + pr_debug("DQM returned %d for create_queue\n", retval); print_queue(q); break; case KFD_QUEUE_TYPE_DIQ: @@ -226,7 
+227,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("kfd: error dqm create queue\n"); + pr_debug("Error dqm create queue\n"); goto err_create_queue; } @@ -245,7 +246,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, err_create_queue: kfree(pqn); err_allocate_pqn: + /* check if queues list is empty unregister process from device */ clear_bit(*qid, pqm->queue_slot_bitmap); + if (list_empty(&pqm->queues)) + dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); return retval; } diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 1a085625538a..99b4f0698a30 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -1,6 +1,6 @@ config DRM_ATMEL_HLCDC tristate "DRM Support for ATMEL HLCDC Display Controller" - depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC + depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_KMS_FB_HELPER diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index b70f3c8d4e8a..f38bbcdf929b 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -1,10 +1,13 @@ -config DRM_PTN3460 - tristate "PTN3460 DP/LVDS bridge" +config DRM_DW_HDMI + tristate depends on DRM select DRM_KMS_HELPER - ---help--- -config DRM_DW_HDMI - tristate +config DRM_PTN3460 + tristate "PTN3460 DP/LVDS bridge" depends on DRM + depends on OF select DRM_KMS_HELPER + select DRM_PANEL + ---help--- + ptn3460 eDP-LVDS bridge chip driver. diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c index 6ea000504173..cd6a70647e32 100644 --- a/drivers/gpu/drm/bridge/dw_hdmi.c +++ b/drivers/gpu/drm/bridge/dw_hdmi.c @@ -1373,12 +1373,6 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) dw_hdmi_poweron(hdmi); } -static void dw_hdmi_bridge_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static void dw_hdmi_bridge_nop(struct drm_bridge *bridge) { /* do nothing */ @@ -1468,7 +1462,6 @@ struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .post_disable = dw_hdmi_bridge_nop, .mode_set = dw_hdmi_bridge_mode_set, .mode_fixup = dw_hdmi_bridge_mode_fixup, - .destroy = dw_hdmi_bridge_destroy, }; static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) @@ -1531,8 +1524,8 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) hdmi->bridge = bridge; bridge->driver_private = hdmi; - - ret = drm_bridge_init(drm, bridge, &dw_hdmi_bridge_funcs); + bridge->funcs = &dw_hdmi_bridge_funcs; + ret = drm_bridge_attach(drm, bridge); if (ret) { DRM_ERROR("Failed to initialize bridge with drm\n"); return -EINVAL; @@ -1649,7 +1642,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master, dw_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); if (ret) - return ret; + goto err_iahb; /* * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c index d466696ed5e8..826833e396f0 100644 --- a/drivers/gpu/drm/bridge/ptn3460.c +++ b/drivers/gpu/drm/bridge/ptn3460.c @@ -13,20 +13,23 @@ * GNU General Public License for more details. 
*/ +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_gpio.h> -#include <linux/i2c.h> -#include <linux/gpio.h> -#include <linux/delay.h> +#include <linux/of_graph.h> -#include "drmP.h" -#include "drm_edid.h" -#include "drm_crtc.h" -#include "drm_crtc_helper.h" +#include <drm/drm_panel.h> #include "bridge/ptn3460.h" +#include "drm_crtc.h" +#include "drm_crtc_helper.h" +#include "drm_edid.h" +#include "drmP.h" + #define PTN3460_EDID_ADDR 0x0 #define PTN3460_EDID_EMULATION_ADDR 0x84 #define PTN3460_EDID_ENABLE_EMULATION 0 @@ -36,15 +39,27 @@ struct ptn3460_bridge { struct drm_connector connector; struct i2c_client *client; - struct drm_encoder *encoder; - struct drm_bridge *bridge; + struct drm_bridge bridge; struct edid *edid; - int gpio_pd_n; - int gpio_rst_n; + struct drm_panel *panel; + struct gpio_desc *gpio_pd_n; + struct gpio_desc *gpio_rst_n; u32 edid_emulation; bool enabled; }; +static inline struct ptn3460_bridge * + bridge_to_ptn3460(struct drm_bridge *bridge) +{ + return container_of(bridge, struct ptn3460_bridge, bridge); +} + +static inline struct ptn3460_bridge * + connector_to_ptn3460(struct drm_connector *connector) +{ + return container_of(connector, struct ptn3460_bridge, connector); +} + static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr, u8 *buf, int len) { @@ -92,7 +107,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR, ptn_bridge->edid_emulation); if (ret) { - DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret); + DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret); return ret; } @@ -102,7 +117,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val); if (ret) { - DRM_ERROR("Failed to write edid value, ret=%d\n", ret); + DRM_ERROR("Failed to write EDID value, ret=%d\n", ret); return ret; } @@ -111,19 +126,21 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge) static void ptn3460_pre_enable(struct drm_bridge *bridge) { - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; if (ptn_bridge->enabled) return; - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_set_value(ptn_bridge->gpio_pd_n, 1); + gpiod_set_value(ptn_bridge->gpio_pd_n, 1); + + gpiod_set_value(ptn_bridge->gpio_rst_n, 0); + usleep_range(10, 20); + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) { - gpio_set_value(ptn_bridge->gpio_rst_n, 0); - udelay(10); - gpio_set_value(ptn_bridge->gpio_rst_n, 1); + if (drm_panel_prepare(ptn_bridge->panel)) { + DRM_ERROR("failed to prepare panel\n"); + return; } /* @@ -135,73 +152,67 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge) ret = ptn3460_select_edid(ptn_bridge); if (ret) - DRM_ERROR("Select edid failed ret=%d\n", ret); + DRM_ERROR("Select EDID failed ret=%d\n", ret); ptn_bridge->enabled = true; } static void ptn3460_enable(struct drm_bridge *bridge) { + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); + + if (drm_panel_enable(ptn_bridge->panel)) { + DRM_ERROR("failed to enable panel\n"); + return; + } } static void ptn3460_disable(struct drm_bridge *bridge) { - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); if (!ptn_bridge->enabled) 
return; ptn_bridge->enabled = false; - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_set_value(ptn_bridge->gpio_rst_n, 1); + if (drm_panel_disable(ptn_bridge->panel)) { + DRM_ERROR("failed to disable panel\n"); + return; + } - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_set_value(ptn_bridge->gpio_pd_n, 0); + gpiod_set_value(ptn_bridge->gpio_rst_n, 1); + gpiod_set_value(ptn_bridge->gpio_pd_n, 0); } static void ptn3460_post_disable(struct drm_bridge *bridge) { -} + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); -void ptn3460_bridge_destroy(struct drm_bridge *bridge) -{ - struct ptn3460_bridge *ptn_bridge = bridge->driver_private; - - drm_bridge_cleanup(bridge); - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_free(ptn_bridge->gpio_pd_n); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_free(ptn_bridge->gpio_rst_n); - /* Nothing else to free, we've got devm allocated memory */ + if (drm_panel_unprepare(ptn_bridge->panel)) { + DRM_ERROR("failed to unprepare panel\n"); + return; + } } -struct drm_bridge_funcs ptn3460_bridge_funcs = { - .pre_enable = ptn3460_pre_enable, - .enable = ptn3460_enable, - .disable = ptn3460_disable, - .post_disable = ptn3460_post_disable, - .destroy = ptn3460_bridge_destroy, -}; - -int ptn3460_get_modes(struct drm_connector *connector) +static int ptn3460_get_modes(struct drm_connector *connector) { struct ptn3460_bridge *ptn_bridge; u8 *edid; - int ret, num_modes; + int ret, num_modes = 0; bool power_off; - ptn_bridge = container_of(connector, struct ptn3460_bridge, connector); + ptn_bridge = connector_to_ptn3460(connector); if (ptn_bridge->edid) return drm_add_edid_modes(connector, ptn_bridge->edid); power_off = !ptn_bridge->enabled; - ptn3460_pre_enable(ptn_bridge->bridge); + ptn3460_pre_enable(&ptn_bridge->bridge); edid = kmalloc(EDID_LENGTH, GFP_KERNEL); if (!edid) { - DRM_ERROR("Failed to allocate edid\n"); + DRM_ERROR("Failed to allocate EDID\n"); return 0; } @@ -209,7 +220,6 @@ int ptn3460_get_modes(struct drm_connector *connector) EDID_LENGTH); if (ret) { kfree(edid); - num_modes = 0; goto out; } @@ -220,124 +230,188 @@ int ptn3460_get_modes(struct drm_connector *connector) out: if (power_off) - ptn3460_disable(ptn_bridge->bridge); + ptn3460_disable(&ptn_bridge->bridge); return num_modes; } -struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) +static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) { - struct ptn3460_bridge *ptn_bridge; - - ptn_bridge = container_of(connector, struct ptn3460_bridge, connector); + struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - return ptn_bridge->encoder; + return ptn_bridge->bridge.encoder; } -struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { +static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, .best_encoder = ptn3460_best_encoder, }; -enum drm_connector_status ptn3460_detect(struct drm_connector *connector, +static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, bool force) { return connector_status_connected; } -void ptn3460_connector_destroy(struct drm_connector *connector) +static void ptn3460_connector_destroy(struct drm_connector *connector) { drm_connector_cleanup(connector); } -struct drm_connector_funcs ptn3460_connector_funcs = { +static struct drm_connector_funcs ptn3460_connector_funcs = { .dpms = drm_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = ptn3460_detect, .destroy = 
ptn3460_connector_destroy, }; -int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder, - struct i2c_client *client, struct device_node *node) +int ptn3460_bridge_attach(struct drm_bridge *bridge) { + struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; - struct drm_bridge *bridge; - struct ptn3460_bridge *ptn_bridge; - bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("Failed to allocate drm bridge\n"); - return -ENOMEM; + if (!bridge->encoder) { + DRM_ERROR("Parent encoder object not found"); + return -ENODEV; + } + + ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD; + ret = drm_connector_init(bridge->dev, &ptn_bridge->connector, + &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (ret) { + DRM_ERROR("Failed to initialize connector with drm\n"); + return ret; } + drm_connector_helper_add(&ptn_bridge->connector, + &ptn3460_connector_helper_funcs); + drm_connector_register(&ptn_bridge->connector); + drm_mode_connector_attach_encoder(&ptn_bridge->connector, + bridge->encoder); - ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL); + if (ptn_bridge->panel) + drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector); + + drm_helper_hpd_irq_event(ptn_bridge->connector.dev); + + return ret; +} + +static struct drm_bridge_funcs ptn3460_bridge_funcs = { + .pre_enable = ptn3460_pre_enable, + .enable = ptn3460_enable, + .disable = ptn3460_disable, + .post_disable = ptn3460_post_disable, + .attach = ptn3460_bridge_attach, +}; + +static int ptn3460_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct ptn3460_bridge *ptn_bridge; + struct device_node *endpoint, *panel_node; + int ret; + + ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL); if (!ptn_bridge) { - DRM_ERROR("Failed to allocate ptn bridge\n"); return -ENOMEM; } - ptn_bridge->client = client; - ptn_bridge->encoder = encoder; - ptn_bridge->bridge = bridge; - ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0); - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) { - ret = gpio_request_one(ptn_bridge->gpio_pd_n, - GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N"); - if (ret) { - DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret); - return ret; + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + panel_node = of_graph_get_remote_port_parent(endpoint); + if (panel_node) { + ptn_bridge->panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (!ptn_bridge->panel) + return -EPROBE_DEFER; } } - ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) { - /* - * Request the reset pin low to avoid the bridge being - * initialized prematurely - */ - ret = gpio_request_one(ptn_bridge->gpio_rst_n, - GPIOF_OUT_INIT_LOW, "PTN3460_RST_N"); - if (ret) { - DRM_ERROR("Request reset-gpio failed (%d)\n", ret); - gpio_free(ptn_bridge->gpio_pd_n); - return ret; - } + ptn_bridge->client = client; + + ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown"); + if (IS_ERR(ptn_bridge->gpio_pd_n)) { + ret = PTR_ERR(ptn_bridge->gpio_pd_n); + dev_err(dev, "cannot get gpio_pd_n %d\n", ret); + return ret; } - ret = of_property_read_u32(node, "edid-emulation", - &ptn_bridge->edid_emulation); + ret = gpiod_direction_output(ptn_bridge->gpio_pd_n, 1); if (ret) { - DRM_ERROR("Can't read edid emulation value\n"); - goto err; + DRM_ERROR("cannot configure gpio_pd_n\n"); + return ret; } - ret = 
drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs); + ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset"); + if (IS_ERR(ptn_bridge->gpio_rst_n)) { + ret = PTR_ERR(ptn_bridge->gpio_rst_n); + DRM_ERROR("cannot get gpio_rst_n %d\n", ret); + return ret; + } + /* + * Request the reset pin low to avoid the bridge being + * initialized prematurely + */ + ret = gpiod_direction_output(ptn_bridge->gpio_rst_n, 0); if (ret) { - DRM_ERROR("Failed to initialize bridge with drm\n"); - goto err; + DRM_ERROR("cannot configure gpio_rst_n\n"); + return ret; } - bridge->driver_private = ptn_bridge; - encoder->bridge = bridge; + ret = of_property_read_u32(dev->of_node, "edid-emulation", + &ptn_bridge->edid_emulation); + if (ret) { + dev_err(dev, "Can't read EDID emulation value\n"); + return ret; + } - ret = drm_connector_init(dev, &ptn_bridge->connector, - &ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs; + ptn_bridge->bridge.of_node = dev->of_node; + ret = drm_bridge_add(&ptn_bridge->bridge); if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - goto err; + DRM_ERROR("Failed to add bridge\n"); + return ret; } - drm_connector_helper_add(&ptn_bridge->connector, - &ptn3460_connector_helper_funcs); - drm_connector_register(&ptn_bridge->connector); - drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder); + + i2c_set_clientdata(client, ptn_bridge); return 0; +} -err: - if (gpio_is_valid(ptn_bridge->gpio_pd_n)) - gpio_free(ptn_bridge->gpio_pd_n); - if (gpio_is_valid(ptn_bridge->gpio_rst_n)) - gpio_free(ptn_bridge->gpio_rst_n); - return ret; +static int ptn3460_remove(struct i2c_client *client) +{ + struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client); + + drm_bridge_remove(&ptn_bridge->bridge); + + return 0; } -EXPORT_SYMBOL(ptn3460_init); + +static const struct i2c_device_id ptn3460_i2c_table[] = { + {"nxp,ptn3460", 0}, + {}, +}; +MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table); + +static const struct of_device_id ptn3460_match[] = { + { .compatible = "nxp,ptn3460" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ptn3460_match); + +static struct i2c_driver ptn3460_driver = { + .id_table = ptn3460_i2c_table, + .probe = ptn3460_probe, + .remove = ptn3460_remove, + .driver = { + .name = "nxp,ptn3460", + .owner = THIS_MODULE, + .of_match_table = ptn3460_match, + }, +}; +module_i2c_driver(ptn3460_driver); + +MODULE_AUTHOR("Sean Paul <seanpaul@chromium.org>"); +MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 4c5c9c3899e0..c2e9c5283136 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -134,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) connector->funcs->atomic_destroy_state(connector, state->connector_states[i]); + state->connector_states[i] = NULL; } for (i = 0; i < config->num_crtc; i++) { @@ -144,6 +145,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) crtc->funcs->atomic_destroy_state(crtc, state->crtc_states[i]); + state->crtc_states[i] = NULL; } for (i = 0; i < config->num_total_plane; i++) { @@ -154,6 +156,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) plane->funcs->atomic_destroy_state(plane, state->plane_states[i]); + state->plane_states[i] = NULL; } } EXPORT_SYMBOL(drm_atomic_state_clear); @@ -241,7 +244,13 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property 
*property, uint64_t val) { - if (crtc->funcs->atomic_set_property) + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + /* FIXME: Mode prop is missing, which also controls ->enable. */ + if (property == config->prop_active) { + state->active = val; + } else if (crtc->funcs->atomic_set_property) return crtc->funcs->atomic_set_property(crtc, state, property, val); return -EINVAL; } @@ -282,6 +291,13 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, * * TODO: Add generic modeset state checks once we support those. */ + + if (state->active && !state->enable) { + DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n", + crtc->base.id); + return -EINVAL; + } + return 0; } @@ -978,7 +994,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!crtc) continue; - if (crtc_state->mode_changed) { + if (crtc_state->mode_changed || + crtc_state->active_changed) { DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n", crtc->base.id); return -EINVAL; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 541ba833ed36..7e3a52b97c7d 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -297,13 +297,22 @@ mode_fixup(struct drm_atomic_state *state) } } - - ret = funcs->mode_fixup(encoder, &crtc_state->mode, - &crtc_state->adjusted_mode); - if (!ret) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", - encoder->base.id, encoder->name); - return -EINVAL; + if (funcs->atomic_check) { + ret = funcs->atomic_check(encoder, crtc_state, + conn_state); + if (ret) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n", + encoder->base.id, encoder->name); + return ret; + } + } else { + ret = funcs->mode_fixup(encoder, &crtc_state->mode, + &crtc_state->adjusted_mode); + if (!ret) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); + return -EINVAL; + } } } @@ -330,6 +339,12 @@ mode_fixup(struct drm_atomic_state *state) return 0; } +static bool +needs_modeset(struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed; +} + /** * drm_atomic_helper_check - validate state object for modeset changes * @dev: DRM device @@ -404,12 +419,27 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, crtc = state->crtcs[i]; crtc_state = state->crtc_states[i]; - if (!crtc || !crtc_state->mode_changed) + if (!crtc) continue; - DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n", + /* + * We must set ->active_changed after walking connectors for + * otherwise an update that only changes active would result in + * a full modeset because update_connector_routing force that. + */ + if (crtc->state->active != crtc_state->active) { + DRM_DEBUG_KMS("[CRTC:%d] active changed\n", + crtc->base.id); + crtc_state->active_changed = true; + } + + if (!needs_modeset(crtc_state)) + continue; + + DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", crtc->base.id, - crtc_state->enable ? 'y' : 'n'); + crtc_state->enable ? 'y' : 'n', + crtc_state->active ? 
'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -545,6 +575,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) struct drm_connector *connector; struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; + struct drm_crtc_state *old_crtc_state; old_conn_state = old_state->connector_states[i]; connector = old_state->connectors[i]; @@ -554,6 +585,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) if (!old_conn_state || !old_conn_state->crtc) continue; + old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; + + if (!old_crtc_state->active) + continue; + encoder = old_conn_state->best_encoder; /* We shouldn't get this far if we didn't previously have @@ -564,6 +600,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = encoder->helper_private; + DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call disable hooks twice. @@ -572,7 +611,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) encoder->bridge->funcs->disable(encoder->bridge); /* Right function depends upon target state. */ - if (connector->state->crtc) + if (connector->state->crtc && funcs->prepare) funcs->prepare(encoder); else if (funcs->disable) funcs->disable(encoder); @@ -586,17 +625,26 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) for (i = 0; i < ncrtcs; i++) { struct drm_crtc_helper_funcs *funcs; struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; crtc = old_state->crtcs[i]; + old_crtc_state = old_state->crtc_states[i]; /* Shut down everything that needs a full modeset. */ - if (!crtc || !crtc->state->mode_changed) + if (!crtc || !needs_modeset(crtc->state)) + continue; + + if (!old_crtc_state->active) continue; funcs = crtc->helper_private; + DRM_DEBUG_KMS("disabling [CRTC:%d]\n", + crtc->base.id); + + /* Right function depends upon target state. */ - if (crtc->state->enable) + if (crtc->state->enable && funcs->prepare) funcs->prepare(crtc); else if (funcs->disable) funcs->disable(crtc); @@ -675,8 +723,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - if (crtc->state->enable) + if (crtc->state->enable) { + DRM_DEBUG_KMS("modeset on [CRTC:%d]\n", + crtc->base.id); + funcs->mode_set_nofb(crtc); + } } for (i = 0; i < old_state->num_connector; i++) { @@ -697,6 +749,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) mode = &new_crtc_state->mode; adjusted_mode = &new_crtc_state->adjusted_mode; + if (!new_crtc_state->mode_changed) + continue; + + DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call mode_set hooks twice. @@ -749,13 +807,23 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, crtc = old_state->crtcs[i]; /* Need to filter out CRTCs where only planes change. 
*/ - if (!crtc || !crtc->state->mode_changed) + if (!crtc || !needs_modeset(crtc->state)) + continue; + + if (!crtc->state->active) continue; funcs = crtc->helper_private; - if (crtc->state->enable) - funcs->commit(crtc); + if (crtc->state->enable) { + DRM_DEBUG_KMS("enabling [CRTC:%d]\n", + crtc->base.id); + + if (funcs->enable) + funcs->enable(crtc); + else + funcs->commit(crtc); + } } for (i = 0; i < old_state->num_connector; i++) { @@ -768,9 +836,15 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, if (!connector || !connector->state->best_encoder) continue; + if (!connector->state->crtc->state->active) + continue; + encoder = connector->state->best_encoder; funcs = encoder->helper_private; + DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); + /* * Each encoder has at most one connector (since we always steal * it away), so we won't call call enable hooks twice. @@ -778,7 +852,10 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, if (encoder->bridge) encoder->bridge->funcs->pre_enable(encoder->bridge); - funcs->commit(encoder); + if (funcs->enable) + funcs->enable(encoder); + else + funcs->commit(encoder); if (encoder->bridge) encoder->bridge->funcs->enable(encoder->bridge); @@ -868,6 +945,11 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, if (!crtc->state->enable) continue; + /* Legacy cursor ioctls are completely unsynced, and userspace + * relies on that (by doing tons of cursor updates). */ + if (old_state->legacy_cursor_update) + continue; + if (!framebuffer_changed(dev, old_state, crtc)) continue; @@ -1108,12 +1190,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, funcs = plane->helper_private; - if (!funcs || !funcs->atomic_update) + if (!funcs) continue; old_plane_state = old_state->plane_states[i]; - funcs->atomic_update(plane, old_plane_state); + /* + * Special-case disabling the plane if drivers support it. + */ + if (drm_atomic_plane_disabling(plane, old_plane_state) && + funcs->atomic_disable) + funcs->atomic_disable(plane, old_plane_state); + else + funcs->atomic_update(plane, old_plane_state); } for (i = 0; i < ncrtcs; i++) { @@ -1294,6 +1383,9 @@ retry: if (ret != 0) goto fail; + if (plane == crtc->cursor) + state->legacy_cursor_update = true; + /* Driver takes ownership of state on successful commit. */ return 0; fail: @@ -1369,6 +1461,9 @@ retry: plane_state->src_h = 0; plane_state->src_w = 0; + if (plane == plane->crtc->cursor) + state->legacy_cursor_update = true; + ret = drm_atomic_commit(state); if (ret != 0) goto fail; @@ -1518,6 +1613,7 @@ retry: WARN_ON(set->num_connectors); crtc_state->enable = false; + crtc_state->active = false; ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); if (ret != 0) @@ -1532,6 +1628,7 @@ retry: WARN_ON(!set->num_connectors); crtc_state->enable = true; + crtc_state->active = true; drm_mode_copy(&crtc_state->mode, set->mode); ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); @@ -1844,6 +1941,83 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_page_flip); /** + * drm_atomic_helper_connector_dpms() - connector dpms helper implementation + * @connector: affected connector + * @mode: DPMS mode + * + * This is the main helper function provided by the atomic helper framework for + * implementing the legacy DPMS connector interface. It computes the new desired + * ->active state for the corresponding CRTC (if the connector is enabled) and + * updates it. 
+ */ +void drm_atomic_helper_connector_dpms(struct drm_connector *connector, + int mode) +{ + struct drm_mode_config *config = &connector->dev->mode_config; + struct drm_atomic_state *state; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + struct drm_connector *tmp_connector; + int ret; + bool active = false; + + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + connector->dpms = mode; + crtc = connector->state->crtc; + + if (!crtc) + return; + + /* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */ + state = drm_atomic_state_alloc(connector->dev); + if (!state) + return; + + state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); +retry: + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return; + + WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); + + list_for_each_entry(tmp_connector, &config->connector_list, head) { + if (connector->state->crtc != crtc) + continue; + + if (connector->dpms == DRM_MODE_DPMS_ON) { + active = true; + break; + } + } + crtc_state->active = active; + + ret = drm_atomic_commit(state); + if (ret != 0) + goto fail; + + /* Driver takes ownership of state on successful async commit. */ + return; +fail: + if (ret == -EDEADLK) + goto backoff; + + drm_atomic_state_free(state); + + WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret); + + return; +backoff: + drm_atomic_state_clear(state); + drm_atomic_legacy_backoff(state); + + goto retry; +} +EXPORT_SYMBOL(drm_atomic_helper_connector_dpms); + +/** * DOC: atomic state reset and initialization * * Both the drm core and the atomic helpers assume that there is always the full @@ -1894,6 +2068,7 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc) if (state) { state->mode_changed = false; + state->active_changed = false; state->planes_changed = false; state->event = NULL; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c new file mode 100644 index 000000000000..d1187e571c6d --- /dev/null +++ b/drivers/gpu/drm/drm_bridge.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/err.h> +#include <linux/module.h> + +#include <drm/drm_crtc.h> + +#include "drm/drmP.h" + +static DEFINE_MUTEX(bridge_lock); +static LIST_HEAD(bridge_list); + +int drm_bridge_add(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_add_tail(&bridge->list, &bridge_list); + mutex_unlock(&bridge_lock); + + return 0; +} +EXPORT_SYMBOL(drm_bridge_add); + +void drm_bridge_remove(struct drm_bridge *bridge) +{ + mutex_lock(&bridge_lock); + list_del_init(&bridge->list); + mutex_unlock(&bridge_lock); +} +EXPORT_SYMBOL(drm_bridge_remove); + +extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge) +{ + if (!dev || !bridge) + return -EINVAL; + + if (bridge->dev) + return -EBUSY; + + bridge->dev = dev; + + if (bridge->funcs->attach) + return bridge->funcs->attach(bridge); + + return 0; +} +EXPORT_SYMBOL(drm_bridge_attach); + +#ifdef CONFIG_OF +struct drm_bridge *of_drm_find_bridge(struct device_node *np) +{ + struct drm_bridge *bridge; + + mutex_lock(&bridge_lock); + + list_for_each_entry(bridge, &bridge_list, list) { + if (bridge->of_node == np) { + mutex_unlock(&bridge_lock); + return bridge; + } + } + + mutex_unlock(&bridge_lock); + return NULL; +} +EXPORT_SYMBOL(of_drm_find_bridge); +#endif + +MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); +MODULE_DESCRIPTION("DRM bridge infrastructure"); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index ad2934ba0bd2..6b00173d1be4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -691,6 +691,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, if (cursor) cursor->possible_crtcs = 1 << drm_crtc_index(crtc); + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + drm_object_attach_property(&crtc->base, config->prop_active, 0); + } + return 0; } EXPORT_SYMBOL(drm_crtc_init_with_planes); @@ -783,7 +787,7 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info, if (formats && num_formats) { fmts = kmemdup(formats, sizeof(*formats) * num_formats, GFP_KERNEL); - if (!formats) + if (!fmts) return -ENOMEM; } @@ -1062,61 +1066,6 @@ void drm_connector_unplug_all(struct drm_device *dev) EXPORT_SYMBOL(drm_connector_unplug_all); /** - * drm_bridge_init - initialize a drm transcoder/bridge - * @dev: drm device - * @bridge: transcoder/bridge to set up - * @funcs: bridge function table - * - * Initialises a preallocated bridge. Bridges should be - * subclassed as part of driver connector objects. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, - const struct drm_bridge_funcs *funcs) -{ - int ret; - - drm_modeset_lock_all(dev); - - ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE); - if (ret) - goto out; - - bridge->dev = dev; - bridge->funcs = funcs; - - list_add_tail(&bridge->head, &dev->mode_config.bridge_list); - dev->mode_config.num_bridge++; - - out: - drm_modeset_unlock_all(dev); - return ret; -} -EXPORT_SYMBOL(drm_bridge_init); - -/** - * drm_bridge_cleanup - cleans up an initialised bridge - * @bridge: bridge to cleanup - * - * Cleans up the bridge but doesn't free the object. 
- */ -void drm_bridge_cleanup(struct drm_bridge *bridge) -{ - struct drm_device *dev = bridge->dev; - - drm_modeset_lock_all(dev); - drm_mode_object_put(dev, &bridge->base); - list_del(&bridge->head); - dev->mode_config.num_bridge--; - drm_modeset_unlock_all(dev); - - memset(bridge, 0, sizeof(*bridge)); -} -EXPORT_SYMBOL(drm_bridge_cleanup); - -/** * drm_encoder_init - Init a preallocated encoder * @dev: drm device * @encoder: the encoder to init @@ -1481,6 +1430,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.prop_crtc_id = prop; + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, + "ACTIVE"); + if (!prop) + return -ENOMEM; + dev->mode_config.prop_active = prop; + return 0; } @@ -1705,7 +1660,6 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; - total_objects += dev->mode_config.num_bridge; group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL); if (!group->id_list) @@ -1714,7 +1668,6 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; - group->num_bridges = 0; return 0; } @@ -1734,7 +1687,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; - struct drm_bridge *bridge; int ret; ret = drm_mode_group_init(dev, group); @@ -1752,11 +1704,6 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev, group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; - list_for_each_entry(bridge, &dev->mode_config.bridge_list, head) - group->id_list[group->num_crtcs + group->num_encoders + - group->num_connectors + group->num_bridges++] = - bridge->base.id; - return 0; } EXPORT_SYMBOL(drm_mode_group_init_legacy_group); @@ -3810,7 +3757,7 @@ static struct drm_property *property_create_range(struct drm_device *dev, } /** - * drm_property_create_range - create a new ranged property type + * drm_property_create_range - create a new unsigned ranged property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property @@ -3821,8 +3768,8 @@ static struct drm_property *property_create_range(struct drm_device *dev, * object with drm_object_attach_property. The returned property object must be * freed with drm_property_destroy. * - * Userspace is allowed to set any integer value in the (min, max) range - * inclusive. + * Userspace is allowed to set any unsigned integer value in the (min, max) + * range inclusive. * * Returns: * A pointer to the newly created property on success, NULL on failure. @@ -3836,6 +3783,24 @@ struct drm_property *drm_property_create_range(struct drm_device *dev, int flags } EXPORT_SYMBOL(drm_property_create_range); +/** + * drm_property_create_signed_range - create a new signed ranged property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @min: minimum value of the property + * @max: maximum value of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. 
+ * + * Userspace is allowed to set any signed integer value in the (min, max) + * range inclusive. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ struct drm_property *drm_property_create_signed_range(struct drm_device *dev, int flags, const char *name, int64_t min, int64_t max) @@ -3845,6 +3810,23 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev, } EXPORT_SYMBOL(drm_property_create_signed_range); +/** + * drm_property_create_object - create a new object property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @type: object type from DRM_MODE_OBJECT_* defines + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * Userspace is only allowed to set this to any property value of the given + * @type. Only useful for atomic properties, which is enforced. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ struct drm_property *drm_property_create_object(struct drm_device *dev, int flags, const char *name, uint32_t type) { @@ -3852,6 +3834,9 @@ struct drm_property *drm_property_create_object(struct drm_device *dev, flags |= DRM_MODE_PROP_OBJECT; + if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC))) + return NULL; + property = drm_property_create(dev, flags, name, 1); if (!property) return NULL; @@ -3863,6 +3848,28 @@ struct drm_property *drm_property_create_object(struct drm_device *dev, EXPORT_SYMBOL(drm_property_create_object); /** + * drm_property_create_bool - create a new boolean property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy. + * + * This is implemented as a ranged property with only {0, 1} as valid values. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. 
+ */ +struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, + const char *name) +{ + return drm_property_create_range(dev, flags, name, 0, 1); +} +EXPORT_SYMBOL(drm_property_create_bool); + +/** * drm_property_add_enum - add a possible value to an enumeration property * @property: enumeration property to change * @index: index of the new enumeration @@ -5373,7 +5380,6 @@ void drm_mode_config_init(struct drm_device *dev) INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); - INIT_LIST_HEAD(&dev->mode_config.bridge_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); @@ -5413,7 +5419,6 @@ void drm_mode_config_cleanup(struct drm_device *dev) struct drm_connector *connector, *ot; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; - struct drm_bridge *bridge, *brt; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_property_blob *blob, *bt; @@ -5424,11 +5429,6 @@ void drm_mode_config_cleanup(struct drm_device *dev) encoder->funcs->destroy(encoder); } - list_for_each_entry_safe(bridge, brt, - &dev->mode_config.bridge_list, head) { - bridge->funcs->destroy(bridge); - } - list_for_each_entry_safe(connector, ot, &dev->mode_config.connector_list, head) { connector->funcs->destroy(connector); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 79968e39c8d0..f1283878ff6d 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -354,6 +354,37 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) EXPORT_SYMBOL(drm_dp_link_power_up); /** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_down); + +/** * drm_dp_link_configure() - configure a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 75647e7f012b..10574a0c3a55 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -185,8 +185,15 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) return; } - dev->driver->disable_vblank(dev, crtc); - vblank->enabled = false; + /* + * Only disable vblank interrupts if they're enabled. This avoids + * calling the ->disable_vblank() operation in atomic context with the + * hardware potentially runtime suspended. + */ + if (vblank->enabled) { + dev->driver->disable_vblank(dev, crtc); + vblank->enabled = false; + } /* No further vblank irq's will be processed after * this point. 
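The property constructors added above (signed range, object and bool) are thin wrappers around the generic drm_property_create() machinery. A minimal sketch of how a driver might use them together; the property names and the CRTC used as the attach target are hypothetical, not taken from the patch:

#include <drm/drm_crtc.h>

/* Sketch only: "hypothetical-*" names and the attach target are made up. */
static int example_create_props(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_property *enable, *offset, *crtc_ref;

	/* boolean: implemented as a ranged property restricted to {0, 1} */
	enable = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
					  "hypothetical-enable");

	/* signed range: userspace may set any value in [-100, 100] inclusive */
	offset = drm_property_create_signed_range(dev, 0,
						  "hypothetical-offset",
						  -100, 100);

	/* object property: the value must be an object ID of the given type,
	 * and the helper above insists on DRM_MODE_PROP_ATOMIC */
	crtc_ref = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
					      "hypothetical-crtc-id",
					      DRM_MODE_OBJECT_CRTC);

	if (!enable || !offset || !crtc_ref)
		return -ENOMEM;

	/* attach to a mode object with an initial value */
	drm_object_attach_property(&crtc->base, enable, 0);
	drm_object_attach_property(&crtc->base, offset, 0);

	return 0;
}
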
Get current hardware vblank count and diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index c0644bb865f2..2d5ca8eec13a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -323,8 +323,6 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_long); int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, const struct mipi_dsi_msg *msg) { - const u8 *tx = msg->tx_buf; - if (!packet || !msg) return -EINVAL; @@ -353,8 +351,10 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, packet->header[2] = (msg->tx_len >> 8) & 0xff; packet->payload_length = msg->tx_len; - packet->payload = tx; + packet->payload = msg->tx_buf; } else { + const u8 *tx = msg->tx_buf; + packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0; packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0; } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 20d977a52c58..487d0e35c134 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1011,6 +1011,62 @@ drm_mode_validate_size(const struct drm_display_mode *mode, } EXPORT_SYMBOL(drm_mode_validate_size); +#define MODE_STATUS(status) [MODE_ ## status + 3] = #status + +static const char * const drm_mode_status_names[] = { + MODE_STATUS(OK), + MODE_STATUS(HSYNC), + MODE_STATUS(VSYNC), + MODE_STATUS(H_ILLEGAL), + MODE_STATUS(V_ILLEGAL), + MODE_STATUS(BAD_WIDTH), + MODE_STATUS(NOMODE), + MODE_STATUS(NO_INTERLACE), + MODE_STATUS(NO_DBLESCAN), + MODE_STATUS(NO_VSCAN), + MODE_STATUS(MEM), + MODE_STATUS(VIRTUAL_X), + MODE_STATUS(VIRTUAL_Y), + MODE_STATUS(MEM_VIRT), + MODE_STATUS(NOCLOCK), + MODE_STATUS(CLOCK_HIGH), + MODE_STATUS(CLOCK_LOW), + MODE_STATUS(CLOCK_RANGE), + MODE_STATUS(BAD_HVALUE), + MODE_STATUS(BAD_VVALUE), + MODE_STATUS(BAD_VSCAN), + MODE_STATUS(HSYNC_NARROW), + MODE_STATUS(HSYNC_WIDE), + MODE_STATUS(HBLANK_NARROW), + MODE_STATUS(HBLANK_WIDE), + MODE_STATUS(VSYNC_NARROW), + MODE_STATUS(VSYNC_WIDE), + MODE_STATUS(VBLANK_NARROW), + MODE_STATUS(VBLANK_WIDE), + MODE_STATUS(PANEL), + MODE_STATUS(INTERLACE_WIDTH), + MODE_STATUS(ONE_WIDTH), + MODE_STATUS(ONE_HEIGHT), + MODE_STATUS(ONE_SIZE), + MODE_STATUS(NO_REDUCED), + MODE_STATUS(NO_STEREO), + MODE_STATUS(UNVERIFIED), + MODE_STATUS(BAD), + MODE_STATUS(ERROR), +}; + +#undef MODE_STATUS + +static const char *drm_get_mode_status_name(enum drm_mode_status status) +{ + int index = status + 3; + + if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names))) + return ""; + + return drm_mode_status_names[index]; +} + /** * drm_mode_prune_invalid - remove invalid modes from mode list * @dev: DRM device @@ -1032,8 +1088,9 @@ void drm_mode_prune_invalid(struct drm_device *dev, list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); - DRM_DEBUG_KMS("Not using %s mode %d\n", - mode->name, mode->status); + DRM_DEBUG_KMS("Not using %s mode: %s\n", + mode->name, + drm_get_mode_status_name(mode->status)); } drm_mode_destroy(dev, mode); } diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index f24c4cfe674b..5ba5792bfdba 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -435,7 +435,8 @@ int drm_plane_helper_commit(struct drm_plane *plane, goto out; } - if (plane_funcs->prepare_fb && plane_state->fb) { + if (plane_funcs->prepare_fb && plane_state->fb && + plane_state->fb != old_fb) { ret = plane_funcs->prepare_fb(plane, plane_state->fb); if (ret) goto out; @@ -449,13 +450,28 @@ int drm_plane_helper_commit(struct drm_plane *plane, 
crtc_funcs[i]->atomic_begin(crtc[i]); } - plane_funcs->atomic_update(plane, plane_state); + /* + * Drivers may optionally implement the ->atomic_disable callback, so + * special-case that here. + */ + if (drm_atomic_plane_disabling(plane, plane_state) && + plane_funcs->atomic_disable) + plane_funcs->atomic_disable(plane, plane_state); + else + plane_funcs->atomic_update(plane, plane_state); for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush) crtc_funcs[i]->atomic_flush(crtc[i]); } + /* + * If we only moved the plane and didn't change fb's, there's no need to + * wait for vblank. + */ + if (plane->state->fb == old_fb) + goto out; + for (i = 0; i < 2; i++) { if (!crtc[i]) continue; @@ -484,7 +500,7 @@ out: } /** - * drm_plane_helper_update() - Helper for primary plane update + * drm_plane_helper_update() - Transitional helper for plane update * @plane: plane object to update * @crtc: owning CRTC of owning plane * @fb: framebuffer to flip onto plane @@ -541,7 +557,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, EXPORT_SYMBOL(drm_plane_helper_update); /** - * drm_plane_helper_disable() - Helper for primary plane disable + * drm_plane_helper_disable() - Transitional helper for plane disable * @plane: plane to disable * * Provides a default plane disable handler using the atomic plane update diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index cc3d6d6d67e0..5c99d3773212 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -339,19 +339,51 @@ static ssize_t select_subconnector_show(struct device *device, drm_get_dvi_i_select_name((int)subconnector)); } -static struct device_attribute connector_attrs[] = { - __ATTR_RO(status), - __ATTR_RO(enabled), - __ATTR_RO(dpms), - __ATTR_RO(modes), +static DEVICE_ATTR_RO(status); +static DEVICE_ATTR_RO(enabled); +static DEVICE_ATTR_RO(dpms); +static DEVICE_ATTR_RO(modes); + +static struct attribute *connector_dev_attrs[] = { + &dev_attr_status.attr, + &dev_attr_enabled.attr, + &dev_attr_dpms.attr, + &dev_attr_modes.attr, + NULL }; /* These attributes are for both DVI-I connectors and all types of tv-out. */ -static struct device_attribute connector_attrs_opt1[] = { - __ATTR_RO(subconnector), - __ATTR_RO(select_subconnector), +static DEVICE_ATTR_RO(subconnector); +static DEVICE_ATTR_RO(select_subconnector); + +static struct attribute *connector_opt_dev_attrs[] = { + &dev_attr_subconnector.attr, + &dev_attr_select_subconnector.attr, + NULL }; +static umode_t connector_opt_dev_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_connector *connector = to_drm_connector(dev); + + /* + * In the long run it maybe a good idea to make one set of + * optionals per connector type. 
+ */ + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_TV: + return attr->mode; + } + + return 0; +} + static struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 0444, @@ -359,6 +391,27 @@ static struct bin_attribute edid_attr = { .read = edid_show, }; +static struct bin_attribute *connector_bin_attrs[] = { + &edid_attr, + NULL +}; + +static const struct attribute_group connector_dev_group = { + .attrs = connector_dev_attrs, + .bin_attrs = connector_bin_attrs, +}; + +static const struct attribute_group connector_opt_dev_group = { + .attrs = connector_opt_dev_attrs, + .is_visible = connector_opt_dev_is_visible, +}; + +static const struct attribute_group *connector_dev_groups[] = { + &connector_dev_group, + &connector_opt_dev_group, + NULL +}; + /** * drm_sysfs_connector_add - add a connector to sysfs * @connector: connector to add @@ -371,73 +424,27 @@ static struct bin_attribute edid_attr = { int drm_sysfs_connector_add(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - int attr_cnt = 0; - int opt_cnt = 0; - int i; - int ret; if (connector->kdev) return 0; - connector->kdev = device_create(drm_class, dev->primary->kdev, - 0, connector, "card%d-%s", - dev->primary->index, connector->name); + connector->kdev = + device_create_with_groups(drm_class, dev->primary->kdev, 0, + connector, connector_dev_groups, + "card%d-%s", dev->primary->index, + connector->name); DRM_DEBUG("adding \"%s\" to sysfs\n", connector->name); if (IS_ERR(connector->kdev)) { DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev)); - ret = PTR_ERR(connector->kdev); - goto out; - } - - /* Standard attributes */ - - for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) { - ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]); - if (ret) - goto err_out_files; + return PTR_ERR(connector->kdev); } - /* Optional attributes */ - /* - * In the long run it maybe a good idea to make one set of - * optionals per connector type. 
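The sysfs conversion above replaces the open-coded device_create_file()/device_remove_file() loops with static attribute groups, so the driver core creates, filters and tears the files down itself (the EDID bin_attr rides in the same group). A stripped-down sketch of the same pattern for a hypothetical device; the names are illustrative, not from the DRM code:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t status_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "ok\n");
}
static DEVICE_ATTR_RO(status);

static struct attribute *example_attrs[] = {
	&dev_attr_status.attr,
	NULL
};

/* Hide an attribute entirely when it does not apply to this instance. */
static umode_t example_is_visible(struct kobject *kobj, struct attribute *attr,
				  int idx)
{
	struct device *dev = kobj_to_dev(kobj);

	return dev_get_drvdata(dev) ? attr->mode : 0;
}

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_is_visible,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL
};

/* All files are registered atomically with the device itself:
 *
 *	dev = device_create_with_groups(class, parent, 0, drvdata,
 *					example_groups, "example%d", id);
 */
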
- */ - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_TV: - for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) { - ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]); - if (ret) - goto err_out_files; - } - break; - default: - break; - } - - ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr); - if (ret) - goto err_out_files; - /* Let userspace know we have a new connector */ drm_sysfs_hotplug_event(dev); return 0; - -err_out_files: - for (i = 0; i < opt_cnt; i++) - device_remove_file(connector->kdev, &connector_attrs_opt1[i]); - for (i = 0; i < attr_cnt; i++) - device_remove_file(connector->kdev, &connector_attrs[i]); - device_unregister(connector->kdev); - -out: - return ret; } /** @@ -455,16 +462,11 @@ out: */ void drm_sysfs_connector_remove(struct drm_connector *connector) { - int i; - if (!connector->kdev) return; DRM_DEBUG("removing \"%s\" from sysfs\n", connector->name); - for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) - device_remove_file(connector->kdev, &connector_attrs[i]); - sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr); device_unregister(connector->kdev); connector->kdev = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index 34d46aa75416..46f149737bc8 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/of_graph.h> #include <linux/gpio.h> #include <linux/component.h> #include <linux/phy/phy.h> @@ -993,32 +994,20 @@ static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { .best_encoder = exynos_dp_best_encoder, }; -static bool find_bridge(const char *compat, struct bridge_init *bridge) -{ - bridge->client = NULL; - bridge->node = of_find_compatible_node(NULL, NULL, compat); - if (!bridge->node) - return false; - - bridge->client = of_find_i2c_device_by_node(bridge->node); - if (!bridge->client) - return false; - - return true; -} - /* returns the number of bridges attached */ -static int exynos_drm_attach_lcd_bridge(struct drm_device *dev, +static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, struct drm_encoder *encoder) { - struct bridge_init bridge; int ret; - if (find_bridge("nxp,ptn3460", &bridge)) { - ret = ptn3460_init(dev, encoder, bridge.client, bridge.node); - if (!ret) - return 1; + encoder->bridge = dp->bridge; + dp->bridge->encoder = encoder; + ret = drm_bridge_attach(encoder->dev, dp->bridge); + if (ret) { + DRM_ERROR("Failed to attach bridge to drm\n"); + return ret; } + return 0; } @@ -1032,9 +1021,11 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display, dp->encoder = encoder; /* Pre-empt DP connector creation if there's a bridge */ - ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder); - if (ret) - return 0; + if (dp->bridge) { + ret = exynos_drm_attach_lcd_bridge(dp, encoder); + if (!ret) + return 0; + } connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -1241,7 +1232,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data) } } - if (!dp->panel) { + if (!dp->panel && !dp->bridge) { ret = exynos_dp_dt_parse_panel(dp); if (ret) return ret; @@ -1325,7 +1316,7 @@ static const struct component_ops exynos_dp_ops = { static int 
exynos_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *panel_node; + struct device_node *panel_node, *bridge_node, *endpoint; struct exynos_dp_device *dp; int ret; @@ -1351,6 +1342,18 @@ static int exynos_dp_probe(struct platform_device *pdev) return -EPROBE_DEFER; } + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + bridge_node = of_graph_get_remote_port_parent(endpoint); + if (bridge_node) { + dp->bridge = of_drm_find_bridge(bridge_node); + of_node_put(bridge_node); + if (!dp->bridge) + return -EPROBE_DEFER; + } else + return -EPROBE_DEFER; + } + ret = component_add(&pdev->dev, &exynos_dp_ops); if (ret) exynos_drm_component_del(&pdev->dev, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h index 164f171168e7..a4e799679669 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ b/drivers/gpu/drm/exynos/exynos_dp_core.h @@ -153,6 +153,7 @@ struct exynos_dp_device { struct drm_connector connector; struct drm_encoder *encoder; struct drm_panel *panel; + struct drm_bridge *bridge; struct clk *clock; unsigned int irq; void __iomem *reg_base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 835b6af00970..842d6b8dc3c4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -461,7 +461,6 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE; break; case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | EXYNOS_MSCTRL_C_INT_IN_2PLANE); @@ -511,7 +510,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: - case DRM_FORMAT_NV12MT: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; break; default: @@ -524,10 +522,7 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; - if (fmt == DRM_FORMAT_NV12MT) - cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; - else - cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; + cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); @@ -812,7 +807,6 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE; break; case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV16: cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; @@ -867,7 +861,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: - case DRM_FORMAT_NV12MT: case DRM_FORMAT_NV21: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; break; @@ -883,10 +876,7 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; - if (fmt == DRM_FORMAT_NV12MT) - cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; - else - cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; + cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 0261468c8019..8040ed2a831f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -542,9 +542,6 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); break; - case DRM_FORMAT_NV12MT: - 
cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE); - break; default: dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); return -EINVAL; @@ -809,9 +806,6 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); break; - case DRM_FORMAT_NV12MT: - cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE); - break; default: dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt); return -EINVAL; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 358cff67e5ce..2f43a3c4f7b7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -23,7 +23,6 @@ static const uint32_t formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_NV12, - DRM_FORMAT_NV12MT, }; /* diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index ed44cd4f01f7..2fd2e5d46142 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -412,8 +412,6 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) win_data = &ctx->win_data[win]; switch (win_data->pixel_format) { - case DRM_FORMAT_NV12MT: - tiled_mode = true; case DRM_FORMAT_NV12: crcb_mode = false; buf_num = 2; diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..a9041d1a8ff0 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -32,6 +32,8 @@ struct tda998x_priv { struct i2c_client *cec; struct i2c_client *hdmi; + struct mutex mutex; + struct delayed_work dwork; uint16_t rev; uint8_t current_page; int dpms; @@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) uint8_t addr = REG2ADDR(reg); int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return ret; + goto out; ret = i2c_master_send(client, &addr, sizeof(addr)); if (ret < 0) @@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) if (ret < 0) goto fail; - return ret; + goto out; fail: dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); return ret; } @@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) buf[0] = REG2ADDR(reg); memcpy(&buf[1], p, cnt); + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, cnt + 1); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static int @@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) uint8_t buf[] = {REG2ADDR(reg), val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; int ret; + mutex_lock(&priv->mutex); ret = set_page(priv, reg); if (ret < 0) - return; + goto out; ret = i2c_master_send(client, buf, sizeof(buf)); if (ret < 0) dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); +out: + mutex_unlock(&priv->mutex); } static void @@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv) reg_write(priv, 
REG_MUX_VP_VIP_OUT, 0x24); } +/* handle HDMI connect/disconnect */ +static void tda998x_hpd(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct tda998x_priv *priv = + container_of(dwork, struct tda998x_priv, dwork); + + if (priv->encoder && priv->encoder->dev) + drm_kms_helper_hotplug_event(priv->encoder->dev); +} + /* * only 2 interrupts may occur: screen plug/unplug and EDID read */ @@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) priv->wq_edid_wait = 0; wake_up(&priv->wq_edid); } else if (cec != 0) { /* HPD change */ - if (priv->encoder && priv->encoder->dev) - drm_helper_hpd_irq_event(priv->encoder->dev); + schedule_delayed_work(&priv->dwork, HZ/10); } return IRQ_HANDLED; } @@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) /* disable all IRQs and free the IRQ handler */ cec_write(priv, REG_CEC_RXSHPDINTENA, 0); reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); - if (priv->hdmi->irq) + if (priv->hdmi->irq) { free_irq(priv->hdmi->irq, priv); + cancel_delayed_work_sync(&priv->dwork); + } i2c_unregister_device(priv->cec); } @@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) struct device_node *np = client->dev.of_node; u32 video; int rev_lo, rev_hi, ret; + unsigned short cec_addr; priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); @@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) priv->current_page = 0xff; priv->hdmi = client; - priv->cec = i2c_new_dummy(client->adapter, 0x34); + /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ + cec_addr = 0x34 + (client->addr & 0x03); + priv->cec = i2c_new_dummy(client->adapter, cec_addr); if (!priv->cec) return -ENODEV; priv->dpms = DRM_MODE_DPMS_OFF; + mutex_init(&priv->mutex); /* protect the page access */ + /* wake up the device: */ cec_write(priv, REG_CEC_ENAMODS, CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); @@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) if (client->irq) { int irqf_trigger; - /* init read EDID waitqueue */ + /* init read EDID waitqueue and HDP work */ init_waitqueue_head(&priv->wq_edid); + INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); /* clear pending interrupts */ reg_read(priv, REG_INT_FLAGS_0); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 53c5f9e39fe3..4145d95902f5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2612,9 +2612,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged, va_list args; char error_msg[80]; - if (WARN_ON(mutex_is_locked(&dev_priv->dev->struct_mutex))) - return; - va_start(args, fmt); vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 5d5e4092d40a..33cdddf26684 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -5,7 +5,7 @@ config DRM_IMX select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER - depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) + depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS depends on IMX_IPUV3_CORE help enable i.MX graphics support diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index b63601d04601..4216e479a9be 100644 --- 
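The tda998x change above defers the hotplug notification from the threaded IRQ handler to a delayed work item, coalescing bursts of plug/unplug events and keeping the heavier KMS helper out of the interrupt path. A rough sketch of that pattern with a hypothetical driver structure:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_priv {
	struct delayed_work dwork;
	/* ... */
};

static void example_hpd_work(struct work_struct *work)
{
	struct example_priv *priv =
		container_of(to_delayed_work(work), struct example_priv, dwork);

	/* sleeping work (e.g. notifying userspace of the hotplug) goes here */
	(void)priv;
}

static irqreturn_t example_irq_thread(int irq, void *data)
{
	struct example_priv *priv = data;

	/* coalesce a burst of plug/unplug events into one notification */
	schedule_delayed_work(&priv->dwork, HZ / 10);
	return IRQ_HANDLED;
}

/* setup:    INIT_DELAYED_WORK(&priv->dwork, example_hpd_work);
 * teardown: free_irq(irq, priv);
 *           cancel_delayed_work_sync(&priv->dwork);  // after the IRQ is gone
 */
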
a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -191,10 +191,18 @@ static int tve_setup_vga(struct imx_tve *tve) /* set gain to (1 + 10/128) to provide 0.7V peak-to-peak amplitude */ ret = regmap_update_bits(tve->regmap, TVE_TVDAC0_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; + ret = regmap_update_bits(tve->regmap, TVE_TVDAC1_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; + ret = regmap_update_bits(tve->regmap, TVE_TVDAC2_CONT_REG, TVE_TVDAC_GAIN_MASK, 0x0a); + if (ret) + return ret; /* set configuration register */ mask = TVE_DATA_SOURCE_MASK | TVE_INP_VIDEO_FORM; @@ -204,16 +212,12 @@ static int tve_setup_vga(struct imx_tve *tve) mask |= TVE_TV_OUT_MODE_MASK | TVE_SYNC_CH_0_EN; val |= TVE_TV_OUT_RGB | TVE_SYNC_CH_0_EN; ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, mask, val); - if (ret < 0) { - dev_err(tve->dev, "failed to set configuration: %d\n", ret); + if (ret) return ret; - } /* set test mode (as documented) */ - ret = regmap_update_bits(tve->regmap, TVE_TST_MODE_REG, + return regmap_update_bits(tve->regmap, TVE_TST_MODE_REG, TVE_TVDAC_TEST_MODE_MASK, 1); - - return 0; } static enum drm_connector_status imx_tve_connector_detect( @@ -335,9 +339,11 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder, } if (tve->mode == TVE_MODE_VGA) - tve_setup_vga(tve); + ret = tve_setup_vga(tve); else - tve_setup_tvout(tve); + ret = tve_setup_tvout(tve); + if (ret) + dev_err(tve->dev, "failed to set configuration: %d\n", ret); } static void imx_tve_encoder_commit(struct drm_encoder *encoder) @@ -671,6 +677,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) /* disable cable detection for VGA mode */ ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0); + if (ret) + return ret; ret = imx_tve_register(drm, tve); if (ret) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5b2a1ff95d3d..bacbbb70f679 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -3,6 +3,7 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || (ARM && COMPILE_TEST) + depends on OF && COMMON_CLK select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 143d988f8add..674a132fd76e 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -1,7 +1,4 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) - ccflags-y += -Werror -endif msm-y := \ adreno/adreno_device.o \ @@ -16,6 +13,12 @@ msm-y := \ hdmi/hdmi_phy_8960.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + edp/edp.o \ + edp/edp_aux.o \ + edp/edp_bridge.o \ + edp/edp_connector.o \ + edp/edp_ctrl.o \ + edp/edp_phy.o \ mdp/mdp_format.o \ mdp/mdp_kms.o \ mdp/mdp4/mdp4_crtc.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h index 22882cc0a573..edc845fffdf4 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- 
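The imx-tve hunks earlier in this section stop ignoring regmap_update_bits() return values and report a single error at the caller. A minimal sketch of the resulting pattern, with placeholder register names rather than the real TVE registers:

#include <linux/regmap.h>

#define EXAMPLE_REG_A	0x28	/* placeholder addresses, not real registers */
#define EXAMPLE_REG_B	0x2c
#define EXAMPLE_MASK	0x3f

static int example_setup(struct regmap *map)
{
	int ret;

	ret = regmap_update_bits(map, EXAMPLE_REG_A, EXAMPLE_MASK, 0x0a);
	if (ret)
		return ret;

	/* the last write's status is handed straight back to the caller */
	return regmap_update_bits(map, EXAMPLE_REG_B, EXAMPLE_MASK, 0x0a);
}
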
/home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h index 109e9a263daf..e91a739452d7 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -58,111 +58,130 @@ enum a3xx_cache_opcode { }; enum a3xx_vtx_fmt { - VFMT_FLOAT_32 = 0, - VFMT_FLOAT_32_32 = 1, - VFMT_FLOAT_32_32_32 = 2, - VFMT_FLOAT_32_32_32_32 = 3, - VFMT_FLOAT_16 = 4, - VFMT_FLOAT_16_16 = 5, - VFMT_FLOAT_16_16_16 = 6, - VFMT_FLOAT_16_16_16_16 = 7, - VFMT_FIXED_32 = 8, - VFMT_FIXED_32_32 = 9, - VFMT_FIXED_32_32_32 = 10, - VFMT_FIXED_32_32_32_32 = 11, - VFMT_SHORT_16 = 16, - VFMT_SHORT_16_16 = 17, - VFMT_SHORT_16_16_16 = 18, - VFMT_SHORT_16_16_16_16 = 19, - VFMT_USHORT_16 = 20, - VFMT_USHORT_16_16 = 21, - VFMT_USHORT_16_16_16 = 22, - VFMT_USHORT_16_16_16_16 = 23, - VFMT_NORM_SHORT_16 = 24, - VFMT_NORM_SHORT_16_16 = 25, - VFMT_NORM_SHORT_16_16_16 = 26, - VFMT_NORM_SHORT_16_16_16_16 = 27, - VFMT_NORM_USHORT_16 = 28, - VFMT_NORM_USHORT_16_16 = 29, - VFMT_NORM_USHORT_16_16_16 = 30, - VFMT_NORM_USHORT_16_16_16_16 = 31, - VFMT_UINT_32 = 32, - VFMT_UINT_32_32 = 33, - VFMT_UINT_32_32_32 = 34, - VFMT_UINT_32_32_32_32 = 35, - VFMT_INT_32 = 36, - VFMT_INT_32_32 = 37, - VFMT_INT_32_32_32 = 38, - VFMT_INT_32_32_32_32 = 39, - VFMT_UBYTE_8 = 40, - VFMT_UBYTE_8_8 = 41, - VFMT_UBYTE_8_8_8 = 42, - VFMT_UBYTE_8_8_8_8 = 43, - VFMT_NORM_UBYTE_8 = 44, - VFMT_NORM_UBYTE_8_8 = 45, - VFMT_NORM_UBYTE_8_8_8 = 46, - VFMT_NORM_UBYTE_8_8_8_8 = 47, - VFMT_BYTE_8 = 48, - VFMT_BYTE_8_8 = 49, - VFMT_BYTE_8_8_8 = 50, - VFMT_BYTE_8_8_8_8 = 51, - VFMT_NORM_BYTE_8 = 52, - VFMT_NORM_BYTE_8_8 = 53, - VFMT_NORM_BYTE_8_8_8 = 54, - 
VFMT_NORM_BYTE_8_8_8_8 = 55, - VFMT_UINT_10_10_10_2 = 60, - VFMT_NORM_UINT_10_10_10_2 = 61, - VFMT_INT_10_10_10_2 = 62, - VFMT_NORM_INT_10_10_10_2 = 63, + VFMT_32_FLOAT = 0, + VFMT_32_32_FLOAT = 1, + VFMT_32_32_32_FLOAT = 2, + VFMT_32_32_32_32_FLOAT = 3, + VFMT_16_FLOAT = 4, + VFMT_16_16_FLOAT = 5, + VFMT_16_16_16_FLOAT = 6, + VFMT_16_16_16_16_FLOAT = 7, + VFMT_32_FIXED = 8, + VFMT_32_32_FIXED = 9, + VFMT_32_32_32_FIXED = 10, + VFMT_32_32_32_32_FIXED = 11, + VFMT_16_SINT = 16, + VFMT_16_16_SINT = 17, + VFMT_16_16_16_SINT = 18, + VFMT_16_16_16_16_SINT = 19, + VFMT_16_UINT = 20, + VFMT_16_16_UINT = 21, + VFMT_16_16_16_UINT = 22, + VFMT_16_16_16_16_UINT = 23, + VFMT_16_SNORM = 24, + VFMT_16_16_SNORM = 25, + VFMT_16_16_16_SNORM = 26, + VFMT_16_16_16_16_SNORM = 27, + VFMT_16_UNORM = 28, + VFMT_16_16_UNORM = 29, + VFMT_16_16_16_UNORM = 30, + VFMT_16_16_16_16_UNORM = 31, + VFMT_32_UINT = 32, + VFMT_32_32_UINT = 33, + VFMT_32_32_32_UINT = 34, + VFMT_32_32_32_32_UINT = 35, + VFMT_32_SINT = 36, + VFMT_32_32_SINT = 37, + VFMT_32_32_32_SINT = 38, + VFMT_32_32_32_32_SINT = 39, + VFMT_8_UINT = 40, + VFMT_8_8_UINT = 41, + VFMT_8_8_8_UINT = 42, + VFMT_8_8_8_8_UINT = 43, + VFMT_8_UNORM = 44, + VFMT_8_8_UNORM = 45, + VFMT_8_8_8_UNORM = 46, + VFMT_8_8_8_8_UNORM = 47, + VFMT_8_SINT = 48, + VFMT_8_8_SINT = 49, + VFMT_8_8_8_SINT = 50, + VFMT_8_8_8_8_SINT = 51, + VFMT_8_SNORM = 52, + VFMT_8_8_SNORM = 53, + VFMT_8_8_8_SNORM = 54, + VFMT_8_8_8_8_SNORM = 55, + VFMT_10_10_10_2_UINT = 60, + VFMT_10_10_10_2_UNORM = 61, + VFMT_10_10_10_2_SINT = 62, + VFMT_10_10_10_2_SNORM = 63, }; enum a3xx_tex_fmt { - TFMT_NORM_USHORT_565 = 4, - TFMT_NORM_USHORT_5551 = 6, - TFMT_NORM_USHORT_4444 = 7, - TFMT_NORM_USHORT_Z16 = 9, - TFMT_NORM_UINT_X8Z24 = 10, - TFMT_FLOAT_Z32 = 11, - TFMT_NORM_UINT_NV12_UV_TILED = 17, - TFMT_NORM_UINT_NV12_Y_TILED = 19, - TFMT_NORM_UINT_NV12_UV = 21, - TFMT_NORM_UINT_NV12_Y = 23, - TFMT_NORM_UINT_I420_Y = 24, - TFMT_NORM_UINT_I420_U = 26, - TFMT_NORM_UINT_I420_V = 27, - TFMT_NORM_UINT_2_10_10_10 = 41, - TFMT_FLOAT_9_9_9_E5 = 42, - TFMT_FLOAT_10_11_11 = 43, - TFMT_NORM_UINT_A8 = 44, - TFMT_NORM_UINT_L8_A8 = 47, - TFMT_NORM_UINT_8 = 48, - TFMT_NORM_UINT_8_8 = 49, - TFMT_NORM_UINT_8_8_8 = 50, - TFMT_NORM_UINT_8_8_8_8 = 51, - TFMT_NORM_SINT_8_8 = 53, - TFMT_NORM_SINT_8_8_8_8 = 55, - TFMT_UINT_8_8 = 57, - TFMT_UINT_8_8_8_8 = 59, - TFMT_SINT_8_8 = 61, - TFMT_SINT_8_8_8_8 = 63, - TFMT_FLOAT_16 = 64, - TFMT_FLOAT_16_16 = 65, - TFMT_FLOAT_16_16_16_16 = 67, - TFMT_UINT_16 = 68, - TFMT_UINT_16_16 = 69, - TFMT_UINT_16_16_16_16 = 71, - TFMT_SINT_16 = 72, - TFMT_SINT_16_16 = 73, - TFMT_SINT_16_16_16_16 = 75, - TFMT_FLOAT_32 = 84, - TFMT_FLOAT_32_32 = 85, - TFMT_FLOAT_32_32_32_32 = 87, - TFMT_UINT_32 = 88, - TFMT_UINT_32_32 = 89, - TFMT_UINT_32_32_32_32 = 91, - TFMT_SINT_32 = 92, - TFMT_SINT_32_32 = 93, - TFMT_SINT_32_32_32_32 = 95, + TFMT_5_6_5_UNORM = 4, + TFMT_5_5_5_1_UNORM = 5, + TFMT_4_4_4_4_UNORM = 7, + TFMT_Z16_UNORM = 9, + TFMT_X8Z24_UNORM = 10, + TFMT_Z32_FLOAT = 11, + TFMT_NV12_UV_TILED = 17, + TFMT_NV12_Y_TILED = 19, + TFMT_NV12_UV = 21, + TFMT_NV12_Y = 23, + TFMT_I420_Y = 24, + TFMT_I420_U = 26, + TFMT_I420_V = 27, + TFMT_DXT1 = 36, + TFMT_DXT3 = 37, + TFMT_DXT5 = 38, + TFMT_10_10_10_2_UNORM = 41, + TFMT_9_9_9_E5_FLOAT = 42, + TFMT_11_11_10_FLOAT = 43, + TFMT_A8_UNORM = 44, + TFMT_L8_A8_UNORM = 47, + TFMT_8_UNORM = 48, + TFMT_8_8_UNORM = 49, + TFMT_8_8_8_UNORM = 50, + TFMT_8_8_8_8_UNORM = 51, + TFMT_8_SNORM = 52, + TFMT_8_8_SNORM = 53, + TFMT_8_8_8_SNORM = 54, + TFMT_8_8_8_8_SNORM = 55, + TFMT_8_UINT = 56, 
+ TFMT_8_8_UINT = 57, + TFMT_8_8_8_UINT = 58, + TFMT_8_8_8_8_UINT = 59, + TFMT_8_SINT = 60, + TFMT_8_8_SINT = 61, + TFMT_8_8_8_SINT = 62, + TFMT_8_8_8_8_SINT = 63, + TFMT_16_FLOAT = 64, + TFMT_16_16_FLOAT = 65, + TFMT_16_16_16_16_FLOAT = 67, + TFMT_16_UINT = 68, + TFMT_16_16_UINT = 69, + TFMT_16_16_16_16_UINT = 71, + TFMT_16_SINT = 72, + TFMT_16_16_SINT = 73, + TFMT_16_16_16_16_SINT = 75, + TFMT_16_UNORM = 76, + TFMT_16_16_UNORM = 77, + TFMT_16_16_16_16_UNORM = 79, + TFMT_16_SNORM = 80, + TFMT_16_16_SNORM = 81, + TFMT_16_16_16_16_SNORM = 83, + TFMT_32_FLOAT = 84, + TFMT_32_32_FLOAT = 85, + TFMT_32_32_32_32_FLOAT = 87, + TFMT_32_UINT = 88, + TFMT_32_32_UINT = 89, + TFMT_32_32_32_32_UINT = 91, + TFMT_32_SINT = 92, + TFMT_32_32_SINT = 93, + TFMT_32_32_32_32_SINT = 95, + TFMT_RGTC2_SNORM = 112, + TFMT_RGTC2_UNORM = 113, + TFMT_RGTC1_SNORM = 114, + TFMT_RGTC1_UNORM = 115, }; enum a3xx_tex_fetchsize { @@ -180,9 +199,11 @@ enum a3xx_color_fmt { RB_R4G4B4A4_UNORM = 3, RB_R8G8B8_UNORM = 4, RB_R8G8B8A8_UNORM = 8, + RB_R8G8B8A8_SNORM = 9, RB_R8G8B8A8_UINT = 10, RB_R8G8B8A8_SINT = 11, RB_R8G8_UNORM = 12, + RB_R8G8_SNORM = 13, RB_R8_UINT = 14, RB_R8_SINT = 15, RB_R10G10B10A2_UNORM = 16, @@ -258,6 +279,14 @@ enum a3xx_tex_clamp { A3XX_TEX_MIRROR_CLAMP = 4, }; +enum a3xx_tex_aniso { + A3XX_TEX_ANISO_1 = 0, + A3XX_TEX_ANISO_2 = 1, + A3XX_TEX_ANISO_4 = 2, + A3XX_TEX_ANISO_8 = 3, + A3XX_TEX_ANISO_16 = 4, +}; + enum a3xx_tex_swiz { A3XX_TEX_X = 0, A3XX_TEX_Y = 1, @@ -1563,12 +1592,13 @@ static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val) { return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK; } -#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80 +#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80 #define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7 static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) { return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; } +#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000 #define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000 #define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000 #define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18 @@ -2509,6 +2539,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) { return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; } +#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000 +#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15 +static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val) +{ + return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK; +} #define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000 #define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20 static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val) diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h index 5a24c416d2dd..755723fd8ba5 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 
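The generated adreno headers encode every register field as a __MASK/__SHIFT pair plus a small inline packer, so fields can be OR-ed together without open-coded shifts; each encoder masks its own field, so an out-of-range value cannot clobber neighbouring bits. A hypothetical composition using two of the a3xx sampler fields defined above (assumes a3xx.xml.h is included):

#include <stdint.h>

/* Illustrative values only: pack a TEX_SAMP_0 word with 4x anisotropic
 * filtering and mirror-clamp wrapping on the R coordinate. */
static inline uint32_t example_tex_samp0(void)
{
	return A3XX_TEX_SAMP_0_ANISO(A3XX_TEX_ANISO_4) |
	       A3XX_TEX_SAMP_0_WRAP_R(A3XX_TEX_MIRROR_CLAMP);
}
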
2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -63,72 +63,82 @@ enum a4xx_rb_blend_opcode { }; enum a4xx_vtx_fmt { - VFMT4_FLOAT_32 = 1, - VFMT4_FLOAT_32_32 = 2, - VFMT4_FLOAT_32_32_32 = 3, - VFMT4_FLOAT_32_32_32_32 = 4, - VFMT4_FLOAT_16 = 5, - VFMT4_FLOAT_16_16 = 6, - VFMT4_FLOAT_16_16_16 = 7, - VFMT4_FLOAT_16_16_16_16 = 8, - VFMT4_FIXED_32 = 9, - VFMT4_FIXED_32_32 = 10, - VFMT4_FIXED_32_32_32 = 11, - VFMT4_FIXED_32_32_32_32 = 12, - VFMT4_SHORT_16 = 16, - VFMT4_SHORT_16_16 = 17, - VFMT4_SHORT_16_16_16 = 18, - VFMT4_SHORT_16_16_16_16 = 19, - VFMT4_USHORT_16 = 20, - VFMT4_USHORT_16_16 = 21, - VFMT4_USHORT_16_16_16 = 22, - VFMT4_USHORT_16_16_16_16 = 23, - VFMT4_NORM_SHORT_16 = 24, - VFMT4_NORM_SHORT_16_16 = 25, - VFMT4_NORM_SHORT_16_16_16 = 26, - VFMT4_NORM_SHORT_16_16_16_16 = 27, - VFMT4_NORM_USHORT_16 = 28, - VFMT4_NORM_USHORT_16_16 = 29, - VFMT4_NORM_USHORT_16_16_16 = 30, - VFMT4_NORM_USHORT_16_16_16_16 = 31, - VFMT4_UBYTE_8 = 40, - VFMT4_UBYTE_8_8 = 41, - VFMT4_UBYTE_8_8_8 = 42, - VFMT4_UBYTE_8_8_8_8 = 43, - VFMT4_NORM_UBYTE_8 = 44, - VFMT4_NORM_UBYTE_8_8 = 45, - VFMT4_NORM_UBYTE_8_8_8 = 46, - VFMT4_NORM_UBYTE_8_8_8_8 = 47, - VFMT4_BYTE_8 = 48, - VFMT4_BYTE_8_8 = 49, - VFMT4_BYTE_8_8_8 = 50, - VFMT4_BYTE_8_8_8_8 = 51, - VFMT4_NORM_BYTE_8 = 52, - VFMT4_NORM_BYTE_8_8 = 53, - VFMT4_NORM_BYTE_8_8_8 = 54, - VFMT4_NORM_BYTE_8_8_8_8 = 55, - VFMT4_UINT_10_10_10_2 = 60, - VFMT4_NORM_UINT_10_10_10_2 = 61, - VFMT4_INT_10_10_10_2 = 62, - VFMT4_NORM_INT_10_10_10_2 = 63, + VFMT4_32_FLOAT = 1, + VFMT4_32_32_FLOAT = 2, + VFMT4_32_32_32_FLOAT = 3, + VFMT4_32_32_32_32_FLOAT = 4, + VFMT4_16_FLOAT = 5, + VFMT4_16_16_FLOAT = 6, + VFMT4_16_16_16_FLOAT = 7, + VFMT4_16_16_16_16_FLOAT = 8, + VFMT4_32_FIXED = 9, + VFMT4_32_32_FIXED = 10, + VFMT4_32_32_32_FIXED = 11, + VFMT4_32_32_32_32_FIXED = 12, + VFMT4_16_SINT = 16, + VFMT4_16_16_SINT = 17, + VFMT4_16_16_16_SINT = 18, + VFMT4_16_16_16_16_SINT = 19, + VFMT4_16_UINT = 20, + VFMT4_16_16_UINT = 21, + VFMT4_16_16_16_UINT = 22, + VFMT4_16_16_16_16_UINT = 23, + VFMT4_16_SNORM = 24, + VFMT4_16_16_SNORM = 25, + VFMT4_16_16_16_SNORM = 26, + VFMT4_16_16_16_16_SNORM = 27, + VFMT4_16_UNORM = 28, + VFMT4_16_16_UNORM = 29, + VFMT4_16_16_16_UNORM = 30, + VFMT4_16_16_16_16_UNORM = 31, + VFMT4_32_32_SINT = 37, + VFMT4_8_UINT = 40, + VFMT4_8_8_UINT = 41, + VFMT4_8_8_8_UINT = 42, + VFMT4_8_8_8_8_UINT = 43, + VFMT4_8_UNORM = 44, + VFMT4_8_8_UNORM = 45, + VFMT4_8_8_8_UNORM = 46, + VFMT4_8_8_8_8_UNORM = 47, + VFMT4_8_SINT = 48, + VFMT4_8_8_SINT = 49, + VFMT4_8_8_8_SINT = 50, + VFMT4_8_8_8_8_SINT = 51, + VFMT4_8_SNORM = 52, + VFMT4_8_8_SNORM = 53, + VFMT4_8_8_8_SNORM = 54, + VFMT4_8_8_8_8_SNORM = 55, + VFMT4_10_10_10_2_UINT = 60, + VFMT4_10_10_10_2_UNORM = 61, + VFMT4_10_10_10_2_SINT = 62, + VFMT4_10_10_10_2_SNORM = 63, }; enum a4xx_tex_fmt { - TFMT4_NORM_USHORT_565 = 11, - TFMT4_NORM_USHORT_5551 = 10, - TFMT4_NORM_USHORT_4444 = 8, - TFMT4_NORM_UINT_X8Z24 = 71, - TFMT4_NORM_UINT_2_10_10_10 = 
33, - TFMT4_NORM_UINT_A8 = 3, - TFMT4_NORM_UINT_L8_A8 = 13, - TFMT4_NORM_UINT_8 = 4, - TFMT4_NORM_UINT_8_8_8_8 = 28, - TFMT4_FLOAT_16 = 20, - TFMT4_FLOAT_16_16 = 40, - TFMT4_FLOAT_16_16_16_16 = 53, - TFMT4_FLOAT_32 = 43, - TFMT4_FLOAT_32_32 = 56, - TFMT4_FLOAT_32_32_32_32 = 63, + TFMT4_5_6_5_UNORM = 11, + TFMT4_5_5_5_1_UNORM = 10, + TFMT4_4_4_4_4_UNORM = 8, + TFMT4_X8Z24_UNORM = 71, + TFMT4_10_10_10_2_UNORM = 33, + TFMT4_A8_UNORM = 3, + TFMT4_L8_A8_UNORM = 13, + TFMT4_8_UNORM = 4, + TFMT4_8_8_UNORM = 14, + TFMT4_8_8_8_8_UNORM = 28, + TFMT4_16_FLOAT = 20, + TFMT4_16_16_FLOAT = 40, + TFMT4_16_16_16_16_FLOAT = 53, + TFMT4_32_FLOAT = 43, + TFMT4_32_32_FLOAT = 56, + TFMT4_32_32_32_32_FLOAT = 63, +}; + +enum a4xx_tex_fetchsize { + TFETCH4_1_BYTE = 0, + TFETCH4_2_BYTE = 1, + TFETCH4_4_BYTE = 2, + TFETCH4_8_BYTE = 3, + TFETCH4_16_BYTE = 4, }; enum a4xx_depth_format { @@ -264,14 +274,19 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val) return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK; } -#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3 -#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7 -static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val) +#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3 +#define A4XX_RB_RENDER_CONTROL2_XCOORD 0x00000001 +#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002 +#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004 +#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008 +#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 +static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) { - return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK; + return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; } -#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000 +#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000 static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; } @@ -362,7 +377,69 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; } +#define REG_A4XX_RB_BLEND_RED 0x000020f3 +#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_RED_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK; +} +#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_GREEN 0x000020f4 +#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK; +} +#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK; +} + +#define 
REG_A4XX_RB_BLEND_BLUE 0x000020f5 +#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK; +} +#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 +#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff +#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK; +} +#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) +{ + return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK; +} + #define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8 +#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff +#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 +static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) +{ + return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; +} #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 #define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 @@ -372,7 +449,7 @@ static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare } #define REG_A4XX_RB_FS_OUTPUT 0x000020f9 -#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001 +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND 0x00000001 #define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 #define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 @@ -416,11 +493,11 @@ static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) } #define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd -#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0 -#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4 +#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0 +#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5 static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val) { - return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; + return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; } #define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe @@ -508,7 +585,7 @@ static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) #define A4XX_RB_DEPTH_PITCH__SHIFT 0 static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) { - return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; + return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; } #define REG_A4XX_RB_DEPTH_PITCH2 0x00002105 @@ -516,7 +593,7 @@ static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) #define A4XX_RB_DEPTH_PITCH2__SHIFT 0 static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val) { - return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; + return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; } #define REG_A4XX_RB_STENCIL_CONTROL 0x00002106 @@ -630,7 +707,11 @@ static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val) return ((val) << 
A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK; } -#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; } #define REG_A4XX_RBBM_HW_VERSION 0x00000000 @@ -1121,7 +1202,9 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val) { return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK; } +#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000 #define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000 +#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000 #define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea #define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 @@ -1384,6 +1467,12 @@ static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val) #define REG_A4XX_VFD_CONTROL_2 0x00002202 #define REG_A4XX_VFD_CONTROL_3 0x00002203 +#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00 +#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8 +static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val) +{ + return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK; +} #define REG_A4XX_VFD_CONTROL_4 0x00002204 @@ -1405,12 +1494,7 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val) return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK; } #define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000 -#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000 -#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24 -static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val) -{ - return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK; -} +#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000 static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; } @@ -1423,6 +1507,12 @@ static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val) } static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; } +#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff +#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0 +static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val) +{ + return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK; +} static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; } @@ -1446,6 +1536,7 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val) { return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK; } +#define A4XX_VFD_DECODE_INSTR_INT 0x00100000 #define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000 #define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22 static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) @@ -1585,7 +1676,47 @@ static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; } -#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f +#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 +#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 +#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 +static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val) +{ + return 
((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK; +} + +#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078 +#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 +#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 +#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 +#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 +#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 +static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) +{ + return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; +} +#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 +#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 + +#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b +#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c +#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2 +static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; +} +#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380 +#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7 +static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; +} +#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800 +#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 +#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 +static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +{ + return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; +} #define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c #define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 @@ -1647,46 +1778,34 @@ static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; } -#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 -#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 -static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val) +#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val) { - return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK; } - -#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 -#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 -#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 -#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 -static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val) { - return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & 
A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK; } -#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 -#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000 -#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c -#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) -{ - return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; -} -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380 -#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7 -static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val) +#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val) { - return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK; } -#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 -#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 -static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000 +#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16 +static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val) { - return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; + return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK; } #define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80 @@ -1742,6 +1861,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize } #define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100 #define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200 +#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000 +#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16 +static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK; +} #define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 #define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2 @@ -1751,6 +1876,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val) { return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK; } +#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc +#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2 +static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} #define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3 #define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff @@ -1965,15 +2096,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_UNKNOWN_20F2 0x000020f2 -#define REG_A4XX_UNKNOWN_20F3 0x000020f3 - -#define REG_A4XX_UNKNOWN_20F4 0x000020f4 - -#define REG_A4XX_UNKNOWN_20F5 0x000020f5 - -#define REG_A4XX_UNKNOWN_20F6 0x000020f6 - #define 
REG_A4XX_UNKNOWN_20F7 0x000020f7 +#define A4XX_UNKNOWN_20F7__MASK 0xffffffff +#define A4XX_UNKNOWN_20F7__SHIFT 0 +static inline uint32_t A4XX_UNKNOWN_20F7(float val) +{ + return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK; +} #define REG_A4XX_UNKNOWN_2152 0x00002152 @@ -2000,6 +2129,7 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val) #define REG_A4XX_UNKNOWN_23A0 0x000023a0 #define REG_A4XX_TEX_SAMP_0 0x00000000 +#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 #define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 #define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1 static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val) @@ -2038,17 +2168,19 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val { return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK; } +#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 +#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 #define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 #define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val) { - return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK; + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK; } #define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 #define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val) { - return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; } #define REG_A4XX_TEX_CONST_0 0x00000000 @@ -2077,6 +2209,12 @@ static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val) { return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK; } +#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 +#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16 +static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK; +} #define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000 #define A4XX_TEX_CONST_0_FMT__SHIFT 22 static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val) @@ -2105,6 +2243,12 @@ static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val) } #define REG_A4XX_TEX_CONST_2 0x00000002 +#define A4XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f +#define A4XX_TEX_CONST_2_FETCHSIZE__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_2_FETCHSIZE(enum a4xx_tex_fetchsize val) +{ + return ((val) << A4XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A4XX_TEX_CONST_2_FETCHSIZE__MASK; +} #define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00 #define A4XX_TEX_CONST_2_PITCH__SHIFT 9 static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val) @@ -2119,19 +2263,31 @@ static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) } #define REG_A4XX_TEX_CONST_3 0x00000003 -#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f +#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff #define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0 static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val) { return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK; } +#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000 +#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18 +static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & 
A4XX_TEX_CONST_3_DEPTH__MASK; +} #define REG_A4XX_TEX_CONST_4 0x00000004 -#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff -#define A4XX_TEX_CONST_4_BASE__SHIFT 0 +#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f +#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val) +{ + return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK; +} +#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0 +#define A4XX_TEX_CONST_4_BASE__SHIFT 5 static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val) { - return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; + return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; } #define REG_A4XX_TEX_CONST_5 0x00000005 diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h index a4b33af9338d..8531beb982e7 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h index 6a75cee94d81..6ffc4f6c8af1 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h +++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h @@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are: - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18) -- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) +- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> 
(robclark) @@ -172,7 +172,9 @@ enum adreno_pm4_type3_packets { CP_DRAW_INDIRECT = 40, CP_DRAW_INDX_INDIRECT = 41, CP_DRAW_AUTO = 36, + CP_UNKNOWN_19 = 25, CP_UNKNOWN_1A = 26, + CP_UNKNOWN_4E = 78, CP_WIDE_REG_WRITE = 116, IN_IB_PREFETCH_END = 23, IN_SUBBLK_PREFETCH = 31, @@ -203,6 +205,12 @@ enum adreno_state_src { SS_INDIRECT = 4, }; +enum a4xx_index_size { + INDEX4_SIZE_8_BIT = 0, + INDEX4_SIZE_16_BIT = 1, + INDEX4_SIZE_32_BIT = 2, +}; + #define REG_CP_LOAD_STATE_0 0x00000000 #define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff #define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 @@ -374,29 +382,20 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va { return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; } -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700 -#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; -} -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800 -#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val) +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) { return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; } -#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000 -#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000 -#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000 -#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000 -#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16 -static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val) -{ - return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK; -} #define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK; +} #define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 #define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 448438b759b4..abf1bba520bf 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml 
( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h index c102a7f074ac..695f99d4bec2 100644 --- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h +++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h index a900134bdf33..50ff9851d73d 100644 --- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h +++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: 
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c new file mode 100644 index 000000000000..0940e84b2821 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of_irq.h> +#include "edp.h" + +static irqreturn_t edp_irq(int irq, void *dev_id) +{ + struct msm_edp *edp = dev_id; + + /* Process eDP irq */ + return msm_edp_ctrl_irq(edp->ctrl); +} + +static void edp_destroy(struct platform_device *pdev) +{ + struct msm_edp *edp = platform_get_drvdata(pdev); + + if (!edp) + return; + + if (edp->ctrl) { + msm_edp_ctrl_destroy(edp->ctrl); + edp->ctrl = NULL; + } + + platform_set_drvdata(pdev, NULL); +} + +/* construct eDP at bind/probe time, grab all the resources. 
*/ +static struct msm_edp *edp_init(struct platform_device *pdev) +{ + struct msm_edp *edp = NULL; + int ret; + + if (!pdev) { + pr_err("no eDP device\n"); + ret = -ENXIO; + goto fail; + } + + edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL); + if (!edp) { + ret = -ENOMEM; + goto fail; + } + DBG("eDP probed=%p", edp); + + edp->pdev = pdev; + platform_set_drvdata(pdev, edp); + + ret = msm_edp_ctrl_init(edp); + if (ret) + goto fail; + + return edp; + +fail: + if (edp) + edp_destroy(pdev); + + return ERR_PTR(ret); +} + +static int edp_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + struct msm_edp *edp; + + DBG(""); + edp = edp_init(to_platform_device(dev)); + if (IS_ERR(edp)) + return PTR_ERR(edp); + priv->edp = edp; + + return 0; +} + +static void edp_unbind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = dev_get_drvdata(master); + struct msm_drm_private *priv = drm->dev_private; + + DBG(""); + if (priv->edp) { + edp_destroy(to_platform_device(dev)); + priv->edp = NULL; + } +} + +static const struct component_ops edp_ops = { + .bind = edp_bind, + .unbind = edp_unbind, +}; + +static int edp_dev_probe(struct platform_device *pdev) +{ + DBG(""); + return component_add(&pdev->dev, &edp_ops); +} + +static int edp_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_del(&pdev->dev, &edp_ops); + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "qcom,mdss-edp" }, + {} +}; + +static struct platform_driver edp_driver = { + .probe = edp_dev_probe, + .remove = edp_dev_remove, + .driver = { + .name = "msm_edp", + .of_match_table = dt_match, + }, +}; + +void __init msm_edp_register(void) +{ + DBG(""); + platform_driver_register(&edp_driver); +} + +void __exit msm_edp_unregister(void) +{ + DBG(""); + platform_driver_unregister(&edp_driver); +} + +/* Second part of initialization, the drm/kms level modeset_init */ +int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct platform_device *pdev = edp->pdev; + struct msm_drm_private *priv = dev->dev_private; + int ret; + + edp->encoder = encoder; + edp->dev = dev; + + edp->bridge = msm_edp_bridge_init(edp); + if (IS_ERR(edp->bridge)) { + ret = PTR_ERR(edp->bridge); + dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret); + edp->bridge = NULL; + goto fail; + } + + edp->connector = msm_edp_connector_init(edp); + if (IS_ERR(edp->connector)) { + ret = PTR_ERR(edp->connector); + dev_err(dev->dev, "failed to create eDP connector: %d\n", ret); + edp->connector = NULL; + goto fail; + } + + edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (edp->irq < 0) { + ret = edp->irq; + dev_err(dev->dev, "failed to get IRQ: %d\n", ret); + goto fail; + } + + ret = devm_request_irq(&pdev->dev, edp->irq, + edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "edp_isr", edp); + if (ret < 0) { + dev_err(dev->dev, "failed to request IRQ%u: %d\n", + edp->irq, ret); + goto fail; + } + + encoder->bridge = edp->bridge; + + priv->bridges[priv->num_bridges++] = edp->bridge; + priv->connectors[priv->num_connectors++] = edp->connector; + + return 0; + +fail: + /* bridge/connector are normally destroyed by drm */ + if (edp->bridge) { + edp_bridge_destroy(edp->bridge); + edp->bridge = NULL; + } + if (edp->connector) { + edp->connector->funcs->destroy(edp->connector); + edp->connector = NULL; + } + + return ret; +} diff --git 
a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h new file mode 100644 index 000000000000..ba5bedde5241 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __EDP_CONNECTOR_H__ +#define __EDP_CONNECTOR_H__ + +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include "drm_crtc.h" +#include "drm_dp_helper.h" +#include "msm_drv.h" + +#define edp_read(offset) msm_readl((offset)) +#define edp_write(offset, data) msm_writel((data), (offset)) + +struct edp_ctrl; +struct edp_aux; +struct edp_phy; + +struct msm_edp { + struct drm_device *dev; + struct platform_device *pdev; + + struct drm_connector *connector; + struct drm_bridge *bridge; + + /* the encoder we are hooked to (outside of eDP block) */ + struct drm_encoder *encoder; + + struct edp_ctrl *ctrl; + + int irq; +}; + +/* eDP bridge */ +struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp); +void edp_bridge_destroy(struct drm_bridge *bridge); + +/* eDP connector */ +struct drm_connector *msm_edp_connector_init(struct msm_edp *edp); + +/* AUX */ +void *msm_edp_aux_init(struct device *dev, void __iomem *regbase, + struct drm_dp_aux **drm_aux); +void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux); +irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr); +void msm_edp_aux_ctrl(struct edp_aux *aux, int enable); + +/* Phy */ +bool msm_edp_phy_ready(struct edp_phy *phy); +void msm_edp_phy_ctrl(struct edp_phy *phy, int enable); +void msm_edp_phy_vm_pe_init(struct edp_phy *phy); +void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1); +void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane); +void *msm_edp_phy_init(struct device *dev, void __iomem *regbase); + +/* Ctrl */ +irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl); +void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on); +int msm_edp_ctrl_init(struct msm_edp *edp); +void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl); +bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl); +int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, + struct drm_connector *connector, struct edid **edid); +int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, + const struct drm_display_mode *mode, + const struct drm_display_info *info); +/* @pixel_rate is in kHz */ +bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl, + u32 pixel_rate, u32 *pm, u32 *pn); + +#endif /* __EDP_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h new file mode 100644 index 000000000000..a29f1df15143 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp.xml.h @@ -0,0 +1,292 @@ +#ifndef EDP_XML +#define EDP_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) +- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) + +Copyright (C) 2013-2014 by the following authors: +- Rob Clark <robdclark@gmail.com> (robclark) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum edp_color_depth { + EDP_6BIT = 0, + EDP_8BIT = 1, + EDP_10BIT = 2, + EDP_12BIT = 3, + EDP_16BIT = 4, +}; + +enum edp_component_format { + EDP_RGB = 0, + EDP_YUV422 = 1, + EDP_YUV444 = 2, +}; + +#define REG_EDP_MAINLINK_CTRL 0x00000004 +#define EDP_MAINLINK_CTRL_ENABLE 0x00000001 +#define EDP_MAINLINK_CTRL_RESET 0x00000002 + +#define REG_EDP_STATE_CTRL 0x00000008 +#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001 +#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002 +#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004 +#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008 +#define EDP_STATE_CTRL_PRBS7 0x00000010 +#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020 +#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040 +#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080 + +#define REG_EDP_CONFIGURATION_CTRL 0x0000000c +#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001 +#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002 +#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004 +#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030 +#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4 +static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val) +{ + return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK; +} +#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040 +#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100 +#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8 +static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val) +{ + return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK; +} + +#define REG_EDP_SOFTWARE_MVID 0x00000014 + +#define REG_EDP_SOFTWARE_NVID 0x00000018 + +#define REG_EDP_TOTAL_HOR_VER 0x0000001c +#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff +#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0 +static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val) +{ + return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK; +} +#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000 +#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16 +static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val) +{ + return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK; +} + +#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020 +#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff +#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0 +static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val) +{ + return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK; +} +#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000 +#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16 +static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val) +{ + return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK; +} + +#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0 +static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val) +{ + return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK; +} +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000 +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16 +static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val) +{ + return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & 
EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK; +} +#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000 + +#define REG_EDP_ACTIVE_HOR_VER 0x00000028 +#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff +#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0 +static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val) +{ + return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK; +} +#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000 +#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16 +static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val) +{ + return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK; +} + +#define REG_EDP_MISC1_MISC0 0x0000002c +#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff +#define EDP_MISC1_MISC0_MISC0__SHIFT 0 +static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK; +} +#define EDP_MISC1_MISC0_SYNC 0x00000001 +#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006 +#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1 +static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val) +{ + return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK; +} +#define EDP_MISC1_MISC0_CEA 0x00000008 +#define EDP_MISC1_MISC0_BT709_5 0x00000010 +#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0 +#define EDP_MISC1_MISC0_COLOR__SHIFT 5 +static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val) +{ + return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK; +} +#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00 +#define EDP_MISC1_MISC0_MISC1__SHIFT 8 +static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK; +} +#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100 +#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600 +#define EDP_MISC1_MISC0_STEREO__SHIFT 9 +static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val) +{ + return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK; +} + +#define REG_EDP_PHY_CTRL 0x00000074 +#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001 +#define EDP_PHY_CTRL_SW_RESET 0x00000004 + +#define REG_EDP_MAINLINK_READY 0x00000084 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010 +#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020 + +#define REG_EDP_AUX_CTRL 0x00000300 +#define EDP_AUX_CTRL_ENABLE 0x00000001 +#define EDP_AUX_CTRL_RESET 0x00000002 + +#define REG_EDP_INTERRUPT_REG_1 0x00000308 +#define EDP_INTERRUPT_REG_1_HPD 0x00000001 +#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002 +#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010 +#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080 +#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100 +#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200 +#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400 +#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800 +#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000 +#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000 +#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000 +#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000 +#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000 +#define 
EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000 +#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000 +#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000 +#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000 +#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000 +#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000 +#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000 + +#define REG_EDP_INTERRUPT_REG_2 0x0000030c +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001 +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002 +#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010 +#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020 +#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000200 +#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080 +#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400 +#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800 + +#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310 + +#define REG_EDP_AUX_DATA 0x00000314 +#define EDP_AUX_DATA_READ 0x00000001 +#define EDP_AUX_DATA_DATA__MASK 0x0000ff00 +#define EDP_AUX_DATA_DATA__SHIFT 8 +static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val) +{ + return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK; +} +#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000 +#define EDP_AUX_DATA_INDEX__SHIFT 16 +static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val) +{ + return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK; +} +#define EDP_AUX_DATA_INDEX_WRITE 0x80000000 + +#define REG_EDP_AUX_TRANS_CTRL 0x00000318 +#define EDP_AUX_TRANS_CTRL_I2C 0x00000100 +#define EDP_AUX_TRANS_CTRL_GO 0x00000200 + +#define REG_EDP_AUX_STATUS 0x00000324 + +static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; } + +static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; } + +#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510 + +#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514 + +#define REG_EDP_PHY_GLB_MISC9 0x00000518 + +#define REG_EDP_PHY_GLB_CFG 0x00000528 + +#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c + +#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598 + + +#endif /* EDP_XML */ diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c new file mode 100644 index 000000000000..5f5a84f6074c --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_aux.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "edp.h" +#include "edp.xml.h" + +#define AUX_CMD_FIFO_LEN 144 +#define AUX_CMD_NATIVE_MAX 16 +#define AUX_CMD_I2C_MAX 128 + +#define EDP_INTR_AUX_I2C_ERR \ + (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ + EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER) +#define EDP_INTR_TRANS_STATUS \ + (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR) + +struct edp_aux { + void __iomem *base; + bool msg_err; + + struct completion msg_comp; + + /* To prevent the message transaction routine from reentry. */ + struct mutex msg_mutex; + + struct drm_dp_aux drm_aux; +}; +#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux) + +static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) +{ + u32 data[4]; + u32 reg, len; + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *msgdata = msg->buffer; + int i; + + if (read) + len = 4; + else + len = msg->size + 4; + + /* + * cmd fifo only has depth of 144 bytes + */ + if (len > AUX_CMD_FIFO_LEN) + return -EINVAL; + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (read) + data[0] |= BIT(4); /* R/W */ + + data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */ + data[2] = msg->address & 0xff; /* addr[7:0] */ + data[3] = (msg->size - 1) & 0xff; /* len[7:0] */ + + for (i = 0; i < len; i++) { + reg = (i < 4) ? data[i] : msgdata[i - 4]; + reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */ + if (i == 0) + reg |= EDP_AUX_DATA_INDEX_WRITE; + edp_write(aux->base + REG_EDP_AUX_DATA, reg); + } + + reg = 0; /* Transaction number is always 1 */ + if (!native) /* i2c */ + reg |= EDP_AUX_TRANS_CTRL_I2C; + + reg |= EDP_AUX_TRANS_CTRL_GO; + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg); + + return 0; +} + +static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + int i; + u32 len = msg->size; + + edp_write(aux->base + REG_EDP_AUX_DATA, + EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */ + + dp = msg->buffer; + + /* discard first byte */ + data = edp_read(aux->base + REG_EDP_AUX_DATA); + for (i = 0; i < len; i++) { + data = edp_read(aux->base + REG_EDP_AUX_DATA); + dp[i] = (u8)((data >> 8) & 0xff); + } + + return 0; +} + +/* + * This function does the real job to process an AUX transaction. + * It will call msm_edp_aux_ctrl() function to reset the AUX channel, + * if the waiting is timeout. + * The caller who triggers the transaction should avoid the + * msm_edp_aux_ctrl() running concurrently in other threads, i.e. + * start transaction only when AUX channel is fully enabled. + */ +ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg) +{ + struct edp_aux *aux = to_edp_aux(drm_aux); + ssize_t ret; + bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + /* Ignore address only message */ + if ((msg->size == 0) || (msg->buffer == NULL)) { + msg->reply = native ? 
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + return msg->size; + } + + /* msg sanity check */ + if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) || + (msg->size > AUX_CMD_I2C_MAX)) { + pr_err("%s: invalid msg: size(%d), request(%x)\n", + __func__, msg->size, msg->request); + return -EINVAL; + } + + mutex_lock(&aux->msg_mutex); + + aux->msg_err = false; + reinit_completion(&aux->msg_comp); + + ret = edp_msg_fifo_tx(aux, msg); + if (ret < 0) + goto unlock_exit; + + DBG("wait_for_completion"); + ret = wait_for_completion_timeout(&aux->msg_comp, 300); + if (ret <= 0) { + /* + * Clear GO and reset AUX channel + * to cancel the current transaction. + */ + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); + msm_edp_aux_ctrl(aux, 1); + pr_err("%s: aux timeout, %d\n", __func__, ret); + goto unlock_exit; + } + DBG("completion"); + + if (!aux->msg_err) { + if (read) { + ret = edp_msg_fifo_rx(aux, msg); + if (ret < 0) + goto unlock_exit; + } + + msg->reply = native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + } else { + /* Reply defer to retry */ + msg->reply = native ? + DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + /* + * The sleep time in caller is not long enough to make sure + * our H/W completes transactions. Add more defer time here. + */ + msleep(100); + } + + /* Return requested size for success or retry */ + ret = msg->size; + +unlock_exit: + mutex_unlock(&aux->msg_mutex); + return ret; +} + +void *msm_edp_aux_init(struct device *dev, void __iomem *regbase, + struct drm_dp_aux **drm_aux) +{ + struct edp_aux *aux = NULL; + int ret; + + DBG(""); + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) + return NULL; + + aux->base = regbase; + mutex_init(&aux->msg_mutex); + init_completion(&aux->msg_comp); + + aux->drm_aux.name = "msm_edp_aux"; + aux->drm_aux.dev = dev; + aux->drm_aux.transfer = edp_aux_transfer; + ret = drm_dp_aux_register(&aux->drm_aux); + if (ret) { + pr_err("%s: failed to register drm aux: %d\n", __func__, ret); + mutex_destroy(&aux->msg_mutex); + } + + if (drm_aux && aux) + *drm_aux = &aux->drm_aux; + + return aux; +} + +void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux) +{ + if (aux) { + drm_dp_aux_unregister(&aux->drm_aux); + mutex_destroy(&aux->msg_mutex); + } +} + +irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr) +{ + if (isr & EDP_INTR_TRANS_STATUS) { + DBG("isr=%x", isr); + edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0); + + if (isr & EDP_INTR_AUX_I2C_ERR) + aux->msg_err = true; + else + aux->msg_err = false; + + complete(&aux->msg_comp); + } + + return IRQ_HANDLED; +} + +void msm_edp_aux_ctrl(struct edp_aux *aux, int enable) +{ + u32 data; + + DBG("enable=%d", enable); + data = edp_read(aux->base + REG_EDP_AUX_CTRL); + + if (enable) { + data |= EDP_AUX_CTRL_RESET; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + /* Make sure full reset */ + wmb(); + usleep_range(500, 1000); + + data &= ~EDP_AUX_CTRL_RESET; + data |= EDP_AUX_CTRL_ENABLE; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + } else { + data &= ~EDP_AUX_CTRL_ENABLE; + edp_write(aux->base + REG_EDP_AUX_CTRL, data); + } +} + diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c new file mode 100644 index 000000000000..2bc73f82f3f5 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_bridge.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "edp.h" + +struct edp_bridge { + struct drm_bridge base; + struct msm_edp *edp; +}; +#define to_edp_bridge(x) container_of(x, struct edp_bridge, base) + +void edp_bridge_destroy(struct drm_bridge *bridge) +{ +} + +static void edp_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct edp_bridge *edp_bridge = to_edp_bridge(bridge); + struct msm_edp *edp = edp_bridge->edp; + + DBG(""); + msm_edp_ctrl_power(edp->ctrl, true); +} + +static void edp_bridge_enable(struct drm_bridge *bridge) +{ + DBG(""); +} + +static void edp_bridge_disable(struct drm_bridge *bridge) +{ + DBG(""); +} + +static void edp_bridge_post_disable(struct drm_bridge *bridge) +{ + struct edp_bridge *edp_bridge = to_edp_bridge(bridge); + struct msm_edp *edp = edp_bridge->edp; + + DBG(""); + msm_edp_ctrl_power(edp->ctrl, false); +} + +static void edp_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = bridge->dev; + struct drm_connector *connector; + struct edp_bridge *edp_bridge = to_edp_bridge(bridge); + struct msm_edp *edp = edp_bridge->edp; + + DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", + mode->base.id, mode->name, + mode->vrefresh, mode->clock, + mode->hdisplay, mode->hsync_start, + mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, + mode->vsync_end, mode->vtotal, + mode->type, mode->flags); + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if ((connector->encoder != NULL) && + (connector->encoder->bridge == bridge)) { + msm_edp_ctrl_timing_cfg(edp->ctrl, + adjusted_mode, &connector->display_info); + break; + } + } +} + +static const struct drm_bridge_funcs edp_bridge_funcs = { + .pre_enable = edp_bridge_pre_enable, + .enable = edp_bridge_enable, + .disable = edp_bridge_disable, + .post_disable = edp_bridge_post_disable, + .mode_set = edp_bridge_mode_set, +}; + +/* initialize bridge */ +struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp) +{ + struct drm_bridge *bridge = NULL; + struct edp_bridge *edp_bridge; + int ret; + + edp_bridge = devm_kzalloc(edp->dev->dev, + sizeof(*edp_bridge), GFP_KERNEL); + if (!edp_bridge) { + ret = -ENOMEM; + goto fail; + } + + edp_bridge->edp = edp; + + bridge = &edp_bridge->base; + bridge->funcs = &edp_bridge_funcs; + + ret = drm_bridge_attach(edp->dev, bridge); + if (ret) + goto fail; + + return bridge; + +fail: + if (bridge) + edp_bridge_destroy(bridge); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c new file mode 100644 index 000000000000..d8812e84da54 --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "drm/drm_edid.h" +#include "msm_kms.h" +#include "edp.h" + +struct edp_connector { + struct drm_connector base; + struct msm_edp *edp; +}; +#define to_edp_connector(x) container_of(x, struct edp_connector, base) + +static enum drm_connector_status edp_connector_detect( + struct drm_connector *connector, bool force) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + + DBG(""); + return msm_edp_ctrl_panel_connected(edp->ctrl) ? + connector_status_connected : connector_status_disconnected; +} + +static void edp_connector_destroy(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + + DBG(""); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + + kfree(edp_connector); +} + +static int edp_connector_get_modes(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + + struct edid *drm_edid = NULL; + int ret = 0; + + DBG(""); + ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid); + if (ret) + return ret; + + drm_mode_connector_update_edid_property(connector, drm_edid); + if (drm_edid) + ret = drm_add_edid_modes(connector, drm_edid); + + return ret; +} + +static int edp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + struct msm_edp *edp = edp_connector->edp; + struct msm_drm_private *priv = connector->dev->dev_private; + struct msm_kms *kms = priv->kms; + long actual, requested; + + requested = 1000 * mode->clock; + actual = kms->funcs->round_pixclk(kms, + requested, edp_connector->edp->encoder); + + DBG("requested=%ld, actual=%ld", requested, actual); + if (actual != requested) + return MODE_CLOCK_RANGE; + + if (!msm_edp_ctrl_pixel_clock_valid( + edp->ctrl, mode->clock, NULL, NULL)) + return MODE_CLOCK_RANGE; + + /* Invalidate all modes if color format is not supported */ + if (connector->display_info.bpc > 8) + return MODE_BAD; + + return MODE_OK; +} + +static struct drm_encoder * +edp_connector_best_encoder(struct drm_connector *connector) +{ + struct edp_connector *edp_connector = to_edp_connector(connector); + + DBG(""); + return edp_connector->edp->encoder; +} + +static const struct drm_connector_funcs edp_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = edp_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = edp_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { + .get_modes = edp_connector_get_modes, + .mode_valid = edp_connector_mode_valid, + .best_encoder = edp_connector_best_encoder, +}; + +/* initialize connector */ +struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) +{ + struct drm_connector *connector = NULL; + struct edp_connector *edp_connector; + int ret; + + edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL); + if (!edp_connector) { 
+ ret = -ENOMEM; + goto fail; + } + + edp_connector->edp = edp; + + connector = &edp_connector->base; + + ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + if (ret) + goto fail; + + drm_connector_helper_add(connector, &edp_connector_helper_funcs); + + /* We don't support HPD, so only poll status until connected. */ + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + /* Display driver doesn't support interlace now. */ + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + ret = drm_connector_register(connector); + if (ret) + goto fail; + + return connector; + +fail: + if (connector) + edp_connector_destroy(connector); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c new file mode 100644 index 000000000000..3e246210c46f --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -0,0 +1,1373 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#include "drm_crtc.h" +#include "drm_dp_helper.h" +#include "drm_edid.h" +#include "edp.h" +#include "edp.xml.h" + +#define VDDA_MIN_UV 1800000 /* uV units */ +#define VDDA_MAX_UV 1800000 /* uV units */ +#define VDDA_UA_ON_LOAD 100000 /* uA units */ +#define VDDA_UA_OFF_LOAD 100 /* uA units */ + +#define DPCD_LINK_VOLTAGE_MAX 4 +#define DPCD_LINK_PRE_EMPHASIS_MAX 4 + +#define EDP_LINK_BW_MAX DP_LINK_BW_2_7 + +/* Link training return value */ +#define EDP_TRAIN_FAIL -1 +#define EDP_TRAIN_SUCCESS 0 +#define EDP_TRAIN_RECONFIG 1 + +#define EDP_CLK_MASK_AHB BIT(0) +#define EDP_CLK_MASK_AUX BIT(1) +#define EDP_CLK_MASK_LINK BIT(2) +#define EDP_CLK_MASK_PIXEL BIT(3) +#define EDP_CLK_MASK_MDP_CORE BIT(4) +#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL) +#define EDP_CLK_MASK_AUX_CHAN \ + (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE) +#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN) + +#define EDP_BACKLIGHT_MAX 255 + +#define EDP_INTR_STATUS1 \ + (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \ + EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \ + EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \ + EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \ + EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR) +#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2) +#define EDP_INTR_STATUS2 \ + (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \ + EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \ + EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED) +#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2) + +struct edp_ctrl { + struct platform_device *pdev; + + void __iomem *base; + + /* regulators */ + struct regulator *vdda_vreg; + struct regulator *lvl_vreg; + + /* clocks */ + struct clk *aux_clk; + struct clk *pixel_clk; + struct clk *ahb_clk; + struct clk *link_clk; + struct clk *mdp_core_clk; + + /* gpios */ + struct 
gpio_desc *panel_en_gpio; + struct gpio_desc *panel_hpd_gpio; + + /* completion and mutex */ + struct completion idle_comp; + struct mutex dev_mutex; /* To protect device power status */ + + /* work queue */ + struct work_struct on_work; + struct work_struct off_work; + struct workqueue_struct *workqueue; + + /* Interrupt register lock */ + spinlock_t irq_lock; + + bool edp_connected; + bool power_on; + + /* edid raw data */ + struct edid *edid; + + struct drm_dp_link dp_link; + struct drm_dp_aux *drm_aux; + + /* dpcd raw data */ + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + + /* Link status */ + u8 link_rate; + u8 lane_cnt; + u8 v_level; + u8 p_level; + + /* Timing status */ + u8 interlaced; + u32 pixel_rate; /* in kHz */ + u32 color_depth; + + struct edp_aux *aux; + struct edp_phy *phy; +}; + +struct edp_pixel_clk_div { + u32 rate; /* in kHz */ + u32 m; + u32 n; +}; + +#define EDP_PIXEL_CLK_NUM 8 +static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = { + { /* Link clock = 162MHz, source clock = 810MHz */ + {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */ + {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */ + {148500, 11, 60}, /* FHD 1920x1080@60Hz */ + {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */ + {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */ + {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */ + {138530, 33, 193}, /* AUO B116HAN03.0 Panel */ + {141400, 48, 275}, /* AUO B133HTN01.2 Panel */ + }, + { /* Link clock = 270MHz, source clock = 675MHz */ + {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */ + {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */ + {148500, 11, 50}, /* FHD 1920x1080@60Hz */ + {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */ + {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */ + {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */ + {138530, 63, 307}, /* AUO B116HAN03.0 Panel */ + {141400, 53, 253}, /* AUO B133HTN01.2 Panel */ + }, +}; + +static int edp_clk_init(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + int ret; + + ctrl->aux_clk = devm_clk_get(dev, "core_clk"); + if (IS_ERR(ctrl->aux_clk)) { + ret = PTR_ERR(ctrl->aux_clk); + pr_err("%s: Can't find aux_clk, %d\n", __func__, ret); + ctrl->aux_clk = NULL; + return ret; + } + + ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk"); + if (IS_ERR(ctrl->pixel_clk)) { + ret = PTR_ERR(ctrl->pixel_clk); + pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret); + ctrl->pixel_clk = NULL; + return ret; + } + + ctrl->ahb_clk = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(ctrl->ahb_clk)) { + ret = PTR_ERR(ctrl->ahb_clk); + pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret); + ctrl->ahb_clk = NULL; + return ret; + } + + ctrl->link_clk = devm_clk_get(dev, "link_clk"); + if (IS_ERR(ctrl->link_clk)) { + ret = PTR_ERR(ctrl->link_clk); + pr_err("%s: Can't find link_clk, %d\n", __func__, ret); + ctrl->link_clk = NULL; + return ret; + } + + /* need mdp core clock to receive irq */ + ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); + if (IS_ERR(ctrl->mdp_core_clk)) { + ret = PTR_ERR(ctrl->mdp_core_clk); + pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret); + ctrl->mdp_core_clk = NULL; + return ret; + } + + return 0; +} + +static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask) +{ + int ret; + + DBG("mask=%x", clk_mask); + /* ahb_clk should be enabled first */ + if (clk_mask & EDP_CLK_MASK_AHB) { + ret = clk_prepare_enable(ctrl->ahb_clk); + if (ret) { + pr_err("%s: Failed to enable ahb clk\n", __func__); + goto f0; + } + } + if (clk_mask & EDP_CLK_MASK_AUX) { + ret = 
clk_set_rate(ctrl->aux_clk, 19200000); + if (ret) { + pr_err("%s: Failed to set rate aux clk\n", __func__); + goto f1; + } + ret = clk_prepare_enable(ctrl->aux_clk); + if (ret) { + pr_err("%s: Failed to enable aux clk\n", __func__); + goto f1; + } + } + /* Need to set rate and enable link_clk prior to pixel_clk */ + if (clk_mask & EDP_CLK_MASK_LINK) { + DBG("edp->link_clk, set_rate %ld", + (unsigned long)ctrl->link_rate * 27000000); + ret = clk_set_rate(ctrl->link_clk, + (unsigned long)ctrl->link_rate * 27000000); + if (ret) { + pr_err("%s: Failed to set rate to link clk\n", + __func__); + goto f2; + } + + ret = clk_prepare_enable(ctrl->link_clk); + if (ret) { + pr_err("%s: Failed to enable link clk\n", __func__); + goto f2; + } + } + if (clk_mask & EDP_CLK_MASK_PIXEL) { + DBG("edp->pixel_clk, set_rate %ld", + (unsigned long)ctrl->pixel_rate * 1000); + ret = clk_set_rate(ctrl->pixel_clk, + (unsigned long)ctrl->pixel_rate * 1000); + if (ret) { + pr_err("%s: Failed to set rate to pixel clk\n", + __func__); + goto f3; + } + + ret = clk_prepare_enable(ctrl->pixel_clk); + if (ret) { + pr_err("%s: Failed to enable pixel clk\n", __func__); + goto f3; + } + } + if (clk_mask & EDP_CLK_MASK_MDP_CORE) { + ret = clk_prepare_enable(ctrl->mdp_core_clk); + if (ret) { + pr_err("%s: Failed to enable mdp core clk\n", __func__); + goto f4; + } + } + + return 0; + +f4: + if (clk_mask & EDP_CLK_MASK_PIXEL) + clk_disable_unprepare(ctrl->pixel_clk); +f3: + if (clk_mask & EDP_CLK_MASK_LINK) + clk_disable_unprepare(ctrl->link_clk); +f2: + if (clk_mask & EDP_CLK_MASK_AUX) + clk_disable_unprepare(ctrl->aux_clk); +f1: + if (clk_mask & EDP_CLK_MASK_AHB) + clk_disable_unprepare(ctrl->ahb_clk); +f0: + return ret; +} + +static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask) +{ + if (clk_mask & EDP_CLK_MASK_MDP_CORE) + clk_disable_unprepare(ctrl->mdp_core_clk); + if (clk_mask & EDP_CLK_MASK_PIXEL) + clk_disable_unprepare(ctrl->pixel_clk); + if (clk_mask & EDP_CLK_MASK_LINK) + clk_disable_unprepare(ctrl->link_clk); + if (clk_mask & EDP_CLK_MASK_AUX) + clk_disable_unprepare(ctrl->aux_clk); + if (clk_mask & EDP_CLK_MASK_AHB) + clk_disable_unprepare(ctrl->ahb_clk); +} + +static int edp_regulator_init(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + + DBG(""); + ctrl->vdda_vreg = devm_regulator_get(dev, "vdda"); + if (IS_ERR(ctrl->vdda_vreg)) { + pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__, + PTR_ERR(ctrl->vdda_vreg)); + ctrl->vdda_vreg = NULL; + return PTR_ERR(ctrl->vdda_vreg); + } + ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd"); + if (IS_ERR(ctrl->lvl_vreg)) { + pr_err("Could not get lvl-vdd reg, %ld", + PTR_ERR(ctrl->lvl_vreg)); + ctrl->lvl_vreg = NULL; + return PTR_ERR(ctrl->lvl_vreg); + } + + return 0; +} + +static int edp_regulator_enable(struct edp_ctrl *ctrl) +{ + int ret; + + ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV); + if (ret) { + pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret); + goto vdda_set_fail; + } + + ret = regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_ON_LOAD); + if (ret < 0) { + pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__); + goto vdda_set_fail; + } + + ret = regulator_enable(ctrl->vdda_vreg); + if (ret) { + pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__); + goto vdda_enable_fail; + } + + ret = regulator_enable(ctrl->lvl_vreg); + if (ret) { + pr_err("Failed to enable lvl-vdd reg regulator, %d", ret); + goto lvl_enable_fail; + } + + DBG("exit"); + return 0; + 
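+/* On failure, undo the regulator set/enable steps above in reverse order. */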
+lvl_enable_fail: + regulator_disable(ctrl->vdda_vreg); +vdda_enable_fail: + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); +vdda_set_fail: + return ret; +} + +static void edp_regulator_disable(struct edp_ctrl *ctrl) +{ + regulator_disable(ctrl->lvl_vreg); + regulator_disable(ctrl->vdda_vreg); + regulator_set_optimum_mode(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD); +} + +static int edp_gpio_config(struct edp_ctrl *ctrl) +{ + struct device *dev = &ctrl->pdev->dev; + int ret; + + ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd"); + if (IS_ERR(ctrl->panel_hpd_gpio)) { + ret = PTR_ERR(ctrl->panel_hpd_gpio); + ctrl->panel_hpd_gpio = NULL; + pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret); + return ret; + } + + ret = gpiod_direction_input(ctrl->panel_hpd_gpio); + if (ret) { + pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret); + return ret; + } + + ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en"); + if (IS_ERR(ctrl->panel_en_gpio)) { + ret = PTR_ERR(ctrl->panel_en_gpio); + ctrl->panel_en_gpio = NULL; + pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret); + return ret; + } + + ret = gpiod_direction_output(ctrl->panel_en_gpio, 0); + if (ret) { + pr_err("%s: Set direction for panel_en failed, %d\n", + __func__, ret); + return ret; + } + + DBG("gpio on"); + + return 0; +} + +static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable) +{ + unsigned long flags; + + DBG("%d", enable); + spin_lock_irqsave(&ctrl->irq_lock, flags); + if (enable) { + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1); + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2); + } else { + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0); + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0); + } + spin_unlock_irqrestore(&ctrl->irq_lock, flags); + DBG("exit"); +} + +static void edp_fill_link_cfg(struct edp_ctrl *ctrl) +{ + u32 prate; + u32 lrate; + u32 bpp; + u8 max_lane = ctrl->dp_link.num_lanes; + u8 lane; + + prate = ctrl->pixel_rate; + bpp = ctrl->color_depth * 3; + + /* + * By default, use the maximum link rate and minimum lane count, + * so that we can do rate down shift during link training. 
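+ * + * Required bandwidth is pixel_rate (kHz) * bpp / 8, in kByte/s; each + * lane provides link_rate * 270000 / 10 kByte/s once the 10-bit line + * coding is accounted for, so the loop below doubles the lane count + * until the link bandwidth covers the pixel bandwidth.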
+ */ + ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + + prate *= bpp; + prate /= 8; /* in kByte */ + + lrate = 270000; /* in kHz */ + lrate *= ctrl->link_rate; + lrate /= 10; /* in kByte, 10 bits --> 8 bits */ + + for (lane = 1; lane <= max_lane; lane <<= 1) { + if (lrate >= prate) + break; + lrate <<= 1; + } + + ctrl->lane_cnt = lane; + DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt); +} + +static void edp_config_ctrl(struct edp_ctrl *ctrl) +{ + u32 data; + enum edp_color_depth depth; + + data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1); + + if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING; + + depth = EDP_6BIT; + if (ctrl->color_depth == 8) + depth = EDP_8BIT; + + data |= EDP_CONFIGURATION_CTRL_COLOR(depth); + + if (!ctrl->interlaced) /* progressive */ + data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE; + + data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK | + EDP_CONFIGURATION_CTRL_STATIC_MVID); + + edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data); +} + +static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state) +{ + edp_write(ctrl->base + REG_EDP_STATE_CTRL, state); + /* Make sure H/W status is set */ + wmb(); +} + +static int edp_lane_set_write(struct edp_ctrl *ctrl, + u8 voltage_level, u8 pre_emphasis_level) +{ + int i; + u8 buf[4]; + + if (voltage_level >= DPCD_LINK_VOLTAGE_MAX) + voltage_level |= 0x04; + + if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX) + pre_emphasis_level |= 0x04; + + pre_emphasis_level <<= 3; + + for (i = 0; i < 4; i++) + buf[i] = voltage_level | pre_emphasis_level; + + DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level); + if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) { + pr_err("%s: Set sw/pe to panel failed\n", __func__); + return -ENOLINK; + } + + return 0; +} + +static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern) +{ + u8 p = pattern; + + DBG("pattern=%x", p); + if (drm_dp_dpcd_write(ctrl->drm_aux, + DP_TRAINING_PATTERN_SET, &p, 1) < 1) { + pr_err("%s: Set training pattern to panel failed\n", __func__); + return -ENOLINK; + } + + return 0; +} + +static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl, + const u8 *link_status) +{ + int i; + u8 max = 0; + u8 data; + + /* use the max level across lanes */ + for (i = 0; i < ctrl->lane_cnt; i++) { + data = drm_dp_get_adjust_request_voltage(link_status, i); + DBG("lane=%d req_voltage_swing=0x%x", i, data); + if (max < data) + max = data; + } + + ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + + /* use the max level across lanes */ + max = 0; + for (i = 0; i < ctrl->lane_cnt; i++) { + data = drm_dp_get_adjust_request_pre_emphasis(link_status, i); + DBG("lane=%d req_pre_emphasis=0x%x", i, data); + if (max < data) + max = data; + } + + ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level); +} + +static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train) +{ + int cnt = 10; + u32 data; + u32 shift = train - 1; + + DBG("train=%d", train); + + edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift); + while (--cnt) { + data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY); + if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift)) + break; + } + + if (cnt == 0) + pr_err("%s: set link_train=%d failed\n", __func__, train); +} + +static const u8 vm_pre_emphasis[4][4] = { + {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */ + {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */ + {0x03, 0x06, 0xFF, 0xFF}, /* 
pe2, 6.0 db */ + {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ +}; + +/* voltage swing, 0.2v and 1.0v are not supported */ +static const u8 vm_voltage_swing[4][4] = { + {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */ + {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */ + {0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */ + {0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */ +}; + +static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl) +{ + u32 value0; + u32 value1; + + DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level); + + value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)]; + value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)]; + + /* Configure host and panel only if both values are allowed */ + if (value0 != 0xFF && value1 != 0xFF) { + msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1); + return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level); + } + + return -EINVAL; +} + +static int edp_start_link_train_1(struct edp_ctrl *ctrl) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + u8 old_v_level; + int tries; + int ret; + int rlen; + + DBG(""); + + edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + ret = edp_train_pattern_set_write(ctrl, + DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN); + if (ret) + return ret; + + tries = 0; + old_v_level = ctrl->v_level; + while (1) { + drm_dp_link_train_clock_recovery_delay(ctrl->dpcd); + + rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status); + if (rlen < DP_LINK_STATUS_SIZE) { + pr_err("%s: read link status failed\n", __func__); + return -ENOLINK; + } + if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) { + ret = 0; + break; + } + + if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) { + ret = -1; + break; + } + + if (old_v_level == ctrl->v_level) { + tries++; + if (tries >= 5) { + ret = -1; + break; + } + } else { + tries = 0; + old_v_level = ctrl->v_level; + } + + edp_sink_train_set_adjust(ctrl, link_status); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + } + + return ret; +} + +static int edp_start_link_train_2(struct edp_ctrl *ctrl) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + int tries = 0; + int ret; + int rlen; + + DBG(""); + + edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + + ret = edp_train_pattern_set_write(ctrl, + DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN); + if (ret) + return ret; + + while (1) { + drm_dp_link_train_channel_eq_delay(ctrl->dpcd); + + rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status); + if (rlen < DP_LINK_STATUS_SIZE) { + pr_err("%s: read link status failed\n", __func__); + return -ENOLINK; + } + if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) { + ret = 0; + break; + } + + tries++; + if (tries > 10) { + ret = -1; + break; + } + + edp_sink_train_set_adjust(ctrl, link_status); + ret = edp_voltage_pre_emphasise_set(ctrl); + if (ret) + return ret; + } + + return ret; +} + +static int edp_link_rate_down_shift(struct edp_ctrl *ctrl) +{ + u32 prate, lrate, bpp; + u8 rate, lane, max_lane; + int changed = 0; + + rate = ctrl->link_rate; + lane = ctrl->lane_cnt; + max_lane = ctrl->dp_link.num_lanes; + + bpp = ctrl->color_depth * 3; + prate = ctrl->pixel_rate; + prate *= bpp; + prate /= 8; /* in kByte */ + + if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) { + rate -= 4; /* reduce rate */ + changed++; + } + + if (changed) { + if (lane >= 1 && lane < max_lane) + lane <<= 1; /* increase lane */ + + lrate = 270000;
/* in kHz */ + lrate *= rate; + lrate /= 10; /* kByte, 10 bits --> 8 bits */ + lrate *= lane; + + DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d", + lrate, prate, rate, lane, + ctrl->pixel_rate, + bpp); + + if (lrate > prate) { + ctrl->link_rate = rate; + ctrl->lane_cnt = lane; + DBG("new rate=%d %d", rate, lane); + return 0; + } + } + + return -EINVAL; +} + +static int edp_clear_training_pattern(struct edp_ctrl *ctrl) +{ + int ret; + + ret = edp_train_pattern_set_write(ctrl, 0); + + drm_dp_link_train_channel_eq_delay(ctrl->dpcd); + + return ret; +} + +static int edp_do_link_train(struct edp_ctrl *ctrl) +{ + int ret; + struct drm_dp_link dp_link; + + DBG(""); + /* + * Set the current link rate and lane cnt to panel. They may have been + * adjusted and the values are different from them in DPCD CAP + */ + dp_link.num_lanes = ctrl->lane_cnt; + dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate); + dp_link.capabilities = ctrl->dp_link.capabilities; + if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0) + return EDP_TRAIN_FAIL; + + ctrl->v_level = 0; /* start from default level */ + ctrl->p_level = 0; + + edp_state_ctrl(ctrl, 0); + if (edp_clear_training_pattern(ctrl)) + return EDP_TRAIN_FAIL; + + ret = edp_start_link_train_1(ctrl); + if (ret < 0) { + if (edp_link_rate_down_shift(ctrl) == 0) { + DBG("link reconfig"); + ret = EDP_TRAIN_RECONFIG; + goto clear; + } else { + pr_err("%s: Training 1 failed", __func__); + ret = EDP_TRAIN_FAIL; + goto clear; + } + } + DBG("Training 1 completed successfully"); + + edp_state_ctrl(ctrl, 0); + if (edp_clear_training_pattern(ctrl)) + return EDP_TRAIN_FAIL; + + ret = edp_start_link_train_2(ctrl); + if (ret < 0) { + if (edp_link_rate_down_shift(ctrl) == 0) { + DBG("link reconfig"); + ret = EDP_TRAIN_RECONFIG; + goto clear; + } else { + pr_err("%s: Training 2 failed", __func__); + ret = EDP_TRAIN_FAIL; + goto clear; + } + } + DBG("Training 2 completed successfully"); + + edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO); +clear: + edp_clear_training_pattern(ctrl); + + return ret; +} + +static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync) +{ + u32 data; + enum edp_color_depth depth; + + data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0); + + if (sync) + data |= EDP_MISC1_MISC0_SYNC; + else + data &= ~EDP_MISC1_MISC0_SYNC; + + /* only legacy rgb mode supported */ + depth = EDP_6BIT; /* Default */ + if (ctrl->color_depth == 8) + depth = EDP_8BIT; + else if (ctrl->color_depth == 10) + depth = EDP_10BIT; + else if (ctrl->color_depth == 12) + depth = EDP_12BIT; + else if (ctrl->color_depth == 16) + depth = EDP_16BIT; + + data |= EDP_MISC1_MISC0_COLOR(depth); + + edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data); +} + +static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n) +{ + u32 n_multi, m_multi = 5; + + if (ctrl->link_rate == DP_LINK_BW_1_62) { + n_multi = 1; + } else if (ctrl->link_rate == DP_LINK_BW_2_7) { + n_multi = 2; + } else { + pr_err("%s: Invalid link rate, %d\n", __func__, + ctrl->link_rate); + return -EINVAL; + } + + edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi); + edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi); + + return 0; +} + +static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable) +{ + u32 data = 0; + + edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET); + /* Make sure fully reset */ + wmb(); + usleep_range(500, 1000); + + if (enable) + data |= EDP_MAINLINK_CTRL_ENABLE; + + edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data); +} + +static void 
edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable) +{ + if (enable) { + edp_regulator_enable(ctrl); + edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN); + msm_edp_phy_ctrl(ctrl->phy, 1); + msm_edp_aux_ctrl(ctrl->aux, 1); + gpiod_set_value(ctrl->panel_en_gpio, 1); + } else { + gpiod_set_value(ctrl->panel_en_gpio, 0); + msm_edp_aux_ctrl(ctrl->aux, 0); + msm_edp_phy_ctrl(ctrl->phy, 0); + edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN); + edp_regulator_disable(ctrl); + } +} + +static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable) +{ + u32 m, n; + + if (enable) { + /* Enable link channel clocks */ + edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN); + + msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt); + + msm_edp_phy_vm_pe_init(ctrl->phy); + + /* Make sure phy is programed */ + wmb(); + msm_edp_phy_ready(ctrl->phy); + + edp_config_ctrl(ctrl); + msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n); + edp_sw_mvid_nvid(ctrl, m, n); + edp_mainlink_ctrl(ctrl, 1); + } else { + edp_mainlink_ctrl(ctrl, 0); + + msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0); + edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN); + } +} + +static int edp_ctrl_training(struct edp_ctrl *ctrl) +{ + int ret; + + /* Do link training only when power is on */ + if (!ctrl->power_on) + return -EINVAL; + +train_start: + ret = edp_do_link_train(ctrl); + if (ret == EDP_TRAIN_RECONFIG) { + /* Re-configure main link */ + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_link_enable(ctrl, 0); + msm_edp_phy_ctrl(ctrl->phy, 0); + + /* Make sure link is fully disabled */ + wmb(); + usleep_range(500, 1000); + + msm_edp_phy_ctrl(ctrl->phy, 1); + edp_ctrl_link_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + goto train_start; + } + + return ret; +} + +static void edp_ctrl_on_worker(struct work_struct *work) +{ + struct edp_ctrl *ctrl = container_of( + work, struct edp_ctrl, on_work); + int ret; + + mutex_lock(&ctrl->dev_mutex); + + if (ctrl->power_on) { + DBG("already on"); + goto unlock_ret; + } + + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_link_enable(ctrl, 1); + + edp_ctrl_irq_enable(ctrl, 1); + ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link); + if (ret) + goto fail; + + ctrl->power_on = true; + + /* Start link training */ + ret = edp_ctrl_training(ctrl); + if (ret != EDP_TRAIN_SUCCESS) + goto fail; + + DBG("DONE"); + goto unlock_ret; + +fail: + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_link_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + ctrl->power_on = false; +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); +} + +static void edp_ctrl_off_worker(struct work_struct *work) +{ + struct edp_ctrl *ctrl = container_of( + work, struct edp_ctrl, off_work); + int ret; + + mutex_lock(&ctrl->dev_mutex); + + if (!ctrl->power_on) { + DBG("already off"); + goto unlock_ret; + } + + reinit_completion(&ctrl->idle_comp); + edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE); + + ret = wait_for_completion_timeout(&ctrl->idle_comp, + msecs_to_jiffies(500)); + if (ret <= 0) + DBG("%s: idle pattern timedout, %d\n", + __func__, ret); + + edp_state_ctrl(ctrl, 0); + + drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link); + + edp_ctrl_irq_enable(ctrl, 0); + + edp_ctrl_link_enable(ctrl, 0); + + edp_ctrl_phy_aux_enable(ctrl, 0); + + ctrl->power_on = false; + +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); +} + +irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl) +{ + u32 isr1, isr2, mask1, mask2; + u32 ack; + + DBG(""); + spin_lock(&ctrl->irq_lock); + isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1); + isr2 = 
edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2); + + mask1 = isr1 & EDP_INTR_MASK1; + mask2 = isr2 & EDP_INTR_MASK2; + + isr1 &= ~mask1; /* remove mask bits */ + isr2 &= ~mask2; + + DBG("isr=%x mask=%x isr2=%x mask2=%x", + isr1, mask1, isr2, mask2); + + ack = isr1 & EDP_INTR_STATUS1; + ack <<= 1; /* ack bits */ + ack |= mask1; + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack); + + ack = isr2 & EDP_INTR_STATUS2; + ack <<= 1; /* ack bits */ + ack |= mask2; + edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack); + spin_unlock(&ctrl->irq_lock); + + if (isr1 & EDP_INTERRUPT_REG_1_HPD) + DBG("edp_hpd"); + + if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO) + DBG("edp_video_ready"); + + if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) { + DBG("idle_patterns_sent"); + complete(&ctrl->idle_comp); + } + + msm_edp_aux_irq(ctrl->aux, isr1); + + return IRQ_HANDLED; +} + +void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) +{ + if (on) + queue_work(ctrl->workqueue, &ctrl->on_work); + else + queue_work(ctrl->workqueue, &ctrl->off_work); +} + +int msm_edp_ctrl_init(struct msm_edp *edp) +{ + struct edp_ctrl *ctrl = NULL; + struct device *dev = &edp->pdev->dev; + int ret; + + if (!edp) { + pr_err("%s: edp is NULL!\n", __func__); + return -EINVAL; + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + edp->ctrl = ctrl; + ctrl->pdev = edp->pdev; + + ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP"); + if (IS_ERR(ctrl->base)) + return PTR_ERR(ctrl->base); + + /* Get regulator, clock, gpio, pwm */ + ret = edp_regulator_init(ctrl); + if (ret) { + pr_err("%s:regulator init fail\n", __func__); + return ret; + } + ret = edp_clk_init(ctrl); + if (ret) { + pr_err("%s:clk init fail\n", __func__); + return ret; + } + ret = edp_gpio_config(ctrl); + if (ret) { + pr_err("%s:failed to configure GPIOs: %d", __func__, ret); + return ret; + } + + /* Init aux and phy */ + ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux); + if (!ctrl->aux || !ctrl->drm_aux) { + pr_err("%s:failed to init aux\n", __func__); + return ret; + } + + ctrl->phy = msm_edp_phy_init(dev, ctrl->base); + if (!ctrl->phy) { + pr_err("%s:failed to init phy\n", __func__); + goto err_destroy_aux; + } + + spin_lock_init(&ctrl->irq_lock); + mutex_init(&ctrl->dev_mutex); + init_completion(&ctrl->idle_comp); + + /* setup workqueue */ + ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0); + INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker); + INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker); + + return 0; + +err_destroy_aux: + msm_edp_aux_destroy(dev, ctrl->aux); + ctrl->aux = NULL; + return ret; +} + +void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl) +{ + if (!ctrl) + return; + + if (ctrl->workqueue) { + flush_workqueue(ctrl->workqueue); + destroy_workqueue(ctrl->workqueue); + ctrl->workqueue = NULL; + } + + if (ctrl->aux) { + msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux); + ctrl->aux = NULL; + } + + kfree(ctrl->edid); + ctrl->edid = NULL; + + mutex_destroy(&ctrl->dev_mutex); +} + +bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl) +{ + mutex_lock(&ctrl->dev_mutex); + DBG("connect status = %d", ctrl->edp_connected); + if (ctrl->edp_connected) { + mutex_unlock(&ctrl->dev_mutex); + return true; + } + + if (!ctrl->power_on) { + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + } + + if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd, + DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) { + pr_err("%s: AUX channel is NOT ready\n", __func__); + memset(ctrl->dpcd, 0,
DP_RECEIVER_CAP_SIZE); + } else { + ctrl->edp_connected = true; + } + + if (!ctrl->power_on) { + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + } + + DBG("exit: connect status=%d", ctrl->edp_connected); + + mutex_unlock(&ctrl->dev_mutex); + + return ctrl->edp_connected; +} + +int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, + struct drm_connector *connector, struct edid **edid) +{ + int ret = 0; + + mutex_lock(&ctrl->dev_mutex); + + if (ctrl->edid) { + if (edid) { + DBG("Just return edid buffer"); + *edid = ctrl->edid; + } + goto unlock_ret; + } + + if (!ctrl->power_on) { + edp_ctrl_phy_aux_enable(ctrl, 1); + edp_ctrl_irq_enable(ctrl, 1); + } + + ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link); + if (ret) { + pr_err("%s: read dpcd cap failed, %d\n", __func__, ret); + goto disable_ret; + } + + /* Initialize link rate as panel max link rate */ + ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + + ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc); + if (!ctrl->edid) { + pr_err("%s: edid read fail\n", __func__); + goto disable_ret; + } + + if (edid) + *edid = ctrl->edid; + +disable_ret: + if (!ctrl->power_on) { + edp_ctrl_irq_enable(ctrl, 0); + edp_ctrl_phy_aux_enable(ctrl, 0); + } +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); + return ret; +} + +int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, + const struct drm_display_mode *mode, + const struct drm_display_info *info) +{ + u32 hstart_from_sync, vstart_from_sync; + u32 data; + int ret = 0; + + mutex_lock(&ctrl->dev_mutex); + /* + * Need to keep color depth, pixel rate and + * interlaced information in ctrl context + */ + ctrl->color_depth = info->bpc; + ctrl->pixel_rate = mode->clock; + ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + + /* Fill initial link config based on passed in timing */ + edp_fill_link_cfg(ctrl); + + if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) { + pr_err("%s, fail to prepare enable ahb clk\n", __func__); + ret = -EINVAL; + goto unlock_ret; + } + edp_clock_synchrous(ctrl, 1); + + /* Configure eDP timing to HW */ + edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER, + EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) | + EDP_TOTAL_HOR_VER_VERT(mode->vtotal)); + + vstart_from_sync = mode->vtotal - mode->vsync_start; + hstart_from_sync = mode->htotal - mode->hsync_start; + edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC, + EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) | + EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync)); + + data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT( + mode->vsync_end - mode->vsync_start); + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ( + mode->hsync_end - mode->hsync_start); + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC; + edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data); + + edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER, + EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) | + EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay)); + + edp_clk_disable(ctrl, EDP_CLK_MASK_AHB); + +unlock_ret: + mutex_unlock(&ctrl->dev_mutex); + return ret; +} + +bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl, + u32 pixel_rate, u32 *pm, u32 *pn) +{ + const struct edp_pixel_clk_div *divs; + u32 err = 1; /* 1% error tolerance */ + u32 clk_err; + int i; + + if (ctrl->link_rate == DP_LINK_BW_1_62) { + divs = clk_divs[0]; + } else if (ctrl->link_rate == DP_LINK_BW_2_7) { + divs = clk_divs[1]; + } else { + pr_err("%s: 
Invalid link rate,%d\n", __func__, ctrl->link_rate); + return false; + } + + for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) { + clk_err = abs(divs[i].rate - pixel_rate); + if ((divs[i].rate * err / 100) >= clk_err) { + if (pm) + *pm = divs[i].m; + if (pn) + *pn = divs[i].n; + return true; + } + } + + DBG("pixel clock %d(kHz) not supported", pixel_rate); + + return false; +} + diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c new file mode 100644 index 000000000000..36bb8933e9ee --- /dev/null +++ b/drivers/gpu/drm/msm/edp/edp_phy.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "edp.h" +#include "edp.xml.h" + +#define EDP_MAX_LANE 4 + +struct edp_phy { + void __iomem *base; +}; + +bool msm_edp_phy_ready(struct edp_phy *phy) +{ + u32 status; + int cnt = 100; + + while (--cnt) { + status = edp_read(phy->base + + REG_EDP_PHY_GLB_PHY_STATUS); + if (status & 0x01) + break; + usleep_range(500, 1000); + } + + if (cnt == 0) { + pr_err("%s: PHY NOT ready\n", __func__); + return false; + } else { + return true; + } +} + +void msm_edp_phy_ctrl(struct edp_phy *phy, int enable) +{ + DBG("enable=%d", enable); + if (enable) { + /* Reset */ + edp_write(phy->base + REG_EDP_PHY_CTRL, + EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL); + /* Make sure fully reset */ + wmb(); + usleep_range(500, 1000); + edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000); + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f); + edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1); + } else { + edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0); + } +} + +/* voltage mode and pre emphasis cfg */ +void msm_edp_phy_vm_pe_init(struct edp_phy *phy) +{ + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3); + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64); + edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c); +} + +void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1) +{ + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0); + edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1); +} + +void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane) +{ + u32 i; + u32 data; + + if (up) + data = 0; /* power up */ + else + data = 0x7; /* power down */ + + for (i = 0; i < max_lane; i++) + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); + + /* power down unused lane */ + data = 0x7; /* power down */ + for (i = max_lane; i < EDP_MAX_LANE; i++) + edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i) , data); +} + +void *msm_edp_phy_init(struct device *dev, void __iomem *regbase) +{ + struct edp_phy *phy = NULL; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return NULL; + + phy->base = regbase; + return phy; +} + diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 062c68725376..814536202efe 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -106,7 +107,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) goto fail; } - BUG_ON(config->hpd_reg_cnt > ARRAY_SIZE(hdmi->hpd_regs)); + hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) * + config->hpd_reg_cnt, GFP_KERNEL); + if (!hdmi->hpd_regs) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->hpd_reg_cnt; i++) { struct regulator *reg; @@ -122,7 +128,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->hpd_regs[i] = reg; } - BUG_ON(config->pwr_reg_cnt > ARRAY_SIZE(hdmi->pwr_regs)); + hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) * + config->pwr_reg_cnt, GFP_KERNEL); + if (!hdmi->pwr_regs) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->pwr_reg_cnt; i++) { struct regulator *reg; @@ -138,7 +149,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->pwr_regs[i] = reg; } - BUG_ON(config->hpd_clk_cnt > ARRAY_SIZE(hdmi->hpd_clks)); + hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) * + config->hpd_clk_cnt, GFP_KERNEL); + if (!hdmi->hpd_clks) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->hpd_clk_cnt; i++) { struct clk *clk; @@ -153,7 +169,12 @@ static struct hdmi *hdmi_init(struct platform_device *pdev) hdmi->hpd_clks[i] = clk; } - BUG_ON(config->pwr_clk_cnt > ARRAY_SIZE(hdmi->pwr_clks)); + hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) * + config->pwr_clk_cnt, GFP_KERNEL); + if (!hdmi->pwr_clks) { + ret = -ENOMEM; + goto fail; + } for (i = 0; i < config->pwr_clk_cnt; i++) { struct clk *clk; @@ -247,9 +268,9 @@ int hdmi_modeset_init(struct hdmi *hdmi, return 0; fail: - /* bridge/connector are normally destroyed by drm: */ + /* bridge is normally destroyed by drm: */ if (hdmi->bridge) { - hdmi->bridge->funcs->destroy(hdmi->bridge); + hdmi_bridge_destroy(hdmi->bridge); hdmi->bridge = NULL; } if (hdmi->connector) { @@ -266,6 +287,57 @@ fail: #include <linux/of_gpio.h> +#define HDMI_CFG(item, entry) \ + .item ## _names = item ##_names_ ## entry, \ + .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry) + +static struct hdmi_platform_config hdmi_tx_8660_config = { + .phy_init = hdmi_phy_8x60_init, +}; + +static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"}; +static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; + +static struct hdmi_platform_config hdmi_tx_8960_config = { + .phy_init = hdmi_phy_8960_init, + HDMI_CFG(hpd_reg, 8960), + HDMI_CFG(hpd_clk, 8960), +}; + +static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"}; +static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"}; +static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"}; +static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"}; +static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0}; + +static struct hdmi_platform_config hdmi_tx_8074_config = { + .phy_init = hdmi_phy_8x74_init, + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, 8x74), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + +static const char *hpd_reg_names_8084[] = {"hpd-gdsc", "hpd-5v", "hpd-5v-en"}; + +static struct hdmi_platform_config hdmi_tx_8084_config = { + .phy_init = hdmi_phy_8x74_init, + HDMI_CFG(pwr_reg, 8x74), + HDMI_CFG(hpd_reg, 8084), + HDMI_CFG(pwr_clk, 8x74), + HDMI_CFG(hpd_clk, 8x74), + .hpd_freq = hpd_clk_freq_8x74, +}; + +static const struct of_device_id 
dt_match[] = { + { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, + { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config }, + { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config }, + { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config }, + {} +}; + #ifdef CONFIG_OF static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) { @@ -288,50 +360,31 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - static struct hdmi_platform_config config = {}; + static struct hdmi_platform_config *hdmi_cfg; struct hdmi *hdmi; #ifdef CONFIG_OF struct device_node *of_node = dev->of_node; + const struct of_device_id *match; - if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { - static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; - static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; - static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; - static unsigned long hpd_clk_freq[] = {0, 19200000, 0}; - static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; - config.phy_init = hdmi_phy_8x74_init; - config.hpd_reg_names = hpd_reg_names; - config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); - config.pwr_reg_names = pwr_reg_names; - config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); - config.hpd_clk_names = hpd_clk_names; - config.hpd_freq = hpd_clk_freq; - config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); - config.pwr_clk_names = pwr_clk_names; - config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); - } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { - static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; - static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; - config.phy_init = hdmi_phy_8960_init; - config.hpd_reg_names = hpd_reg_names; - config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names); - config.hpd_clk_names = hpd_clk_names; - config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); - } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) { - config.phy_init = hdmi_phy_8x60_init; + match = of_match_node(dt_match, of_node); + if (match && match->data) { + hdmi_cfg = (struct hdmi_platform_config *)match->data; + DBG("hdmi phy: %s", match->compatible); } else { dev_err(dev, "unknown phy: %s\n", of_node->name); + return -ENXIO; } - config.mmio_name = "core_physical"; - config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); - config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); - config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); - config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); - config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); - config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); + hdmi_cfg->mmio_name = "core_physical"; + hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); + hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); + hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); + hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); + hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); + hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); #else + static struct hdmi_platform_config config = {}; static const char *hpd_clk_names[] = { "core_clk", "master_iface_clk", 
"slave_iface_clk", }; @@ -377,12 +430,15 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) config.mux_en_gpio = -1; config.mux_sel_gpio = -1; } + hdmi_cfg = &config; #endif - dev->platform_data = &config; + dev->platform_data = hdmi_cfg; + hdmi = hdmi_init(to_platform_device(dev)); if (IS_ERR(hdmi)) return PTR_ERR(hdmi); priv->hdmi = hdmi; + return 0; } @@ -413,13 +469,6 @@ static int hdmi_dev_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id dt_match[] = { - { .compatible = "qcom,hdmi-tx-8074" }, - { .compatible = "qcom,hdmi-tx-8960" }, - { .compatible = "qcom,hdmi-tx-8660" }, - {} -}; - static struct platform_driver hdmi_driver = { .probe = hdmi_dev_probe, .remove = hdmi_dev_remove, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 43e654f751b7..68fdfb3622a5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -52,10 +52,10 @@ struct hdmi { void __iomem *mmio; - struct regulator *hpd_regs[2]; - struct regulator *pwr_regs[2]; - struct clk *hpd_clks[3]; - struct clk *pwr_clks[2]; + struct regulator **hpd_regs; + struct regulator **pwr_regs; + struct clk **hpd_clks; + struct clk **pwr_clks; struct hdmi_phy *phy; struct i2c_adapter *i2c; @@ -146,6 +146,7 @@ void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate); */ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi); +void hdmi_bridge_destroy(struct drm_bridge *bridge); /* * hdmi connector: diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index 5b0844befbab..350988740e9f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013-2014 by the 
following authors: +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -45,12 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. enum hdmi_hdcp_key_state { - NO_KEYS = 0, - NOT_CHECKED = 1, - CHECKING = 2, - KEYS_VALID = 3, - AKSV_INVALID = 4, - CHECKSUM_MISMATCH = 5, + HDCP_KEYS_STATE_NO_KEYS = 0, + HDCP_KEYS_STATE_NOT_CHECKED = 1, + HDCP_KEYS_STATE_CHECKING = 2, + HDCP_KEYS_STATE_VALID = 3, + HDCP_KEYS_STATE_AKSV_NOT_VALID = 4, + HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5, + HDCP_KEYS_STATE_PROD_AKSV = 6, + HDCP_KEYS_STATE_RESERVED = 7, }; enum hdmi_ddc_read_write { @@ -199,11 +202,29 @@ static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val) #define HDMI_HDCP_CTRL_ENABLE 0x00000001 #define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100 +#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114 +#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004 + #define REG_HDMI_HDCP_INT_CTRL 0x00000118 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002 +#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040 +#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000 +#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000 #define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c #define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100 #define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200 +#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000 +#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000 #define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000 #define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28 static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val) @@ -211,9 +232,56 @@ static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK; } +#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120 +#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001 + +#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124 +#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001 + +#define REG_HDMI_HDCP_DDC_STATUS 0x00000128 +#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010 +#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400 +#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000 +#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000 +#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000 +#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000 +#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000 + +#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c + +#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c + #define REG_HDMI_HDCP_RESET 0x00000130 #define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001 +#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134 + +#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138 + +#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c + +#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140 + +#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144 + +#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148 + +#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c + +#define 
REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150 + +#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154 + +#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158 + +#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c + +#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160 + +#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164 + +#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168 + #define REG_HDMI_VENSPEC_INFO0 0x0000016c #define REG_HDMI_VENSPEC_INFO1 0x00000170 @@ -266,6 +334,7 @@ static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val) #define HDMI_DDC_SW_STATUS_NACK3 0x00008000 #define REG_HDMI_DDC_HW_STATUS 0x0000021c +#define HDMI_DDC_HW_STATUS_DONE 0x00000008 #define REG_HDMI_DDC_SPEED 0x00000220 #define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003 @@ -329,6 +398,15 @@ static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val) } #define HDMI_DDC_DATA_INDEX_WRITE 0x80000000 +#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c + +#define REG_HDMI_HDCP_SHA_STATUS 0x00000240 +#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001 +#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010 + +#define REG_HDMI_HDCP_SHA_DATA 0x00000244 +#define HDMI_HDCP_SHA_DATA_DONE 0x00000001 + #define REG_HDMI_HPD_INT_STATUS 0x00000250 #define HDMI_HPD_INT_STATUS_INT 0x00000001 #define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002 @@ -359,6 +437,10 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val) return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK; } +#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284 + +#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288 + #define REG_HDMI_CEC_STATUS 0x00000298 #define REG_HDMI_CEC_INT 0x0000029c diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 6902ad6da710..a7a1d8267cf0 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -23,11 +23,8 @@ struct hdmi_bridge { }; #define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base) -static void hdmi_bridge_destroy(struct drm_bridge *bridge) +void hdmi_bridge_destroy(struct drm_bridge *bridge) { - struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); - drm_bridge_cleanup(bridge); - kfree(hdmi_bridge); } static void power_on(struct drm_bridge *bridge) @@ -200,7 +197,6 @@ static const struct drm_bridge_funcs hdmi_bridge_funcs = { .disable = hdmi_bridge_disable, .post_disable = hdmi_bridge_post_disable, .mode_set = hdmi_bridge_mode_set, - .destroy = hdmi_bridge_destroy, }; @@ -211,7 +207,8 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) struct hdmi_bridge *hdmi_bridge; int ret; - hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL); + hdmi_bridge = devm_kzalloc(hdmi->dev->dev, + sizeof(*hdmi_bridge), GFP_KERNEL); if (!hdmi_bridge) { ret = -ENOMEM; goto fail; @@ -220,8 +217,11 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi) hdmi_bridge->hdmi = hdmi; bridge = &hdmi_bridge->base; + bridge->funcs = &hdmi_bridge_funcs; - drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs); + ret = drm_bridge_attach(hdmi->dev, bridge); + if (ret) + goto fail; return bridge; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b4e70e0e3cfa..b62cdb968614 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -386,7 +386,7 @@ hdmi_connector_best_encoder(struct drm_connector *connector) } static const struct drm_connector_funcs hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = 
hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = hdmi_connector_destroy, @@ -426,7 +426,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; - connector->interlace_allowed = 1; + connector->interlace_allowed = 0; connector->doublescan_allowed = 0; drm_connector_register(connector); diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h index 29bd796797de..43bb54a9afbf 100644 --- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h +++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h index a4a7f8c7122a..1d39174d91fb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h @@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- 
/home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) @@ -72,6 +73,18 @@ enum mdp4_cursor_format { CURSOR_XRGB = 2, }; +enum mdp4_frame_format { + FRAME_LINEAR = 0, + FRAME_TILE_ARGB_4X4 = 1, + FRAME_TILE_YCBCR_420 = 2, +}; + +enum mdp4_scale_unit { + SCALE_FIR = 0, + SCALE_MN_PHASE = 1, + SCALE_PIXEL_RPT = 2, +}; + enum mdp4_dma { DMA_P = 0, DMA_S = 1, @@ -637,6 +650,8 @@ static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00 static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } +static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } + static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } #define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff #define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 @@ -720,7 +735,25 @@ static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; +} #define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; +} static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } #define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff @@ -751,6 +784,18 @@ static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } #define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 #define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 +#define 
MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c +#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; +} #define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 #define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 #define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 20ae50385e5b..73afa21822b4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -140,26 +140,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc) kfree(mdp4_crtc); } -static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("%s: mode=%d", mdp4_crtc->name, mode); - - if (enabled != mdp4_crtc->enabled) { - if (enabled) { - mdp4_enable(mdp4_kms); - mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); - } else { - mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); - mdp4_disable(mdp4_kms); - } - mdp4_crtc->enabled = enabled; - } -} - static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -304,23 +284,38 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) } } -static void mdp4_crtc_prepare(struct drm_crtc *crtc) +static void mdp4_crtc_disable(struct drm_crtc *crtc) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + DBG("%s", mdp4_crtc->name); - /* make sure we hold a ref to mdp clks while setting up mode: */ - drm_crtc_vblank_get(crtc); - mdp4_enable(get_kms(crtc)); - mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + if (WARN_ON(!mdp4_crtc->enabled)) + return; + + mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); + mdp4_disable(mdp4_kms); + + mdp4_crtc->enabled = false; } -static void mdp4_crtc_commit(struct drm_crtc *crtc) +static void mdp4_crtc_enable(struct drm_crtc *crtc) { - mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(mdp4_crtc->enabled)) + return; + + mdp4_enable(mdp4_kms); + mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); + crtc_flush(crtc); - /* drop the ref to mdp clk's that we got in prepare: */ - mdp4_disable(get_kms(crtc)); - drm_crtc_vblank_put(crtc); + + mdp4_crtc->enabled = true; } static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, @@ -504,13 +499,10 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = { }; static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { - .dpms = mdp4_crtc_dpms, .mode_fixup = mdp4_crtc_mode_fixup, .mode_set_nofb = mdp4_crtc_mode_set_nofb, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .prepare = mdp4_crtc_prepare, - .commit = mdp4_crtc_commit, + .disable = mdp4_crtc_disable, + .enable = mdp4_crtc_enable, .atomic_check = mdp4_crtc_atomic_check, .atomic_begin = 
mdp4_crtc_atomic_begin, .atomic_flush = mdp4_crtc_atomic_flush, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c index c3878420180b..7896323b2631 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c @@ -94,61 +94,6 @@ static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { .destroy = mdp4_dtv_encoder_destroy, }; -static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("mode=%d", mode); - - if (enabled == mdp4_dtv_encoder->enabled) - return; - - if (enabled) { - unsigned long pc = mdp4_dtv_encoder->pixclock; - int ret; - - bs_set(mdp4_dtv_encoder, 1); - - DBG("setting src_clk=%lu", pc); - - ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); - if (ret) - dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); - clk_prepare_enable(mdp4_dtv_encoder->src_clk); - ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); - if (ret) - dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); - ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); - if (ret) - dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); - - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); - } else { - mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); - - clk_disable_unprepare(mdp4_dtv_encoder->src_clk); - clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); - clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); - - bs_set(mdp4_dtv_encoder, 0); - } - - mdp4_dtv_encoder->enabled = enabled; -} - static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -221,28 +166,78 @@ static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); } -static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder) +static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) { - mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (WARN_ON(!mdp4_dtv_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
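+ * (i.e. the DTV_ENABLE=0 write above only takes effect at the next vsync,
+ * so we block on one more EXTERNAL_VSYNC irq below before the interface
+ * clocks are released)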
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); + + clk_disable_unprepare(mdp4_dtv_encoder->src_clk); + clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); + clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); + + bs_set(mdp4_dtv_encoder, 0); + + mdp4_dtv_encoder->enabled = false; } -static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder) +static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + unsigned long pc = mdp4_dtv_encoder->pixclock; + int ret; + + if (WARN_ON(mdp4_dtv_encoder->enabled)) + return; + mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_R_BPC(BPC8) | MDP4_DMA_CONFIG_G_BPC(BPC8) | MDP4_DMA_CONFIG_B_BPC(BPC8) | MDP4_DMA_CONFIG_PACK(0x21)); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); - mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp4_dtv_encoder, 1); + + DBG("setting src_clk=%lu", pc); + + ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); + if (ret) + dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); + clk_prepare_enable(mdp4_dtv_encoder->src_clk); + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); + if (ret) + dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); + + mdp4_dtv_encoder->enabled = true; } static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { - .dpms = mdp4_dtv_encoder_dpms, .mode_fixup = mdp4_dtv_encoder_mode_fixup, .mode_set = mdp4_dtv_encoder_mode_set, - .prepare = mdp4_dtv_encoder_prepare, - .commit = mdp4_dtv_encoder_commit, + .enable = mdp4_dtv_encoder_enable, + .disable = mdp4_dtv_encoder_disable, }; long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index a62109e4ae0d..d847b9436194 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -125,6 +125,38 @@ out: return ret; } +static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i, ncrtcs = state->dev->mode_config.num_crtc; + + mdp4_enable(mdp4_kms); + + /* see 119ecb7fd */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + if (!crtc) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + int i, ncrtcs = state->dev->mode_config.num_crtc; + + /* see 119ecb7fd */ + for (i = 0; i < ncrtcs; i++) { + struct drm_crtc *crtc = state->crtcs[i]; + if (!crtc) + continue; + drm_crtc_vblank_put(crtc); + } + + mdp4_disable(mdp4_kms); +} + static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -161,6 +193,8 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp4_irq, .enable_vblank = mdp4_enable_vblank, .disable_vblank = mdp4_disable_vblank, + .prepare_commit = mdp4_prepare_commit, + .complete_commit = mdp4_complete_commit, .get_format = mdp_get_format, .round_pixclk = mdp4_round_pixclk, .preclose = mdp4_preclose, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h 
b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index cbd77bc626d5..0a5c58bde7a9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -175,14 +175,25 @@ irqreturn_t mdp4_irq(struct msm_kms *kms); int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +static inline bool pipe_supports_yuv(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: + case VG2: + case VG3: + case VG4: + return true; + default: + return false; + } +} + static inline uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats, uint32_t max_formats) { - /* TODO when we have YUV, we need to filter supported formats - * based on pipe_id.. - */ - return mdp_get_formats(pixel_formats, max_formats); + return mdp_get_formats(pixel_formats, max_formats, + !pipe_supports_yuv(pipe_id)); } void mdp4_plane_install_properties(struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c index 41f6436754fc..60ec8222c9f6 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c @@ -259,77 +259,6 @@ static void setup_phy(struct drm_encoder *encoder) mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); } -static void mdp4_lcdc_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct drm_device *dev = encoder->dev; - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - struct mdp4_kms *mdp4_kms = get_kms(encoder); - struct drm_panel *panel = mdp4_lcdc_encoder->panel; - bool enabled = (mode == DRM_MODE_DPMS_ON); - int i, ret; - - DBG("mode=%d", mode); - - if (enabled == mdp4_lcdc_encoder->enabled) - return; - - if (enabled) { - unsigned long pc = mdp4_lcdc_encoder->pixclock; - int ret; - - bs_set(mdp4_lcdc_encoder, 1); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to enable regulator: %d\n", ret); - } - - DBG("setting lcdc_clk=%lu", pc); - ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); - if (ret) - dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); - ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); - if (ret) - dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); - - if (panel) - drm_panel_enable(panel); - - setup_phy(encoder); - - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); - } else { - mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); - - if (panel) - drm_panel_disable(panel); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. 
- */ - mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); - - clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); - - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); - if (ret) - dev_err(dev->dev, "failed to disable regulator: %d\n", ret); - } - - bs_set(mdp4_lcdc_encoder, 0); - } - - mdp4_lcdc_encoder->enabled = enabled; -} - static bool mdp4_lcdc_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -403,13 +332,59 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0); } -static void mdp4_lcdc_encoder_prepare(struct drm_encoder *encoder) +static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) { - mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel = mdp4_lcdc_encoder->panel; + int i, ret; + + if (WARN_ON(!mdp4_lcdc_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + + if (panel) + drm_panel_disable(panel); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to disable regulator: %d\n", ret); + } + + bs_set(mdp4_lcdc_encoder, 0); + + mdp4_lcdc_encoder->enabled = false; } -static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder) +static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) { + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + unsigned long pc = mdp4_lcdc_encoder->pixclock; + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel = mdp4_lcdc_encoder->panel; + int i, ret; + + if (WARN_ON(mdp4_lcdc_encoder->enabled)) + return; + /* TODO: hard-coded for 18bpp: */ mdp4_crtc_set_config(encoder->crtc, MDP4_DMA_CONFIG_R_BPC(BPC6) | @@ -420,15 +395,38 @@ static void mdp4_lcdc_encoder_commit(struct drm_encoder *encoder) MDP4_DMA_CONFIG_DEFLKR_EN | MDP4_DMA_CONFIG_DITHER_EN); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - mdp4_lcdc_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp4_lcdc_encoder, 1); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); + if (ret) + dev_err(dev->dev, "failed to enable regulator: %d\n", ret); + } + + DBG("setting lcdc_clk=%lu", pc); + ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); + if (ret) + dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); + if (ret) + dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret); + + if (panel) + drm_panel_enable(panel); + + setup_phy(encoder); + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); + + mdp4_lcdc_encoder->enabled = true; } static const struct 
drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { - .dpms = mdp4_lcdc_encoder_dpms, .mode_fixup = mdp4_lcdc_encoder_mode_fixup, .mode_set = mdp4_lcdc_encoder_mode_set, - .prepare = mdp4_lcdc_encoder_prepare, - .commit = mdp4_lcdc_encoder_commit, + .disable = mdp4_lcdc_encoder_disable, + .enable = mdp4_lcdc_encoder_enable, }; long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index 4ddc28e1275b..921185133d38 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -94,7 +94,7 @@ mdp4_lvds_connector_best_encoder(struct drm_connector *connector) } static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = mdp4_lvds_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = mdp4_lvds_connector_destroy, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 1e5ebe83647d..cde25009203a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -17,6 +17,8 @@ #include "mdp4_kms.h" +#define DOWN_SCALE_MAX 8 +#define UP_SCALE_MAX 8 struct mdp4_plane { struct drm_plane base; @@ -136,10 +138,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); enum mdp4_pipe pipe = mdp4_plane->pipe; - uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0); - - DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name, - iova, fb->pitches[0]); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | @@ -149,11 +147,45 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 0)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 1)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 2)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), + msm_framebuffer_iova(fb, mdp4_kms->id, 3)); plane->fb = fb; } +static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms, + enum mdp4_pipe pipe, struct csc_cfg *csc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i), + csc->matrix[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i), + csc->pre_bias[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i), + csc->post_bias[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i), + csc->pre_clamp[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i), + csc->post_clamp[i]); + } +} + #define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 static int mdp4_plane_mode_set(struct drm_plane *plane, @@ -163,6 +195,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { + struct drm_device *dev = plane->dev; struct mdp4_plane *mdp4_plane = 
to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); enum mdp4_pipe pipe = mdp4_plane->pipe; @@ -186,14 +219,59 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + format = to_mdp_format(msm_framebuffer_format(fb)); + + if (src_w > (crtc_w * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Width down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_h > (crtc_h * DOWN_SCALE_MAX)) { + dev_err(dev->dev, "Height down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_w > (src_w * UP_SCALE_MAX)) { + dev_err(dev->dev, "Width up scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_h > (src_h * UP_SCALE_MAX)) { + dev_err(dev->dev, "Height up scaling exceeds limits!\n"); + return -ERANGE; + } + if (src_w != crtc_w) { + uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; - /* TODO calc phasex_step */ + + if (MDP_FORMAT_IS_YUV(format)) { + if (crtc_w > src_w) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_w <= (src_w / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit); + phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_w, crtc_w); + } } if (src_h != crtc_h) { + uint32_t sel_unit = SCALE_FIR; op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; - /* TODO calc phasey_step */ + + if (MDP_FORMAT_IS_YUV(format)) { + + if (crtc_h > src_h) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_h <= (src_h / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit); + phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_h, crtc_h); + } } mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), @@ -214,8 +292,6 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, mdp4_plane_set_scanout(plane, fb); - format = to_mdp_format(msm_framebuffer_format(fb)); - mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | @@ -224,6 +300,8 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | + MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), @@ -232,6 +310,14 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + if (MDP_FORMAT_IS_YUV(format)) { + struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); + + op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR; + op_mode |= MDP4_PIPE_OP_MODE_CSC_EN; + mdp4_write_csc_config(mdp4_kms, pipe, csc); + } + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index e87ef5512cb0..09b4a25eb553 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- 
/home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013-2014 by the following authors: +Copyright (C) 2013-2015 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -88,13 +89,6 @@ enum mdp5_pack_3d { PACK_3D_COL_INT = 3, }; -enum mdp5_chroma_samp_type { - CHROMA_RGB = 0, - CHROMA_H2V1 = 1, - CHROMA_H1V2 = 2, - CHROMA_420 = 3, -}; - enum mdp5_scale_filter { SCALE_FILTER_NEAREST = 0, SCALE_FILTER_BIL = 1, @@ -135,6 +129,17 @@ enum mdp5_client_id { CID_MAX = 23, }; +enum mdp5_cursor_format { + CURSOR_FMT_ARGB8888 = 0, + CURSOR_FMT_ARGB1555 = 2, + CURSOR_FMT_ARGB4444 = 4, +}; + +enum mdp5_cursor_alpha { + CURSOR_ALPHA_CONST = 0, + CURSOR_ALPHA_PER_PIXEL = 2, +}; + enum mdp5_igc_type { IGC_VIG = 0, IGC_RGB = 1, @@ -142,6 +147,11 @@ enum mdp5_igc_type { IGC_DSPP = 3, }; +enum mdp5_data_format { + DATA_FORMAT_RGB = 0, + DATA_FORMAT_YUV = 1, +}; + #define MDP5_IRQ_INTF0_WB_ROT_COMP 0x00000001 #define MDP5_IRQ_INTF1_WB_ROT_COMP 0x00000002 #define MDP5_IRQ_INTF2_WB_ROT_COMP 0x00000004 @@ -463,12 +473,143 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) } static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); } +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000 +#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000 +#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18 +static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val) +{ + return ((val) << 
MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK; +} +#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000 + static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & 
MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); } +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK; +} +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; } +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK; +} + static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); } #define 
MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 #define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 @@ -618,15 +759,15 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) } #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 -#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00780000 +#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000 #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(uint32_t val) +static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_sspp_fetch_type val) { return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK; } #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23 -static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_type val) +static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) { return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; } @@ -753,6 +894,10 @@ static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { ret static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); } +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); } + +static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); } + static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); } static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); } @@ -839,20 +984,88 @@ static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; } static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK; +} +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff +#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK; +} +#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000 +#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff +#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0 +static inline uint32_t 
MDP5_LM_CURSOR_XY_SRC_X(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK; +} +#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000 +#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); } +#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff +#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); } +#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007 +#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val) +{ + return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); } static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff +#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0 +static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK; +} +#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000 +#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16 +static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val) +{ + return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK; +} static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); } +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006 +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1 +static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val) +{ + return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK; +} +#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008 static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6b25f9f731ed..46fac545dc2b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -24,6 +24,9 @@ #include "drm_crtc_helper.h" #include "drm_flip_work.h" +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + #define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ struct mdp5_crtc { @@ -47,8 +50,21 @@ struct mdp5_crtc { #define PENDING_FLIP 0x2 atomic_t pending; + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + struct mdp_irq vblank; struct mdp_irq err; + + struct { + /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ + spinlock_t lock; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + uint32_t width; + uint32_t height; + } cursor; }; #define to_mdp5_crtc(x) 
container_of(x, struct mdp5_crtc, base) @@ -129,37 +145,26 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) } } +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp5_crtc *mdp5_crtc = + container_of(work, struct mdp5_crtc, unref_cursor_work); + struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); + + msm_gem_put_iova(val, mdp5_kms->id); + drm_gem_object_unreference_unlocked(val); +} + static void mdp5_crtc_destroy(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); kfree(mdp5_crtc); } -static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - struct mdp5_kms *mdp5_kms = get_kms(crtc); - bool enabled = (mode == DRM_MODE_DPMS_ON); - - DBG("%s: mode=%d", mdp5_crtc->name, mode); - - if (enabled != mdp5_crtc->enabled) { - if (enabled) { - mdp5_enable(mdp5_kms); - mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); - } else { - /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); - mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); - mdp5_disable(mdp5_kms); - } - mdp5_crtc->enabled = enabled; - } -} - static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -256,23 +261,41 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } -static void mdp5_crtc_prepare(struct drm_crtc *crtc) +static void mdp5_crtc_disable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + DBG("%s", mdp5_crtc->name); - /* make sure we hold a ref to mdp clks while setting up mode: */ - mdp5_enable(get_kms(crtc)); - mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + if (WARN_ON(!mdp5_crtc->enabled)) + return; + + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); + + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); + mdp5_disable(mdp5_kms); + + mdp5_crtc->enabled = false; } -static void mdp5_crtc_commit(struct drm_crtc *crtc) +static void mdp5_crtc_enable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + DBG("%s", mdp5_crtc->name); - mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + + if (WARN_ON(mdp5_crtc->enabled)) + return; + + mdp5_enable(mdp5_kms); + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); + crtc_flush_all(crtc); - /* drop the ref to mdp clk's that we got in prepare: */ - mdp5_disable(get_kms(crtc)); + + mdp5_crtc->enabled = true; } struct plane_state { @@ -380,6 +403,132 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc, return -EINVAL; } +static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_gem_object *cursor_bo, *old_bo; + uint32_t blendcfg, cursor_addr, stride; + int ret, bpp, lm; + unsigned int depth; + enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + unsigned long flags; + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + 
if (NULL == mdp5_crtc->ctl) + return -EINVAL; + + if (!handle) { + DBG("Cursor off"); + return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false); + } + + cursor_bo = drm_gem_object_lookup(dev, file, handle); + if (!cursor_bo) + return -ENOENT; + + ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr); + if (ret) + return -EINVAL; + + lm = mdp5_crtc->lm; + drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); + stride = width * (bpp >> 3); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + old_bo = mdp5_crtc->cursor.scanout_bo; + + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), + MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm), + MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) | + MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), + MDP5_LM_CURSOR_SIZE_ROI_H(height) | + MDP5_LM_CURSOR_SIZE_ROI_W(width)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr); + + + blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN; + blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN; + blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg); + + mdp5_crtc->cursor.scanout_bo = cursor_bo; + mdp5_crtc->cursor.width = width; + mdp5_crtc->cursor.height = height; + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true); + if (ret) + goto end; + + flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl); + crtc_flush(crtc, flush_mask); + +end: + if (old_bo) { + drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); + /* enable vblank to complete cursor work: */ + request_pending(crtc, PENDING_CURSOR); + } + return ret; +} + +static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + uint32_t xres = crtc->mode.hdisplay; + uint32_t yres = crtc->mode.vdisplay; + uint32_t roi_w; + uint32_t roi_h; + unsigned long flags; + + x = (x > 0) ? x : 0; + y = (y > 0) ? y : 0; + + /* + * Cursor Region Of Interest (ROI) is a plane read from cursor + * buffer to render. The ROI region is determined by the visiblity of + * the cursor point. In the default Cursor image the cursor point will + * be at the top left of the cursor image, unless it is specified + * otherwise using hotspot feature. + * + * If the cursor point reaches the right (xres - x < cursor.width) or + * bottom (yres - y < cursor.height) boundary of the screen, then ROI + * width and ROI height need to be evaluated to crop the cursor image + * accordingly. 
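+ * For example (illustrative numbers, not from the driver): with a
+ * 64x64 cursor on a 1920x1080 mode moved to x=1900, y=500, roi_w
+ * becomes min(64, 1920 - 1900) = 20 while roi_h stays 64, so only the
+ * visible 20x64 part of the cursor buffer is fetched.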
+ * (xres-x) will be new cursor width when x > (xres - cursor.width) + * (yres-y) will be new cursor height when y > (yres - cursor.height) + */ + roi_w = min(mdp5_crtc->cursor.width, xres - x); + roi_h = min(mdp5_crtc->cursor.height, yres - y); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), + MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | + MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm), + MDP5_LM_CURSOR_START_XY_Y_START(y) | + MDP5_LM_CURSOR_START_XY_X_START(x)); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + crtc_flush(crtc, flush_mask); + + return 0; +} + static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, @@ -388,16 +537,15 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .cursor_set = mdp5_crtc_cursor_set, + .cursor_move = mdp5_crtc_cursor_move, }; static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { - .dpms = mdp5_crtc_dpms, .mode_fixup = mdp5_crtc_mode_fixup, .mode_set_nofb = mdp5_crtc_mode_set_nofb, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .prepare = mdp5_crtc_prepare, - .commit = mdp5_crtc_commit, + .prepare = mdp5_crtc_disable, + .commit = mdp5_crtc_enable, .atomic_check = mdp5_crtc_atomic_check, .atomic_begin = mdp5_crtc_atomic_begin, .atomic_flush = mdp5_crtc_atomic_flush, @@ -407,6 +555,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); struct drm_crtc *crtc = &mdp5_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; unsigned pending; mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); @@ -416,6 +565,9 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) if (pending & PENDING_FLIP) { complete_flip(crtc, NULL); } + + if (pending & PENDING_CURSOR) + drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq); } static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) @@ -515,6 +667,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->lm = GET_LM_ID(id); spin_lock_init(&mdp5_crtc->lm_lock); + spin_lock_init(&mdp5_crtc->cursor.lock); mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; @@ -523,6 +676,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, pipe2name(mdp5_plane_pipe(plane)), id); drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); + + drm_flip_work_init(&mdp5_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); plane->crtc = crtc; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index dea4505ac963..151129032d16 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -95,7 +95,7 @@ u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) } -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf) +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf) { unsigned long flags; static const enum mdp5_intfnum intfnum[] = { diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 
1018519b6af2..ad48788efeea 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -34,7 +34,7 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); */ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc); -int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf); +int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, int intf); int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 0254bfdeb92f..d6a14bb99988 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -110,45 +111,6 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { .destroy = mdp5_encoder_destroy, }; -static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf; - bool enabled = (mode == DRM_MODE_DPMS_ON); - unsigned long flags; - - DBG("mode=%d", mode); - - if (enabled == mdp5_encoder->enabled) - return; - - if (enabled) { - bs_set(mdp5_encoder, 1); - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - } else { - spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); - spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - - /* - * Wait for a vsync so we know the ENABLE=0 latched before - * the (connector) source of the vsync's gets disabled, - * otherwise we end up in a funny state if we re-enable - * before the disable latches, which results that some of - * the settings changes for the new modeset (like new - * scanout buffer) don't latch properly.. - */ - mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); - - bs_set(mdp5_encoder, 0); - } - - mdp5_encoder->enabled = enabled; -} - static bool mdp5_encoder_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -162,11 +124,13 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; int intf = mdp5_encoder->intf; uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; uint32_t display_v_start, display_v_end; uint32_t hsync_start_x, hsync_end_x; - uint32_t format; + uint32_t format = 0x2100; unsigned long flags; mode = adjusted_mode; @@ -188,7 +152,28 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, /* probably need to get DATA_EN polarity from panel.. */ dtv_hsync_skew = 0; /* get this from panel? */ - format = 0x213f; /* get this from panel? 
*/ + + /* Get color format from panel, default is 8bpc */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + switch (connector->display_info.bpc) { + case 4: + format |= 0; + break; + case 5: + format |= 0x15; + break; + case 6: + format |= 0x2A; + break; + case 8: + default: + format |= 0x3F; + break; + } + break; + } + } hsync_start_x = (mode->htotal - mode->hsync_start); hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; @@ -198,6 +183,16 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + if (mdp5_encoder->intf_id == INTF_eDP) { + display_v_start += mode->htotal - mode->hsync_start; + display_v_end -= mode->hsync_start - mode->hdisplay; + } + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), @@ -225,25 +220,61 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); } -static void mdp5_encoder_prepare(struct drm_encoder *encoder) +static void mdp5_encoder_disable(struct drm_encoder *encoder) { - mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf; + unsigned long flags; + + if (WARN_ON(!mdp5_encoder->enabled)) + return; + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
+ */ + mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf)); + + bs_set(mdp5_encoder, 0); + + mdp5_encoder->enabled = false; } -static void mdp5_encoder_commit(struct drm_encoder *encoder) +static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf; + unsigned long flags; + + if (WARN_ON(mdp5_encoder->enabled)) + return; + mdp5_crtc_set_intf(encoder->crtc, mdp5_encoder->intf, mdp5_encoder->intf_id); - mdp5_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + + bs_set(mdp5_encoder, 1); + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + mdp5_encoder->enabled = true; } static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { - .dpms = mdp5_encoder_dpms, .mode_fixup = mdp5_encoder_mode_fixup, .mode_set = mdp5_encoder_mode_set, - .prepare = mdp5_encoder_prepare, - .commit = mdp5_encoder_commit, + .prepare = mdp5_encoder_disable, + .commit = mdp5_encoder_enable, }; /* initialize encoder */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 9f01a4f21af2..92b61db5754c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -68,6 +68,18 @@ static int mdp5_hw_init(struct msm_kms *kms) return 0; } +static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_enable(mdp5_kms); +} + +static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_disable(mdp5_kms); +} + static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder) { @@ -115,6 +127,8 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp5_irq, .enable_vblank = mdp5_enable_vblank, .disable_vblank = mdp5_disable_vblank, + .prepare_commit = mdp5_prepare_commit, + .complete_commit = mdp5_complete_commit, .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .preclose = mdp5_preclose, @@ -208,19 +222,18 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } } - /* Construct encoder for HDMI: */ - encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); - if (IS_ERR(encoder)) { - dev_err(dev->dev, "failed to construct encoder\n"); - ret = PTR_ERR(encoder); - goto fail; - } + if (priv->hdmi) { + /* Construct encoder for HDMI: */ + encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct encoder\n"); + ret = PTR_ERR(encoder); + goto fail; + } - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;; - priv->encoders[priv->num_encoders++] = encoder; + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;; + priv->encoders[priv->num_encoders++] = encoder; - /* Construct bridge/connector for HDMI: */ - if (priv->hdmi) { ret = hdmi_modeset_init(priv->hdmi, dev, encoder); if (ret) { dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); @@ -228,6 +241,27 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } } + if (priv->edp) { + /* Construct encoder for eDP: */ + encoder = mdp5_encoder_init(dev, 0, INTF_eDP); + if (IS_ERR(encoder)) { + dev_err(dev->dev, "failed to construct eDP encoder\n"); + ret = PTR_ERR(encoder); + goto fail; + } + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 
1; + priv->encoders[priv->num_encoders++] = encoder; + + /* Construct bridge/connector for eDP: */ + ret = msm_edp_modeset_init(priv->edp, dev, encoder); + if (ret) { + dev_err(dev->dev, "failed to initialize eDP: %d\n", + ret); + goto fail; + } + } + return 0; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index dd69c77c0d64..49d011e8835b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -165,14 +165,25 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); +static inline bool pipe_supports_yuv(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: + case SSPP_VIG1: + case SSPP_VIG2: + case SSPP_VIG3: + return true; + default: + return false; + } +} + static inline uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, uint32_t max_formats) { - /* TODO when we have YUV, we need to filter supported formats - * based on pipe id.. - */ - return mdp_get_formats(pixel_formats, max_formats); + return mdp_get_formats(pixel_formats, max_formats, + !pipe_supports_yuv(pipe)); } void mdp5_plane_install_properties(struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index fc76f630e5b1..05cf9ab2a876 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -18,8 +18,6 @@ #include "mdp5_kms.h" -#define MAX_PLANE 4 - struct mdp5_plane { struct drm_plane base; const char *name; @@ -278,6 +276,155 @@ static void set_scanout_locked(struct drm_plane *plane, plane->fb = fb; } +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe) +{ + uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) & + ~MDP5_PIPE_OP_MODE_CSC_1_EN; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + struct csc_cfg *csc) +{ + uint32_t i, mode = 0; /* RGB, no CSC */ + uint32_t *matrix; + + if (unlikely(!csc)) + return; + + if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV); + if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV); + mode |= MDP5_PIPE_OP_MODE_CSC_1_EN; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode); + + matrix = csc->matrix; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8])); + + for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) { + 
uint32_t *pre_clamp = csc->pre_clamp; + uint32_t *post_clamp = csc->post_clamp; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i), + MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i), + MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i])); + } +} + +#define PHASE_STEP_SHIFT 21 +#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */ + +static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) +{ + uint32_t unit; + + if (src == 0 || dst == 0) + return -EINVAL; + + /* + * PHASE_STEP_X/Y is coded on 26 bits (25:0), + * where 2^21 represents the unity "1" in fixed-point hardware design. + * This leaves 5 bits for the integer part (downscale case): + * -> maximum downscale ratio = 0b1_1111 = 31 + */ + if (src > (dst * DOWN_SCALE_RATIO_MAX)) + return -EOVERFLOW; + + unit = 1 << PHASE_STEP_SHIFT; + *out_phase = mult_frac(unit, src, dst); + + return 0; +} + +static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasex_steps[2]) +{ + uint32_t phasex_step; + unsigned int hsub; + int ret; + + ret = calc_phase_step(src, dest, &phasex_step); + if (ret) + return ret; + + hsub = drm_format_horz_chroma_subsampling(pixel_format); + + phasex_steps[0] = phasex_step; + phasex_steps[1] = phasex_step / hsub; + + return 0; +} + +static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasey_steps[2]) +{ + uint32_t phasey_step; + unsigned int vsub; + int ret; + + ret = calc_phase_step(src, dest, &phasey_step); + if (ret) + return ret; + + vsub = drm_format_vert_chroma_subsampling(pixel_format); + + phasey_steps[0] = phasey_step; + phasey_steps[1] = phasey_step / vsub; + + return 0; +} + +static uint32_t get_scalex_config(uint32_t src, uint32_t dest) +{ + uint32_t filter; + + filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter); +} + +static uint32_t get_scaley_config(uint32_t src, uint32_t dest) +{ + uint32_t filter; + + filter = (src <= dest) ? 
SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter); +} + static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -287,11 +434,14 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; enum mdp5_pipe pipe = mdp5_plane->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - uint32_t phasex_step = 0, phasey_step = 0; + /* below array -> index 0: comp 0/3 ; index 1: comp 1/2 */ + uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,}; uint32_t hdecm = 0, vdecm = 0; + uint32_t pix_format; unsigned long flags; int ret; @@ -301,6 +451,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, if (WARN_ON(nplanes > pipe2nclients(pipe))) return -EINVAL; + format = to_mdp_format(msm_framebuffer_format(fb)); + pix_format = format->base.pixel_format; + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; @@ -325,14 +478,28 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, */ mdp5_smp_configure(mdp5_kms->smp, pipe); - if (src_w != crtc_w) { - config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; - /* TODO calc phasex_step, hdecm */ + /* SCALE is used to both scale and up-sample chroma components */ + + if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) { + /* TODO calc hdecm */ + ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step); + if (ret) { + dev_err(dev, "X scaling (%d -> %d) failed: %d\n", + src_w, crtc_w, ret); + return ret; + } + config |= get_scalex_config(src_w, crtc_w); } - if (src_h != crtc_h) { - config |= MDP5_PIPE_SCALE_CONFIG_SCALEY_EN; - /* TODO calc phasey_step, vdecm */ + if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) { + /* TODO calc vdecm */ + ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step); + if (ret) { + dev_err(dev, "Y scaling (%d -> %d) failed: %d\n", + src_h, crtc_h, ret); + return ret; + } + config |= get_scaley_config(src_h, crtc_h); } spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); @@ -357,8 +524,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_OUT_XY_X(crtc_x) | MDP5_PIPE_OUT_XY_Y(crtc_y)); - format = to_mdp_format(msm_framebuffer_format(fb)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | @@ -368,8 +533,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_NUM_PLANES(nplanes - 1) | - MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(CHROMA_RGB)); + MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | @@ -383,18 +548,24 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), 
phasey_step); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + phasex_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + phasey_step[0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + phasex_step[1]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + phasey_step[1]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), MDP5_PIPE_DECIMATION_VERT(vdecm) | MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), - MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | - MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); + + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); set_scanout_locked(plane, fb); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index bf551885e019..1f795af89680 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -119,9 +119,10 @@ static int smp_request_block(struct mdp5_smp *smp, spin_lock_irqsave(&smp->state_lock, flags); - nblks -= reserved; - if (reserved) + if (reserved) { + nblks = max(0, nblks - reserved); DBG("%d MMBs allocated (%d reserved)", nblks, reserved); + } avail = cnt - bitmap_weight(smp->state, cnt); if (nblks > avail) { diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h index 64c1afd6030a..a1d35f162c7f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h @@ -8,18 +8,19 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) +- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) +- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) +- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 
2013-07-05 19:21:12) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) +- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) +- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) -Copyright (C) 2013 by the following authors: +Copyright (C) 2013-2014 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) Permission is hereby granted, free of charge, to any person obtaining @@ -44,6 +45,19 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ +enum mdp_chroma_samp_type { + CHROMA_RGB = 0, + CHROMA_H2V1 = 1, + CHROMA_H1V2 = 2, + CHROMA_420 = 3, +}; + +enum mdp_sspp_fetch_type { + MDP_PLANE_INTERLEAVED = 0, + MDP_PLANE_PLANAR = 1, + MDP_PLANE_PSEUDO_PLANAR = 2, +}; + enum mdp_mixer_stage_id { STAGE_UNUSED = 0, STAGE_BASE = 1, diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index e0a6ffbe6ab4..f683433b6727 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -19,7 +20,58 @@ #include "msm_drv.h" #include "mdp_kms.h" -#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \ +static struct csc_cfg csc_convert[CSC_MAX] = { + [CSC_RGB2RGB] = { + .type = CSC_RGB2RGB, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x0, 0x0, 0x0 }, + .post_bias = { 0x0, 0x0, 0x0 }, + .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff }, + }, + [CSC_YUV2RGB] = { + .type = CSC_YUV2RGB, + .matrix = { + 0x0254, 0x0000, 0x0331, + 0x0254, 0xff37, 0xfe60, + 0x0254, 0x0409, 0x0000 + }, + .pre_bias = { 0xfff0, 0xff80, 0xff80 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, + [CSC_RGB2YUV] = { + .type = CSC_RGB2YUV, + .matrix = { + 0x0083, 0x0102, 0x0032, + 0x1fb5, 0x1f6c, 0x00e1, + 0x00e1, 0x1f45, 0x1fdc + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x10, 0x80, 0x80 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 }, + }, + [CSC_YUV2YUV] = { + .type = CSC_YUV2YUV, + .matrix = { + 0x0200, 0x0000, 0x0000, + 0x0000, 0x0200, 0x0000, + 0x0000, 0x0000, 0x0200 + }, + .pre_bias = { 0x00, 0x00, 0x00 }, + .post_bias = { 0x00, 0x00, 0x00 }, + .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff }, + }, +}; + +#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \ .base = { .pixel_format = DRM_FORMAT_ ## name }, \ .bpc_a = BPC ## a ## A, \ .bpc_r = BPC ## r, \ @@ -30,21 +82,46 @@ .unpack_tight = tight, \ .cpp = c, \ .unpack_count = cnt, \ - } + .fetch_type = fp, \ + .chroma_sample = cs \ +} #define BPC0A 0 +/* + * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking + * mdp_get_rgb_formats()'s implementation. 
+ */ static const struct mdp_format formats[] = { - /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */ - FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4), - FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4), - FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3), - FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3), - FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3), - FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3), + /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */ + FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_RGB), + + /* --- RGB formats above / YUV formats below this line --- */ + + FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), + FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420), }; -uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) +/* + * Note: + * @rgb_only must be set to true, when requesting + * supported formats for RGB pipes. + */ +uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, + bool rgb_only) { uint32_t i; for (i = 0; i < ARRAY_SIZE(formats); i++) { @@ -53,6 +130,9 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats) if (i == max_formats) break; + if (rgb_only && MDP_FORMAT_IS_YUV(f)) + break; + pixel_formats[i] = f->base.pixel_format; } @@ -69,3 +149,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) } return NULL; } + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type) +{ + if (unlikely(WARN_ON(type >= CSC_MAX))) + return NULL; + + return &csc_convert[type]; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c index 2a731722d840..1988c243f437 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c @@ -34,7 +34,7 @@ static void update_irq(struct mdp_kms *mdp_kms) struct mdp_irq *irq; uint32_t irqmask = mdp_kms->vblank_mask; - BUG_ON(!spin_is_locked(&list_lock)); + assert_spin_locked(&list_lock); list_for_each_entry(irq, &mdp_kms->irq_list, node) irqmask |= irq->irqmask; diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index b268ce95d394..5ae4039d68e4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -88,10 +88,32 @@ struct mdp_format { uint8_t unpack[4]; bool alpha_enable, unpack_tight; uint8_t cpp, unpack_count; + enum mdp_sspp_fetch_type fetch_type; + enum mdp_chroma_samp_type chroma_sample; }; #define to_mdp_format(x) container_of(x, struct mdp_format, base) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB) -uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats); +uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); +enum csc_type { + CSC_RGB2RGB = 0, + CSC_YUV2RGB, + CSC_RGB2YUV, + CSC_YUV2YUV, + CSC_MAX +}; + +struct csc_cfg { + enum csc_type 
type; + uint32_t matrix[9]; + uint32_t pre_bias[3]; + uint32_t post_bias[3]; + uint32_t pre_clamp[6]; + uint32_t post_clamp[6]; +}; + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type); + #endif /* __MDP_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 2c396540e279..871aa2108dc6 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -20,6 +20,7 @@ #include "msm_gem.h" struct msm_commit { + struct drm_device *dev; struct drm_atomic_state *state; uint32_t fence; struct msm_fence_cb fence_cb; @@ -58,14 +59,16 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask) spin_unlock(&priv->pending_crtcs_event.lock); } -static struct msm_commit *new_commit(struct drm_atomic_state *state) +static struct msm_commit *commit_init(struct drm_atomic_state *state) { struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return NULL; + c->dev = state->dev; c->state = state; + /* TODO we might need a way to indicate to run the cb on a * different wq so wait_for_vblanks() doesn't block retiring * bo's.. @@ -75,6 +78,12 @@ static struct msm_commit *new_commit(struct drm_atomic_state *state) return c; } +static void commit_destroy(struct msm_commit *c) +{ + end_atomic(c->dev->dev_private, c->crtc_mask); + kfree(c); +} + /* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. */ @@ -82,6 +91,10 @@ static void complete_commit(struct msm_commit *c) { struct drm_atomic_state *state = c->state; struct drm_device *dev = state->dev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_kms *kms = priv->kms; + + kms->funcs->prepare_commit(kms, state); drm_atomic_helper_commit_pre_planes(dev, state); @@ -106,11 +119,11 @@ static void complete_commit(struct msm_commit *c) drm_atomic_helper_cleanup_planes(dev, state); - drm_atomic_state_free(state); + kms->funcs->complete_commit(kms, state); - end_atomic(dev->dev_private, c->crtc_mask); + drm_atomic_state_free(state); - kfree(c); + commit_destroy(c); } static void fence_cb(struct msm_fence_cb *cb) @@ -165,6 +178,7 @@ int msm_atomic_commit(struct drm_device *dev, { int nplanes = dev->mode_config.num_total_plane; int ncrtcs = dev->mode_config.num_crtc; + struct timespec timeout; struct msm_commit *c; int i, ret; @@ -172,7 +186,7 @@ int msm_atomic_commit(struct drm_device *dev, if (ret) return ret; - c = new_commit(state); + c = commit_init(state); if (!c) return -ENOMEM; @@ -237,10 +251,12 @@ int msm_atomic_commit(struct drm_device *dev, return 0; } - ret = msm_wait_fence_interruptable(dev, c->fence, NULL); + jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout); + + ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); if (ret) { WARN_ON(ret); // TODO unswap state back? or?? 
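Illustrative aside from the editor, not part of the patch: the msm_atomic change above replaces the unbounded fence wait with a roughly one-second deadline built from jiffies. A minimal sketch of that relative-to-absolute timeout conversion, using only the standard jiffies helpers (the function name is made up for illustration):

#include <linux/jiffies.h>
#include <linux/time.h>

/*
 * Sketch only: turn a relative delay in milliseconds into the absolute
 * struct timespec that the fence-wait helper consumes.
 */
static struct timespec fence_wait_deadline(unsigned int msecs)
{
	struct timespec deadline;

	/* current tick count + converted delay = absolute deadline */
	jiffies_to_timespec(jiffies + msecs_to_jiffies(msecs), &deadline);

	return deadline;
}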
- kfree(c); + commit_destroy(c); return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f1ebedde6346..a4269119f9ea 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -54,6 +54,12 @@ module_param(reglog, bool, 0600); #define reglog 0 #endif +#ifdef CONFIG_DRM_MSM_FBDEV +static bool fbdev = true; +MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer"); +module_param(fbdev, bool, 0600); +#endif + static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU"); module_param(vram, charp, 0); @@ -300,7 +306,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags) drm_mode_config_reset(dev); #ifdef CONFIG_DRM_MSM_FBDEV - priv->fbdev = msm_fbdev_init(dev); + if (fbdev) + priv->fbdev = msm_fbdev_init(dev); #endif ret = msm_debugfs_late_init(dev); @@ -1023,6 +1030,7 @@ static struct platform_driver msm_platform_driver = { static int __init msm_drm_register(void) { DBG("init"); + msm_edp_register(); hdmi_register(); adreno_register(); return platform_driver_register(&msm_platform_driver); @@ -1034,6 +1042,7 @@ static void __exit msm_drm_unregister(void) platform_driver_unregister(&msm_platform_driver); hdmi_unregister(); adreno_unregister(); + msm_edp_unregister(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 22e5391a7ce8..9e8d441b61c3 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -76,6 +76,12 @@ struct msm_drm_private { */ struct hdmi *hdmi; + /* eDP is for mdp5 only, but kms has not been created + * when edp_bind() and edp_init() are called. Here is the only + * place to keep the edp instance. + */ + struct msm_edp *edp; + /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; struct msm_file_private *lastctx; @@ -224,6 +230,12 @@ int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev, void __init hdmi_register(void); void __exit hdmi_unregister(void); +struct msm_edp; +void __init msm_edp_register(void); +void __exit msm_edp_unregister(void); +int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, + struct drm_encoder *encoder); + #ifdef CONFIG_DEBUG_FS void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 84dec161d836..6b573e612f27 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -24,7 +24,7 @@ struct msm_framebuffer { struct drm_framebuffer base; const struct msm_format *format; - struct drm_gem_object *planes[3]; + struct drm_gem_object *planes[MAX_PLANE]; }; #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) @@ -122,7 +122,7 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); if (!msm_fb->planes[plane]) return 0; - return msm_gem_iova(msm_fb->planes[plane], id); + return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane]; } struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 115b509a4a00..df60f65728ff 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -245,9 +245,6 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) if (ret) goto fini; 
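Another illustrative aside, not part of the patch: the msm_fb change above adds fb->offsets[plane] to the per-plane iova, which is what multi-planar formats such as NV12 need when a plane starts at a non-zero byte offset inside its buffer. A toy sketch of that addressing (types and names are hypothetical):

#include <linux/types.h>

/* Hypothetical two-plane (NV12-style) framebuffer layout. */
struct toy_fb {
	u64 base_iova[2];	/* device address of each plane's buffer   */
	u32 offsets[2];		/* byte offset of the plane in that buffer */
	u32 pitches[2];		/* bytes per scanline of each plane        */
};

/* Address the scanout hardware should fetch for a given plane:
 * the buffer's base iova plus the plane's offset within it.
 */
static u64 toy_fb_plane_iova(const struct toy_fb *fb, int plane)
{
	return fb->base_iova[plane] + fb->offsets[plane];
}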
- /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, 32); if (ret) goto fini; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 06437745bc2c..3a78cb48662b 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -23,6 +23,8 @@ #include "msm_drv.h" +#define MAX_PLANE 4 + /* As there are different display controller blocks depending on the * snapdragon version, the kms support is split out and the appropriate * implementation is loaded at runtime. The kms module is responsible @@ -38,6 +40,9 @@ struct msm_kms_funcs { irqreturn_t (*irq)(struct msm_kms *kms); int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + /* modeset, bracketing atomic_commit(): */ + void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); + void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); /* misc: */ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 024e98ef8e4d..d84583776d50 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -10,6 +10,7 @@ menu "Display Panels" config DRM_PANEL_SIMPLE tristate "support for simple panels" depends on OF + depends on BACKLIGHT_CLASS_DEVICE help DRM panel driver for dumb panels that need at most a regulator and a GPIO to be powered up. Optionally a backlight can be attached so @@ -31,6 +32,7 @@ config DRM_PANEL_SHARP_LQ101R1SX01 tristate "Sharp LQ101R1SX01 panel" depends on OF depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE help Say Y here if you want to enable support for Sharp LQ101R1SX01 TFT-LCD modules. 
The panel has a 2560x1600 resolution and uses diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index 9d81759d82fc..3cce3ca19601 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -19,8 +19,6 @@ #include <video/mipi_display.h> -#include <linux/host1x.h> - struct sharp_panel { struct drm_panel base; /* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */ @@ -41,6 +39,16 @@ static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel) return container_of(panel, struct sharp_panel, base); } +static void sharp_wait_frames(struct sharp_panel *sharp, unsigned int frames) +{ + unsigned int refresh = drm_mode_vrefresh(sharp->mode); + + if (WARN_ON(frames > refresh)) + return; + + msleep(1000 / (refresh / frames)); +} + static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value) { u8 payload[3] = { offset >> 8, offset & 0xff, value }; @@ -106,6 +114,8 @@ static int sharp_panel_unprepare(struct drm_panel *panel) if (!sharp->prepared) return 0; + sharp_wait_frames(sharp, 4); + err = mipi_dsi_dcs_set_display_off(sharp->link1); if (err < 0) dev_err(panel->dev, "failed to set display off: %d\n", err); @@ -170,15 +180,13 @@ static int sharp_panel_prepare(struct drm_panel *panel) if (err < 0) return err; - usleep_range(10000, 20000); - - err = mipi_dsi_dcs_soft_reset(sharp->link1); - if (err < 0) { - dev_err(panel->dev, "soft reset failed: %d\n", err); - goto poweroff; - } - - msleep(120); + /* + * According to the datasheet, the panel needs around 10 ms to fully + * power up. At least another 120 ms is required before exiting sleep + * mode to make sure the panel is ready. Throw in another 20 ms for + * good measure. 
+ */ + msleep(150); err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1); if (err < 0) { @@ -238,6 +246,9 @@ static int sharp_panel_prepare(struct drm_panel *panel) sharp->prepared = true; + /* wait for 6 frames before continuing */ + sharp_wait_frames(sharp, 6); + return 0; poweroff: diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 6049d245c20e..39806c335339 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -448,6 +448,34 @@ static const struct panel_desc auo_b133htn01 = { }, }; +static const struct drm_display_mode avic_tm070ddh03_mode = { + .clock = 51200, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 4, + .htotal = 1024 + 160 + 4 + 156, + .vdisplay = 600, + .vsync_start = 600 + 17, + .vsync_end = 600 + 17 + 1, + .vtotal = 600 + 17 + 1 + 17, + .vrefresh = 60, +}; + +static const struct panel_desc avic_tm070ddh03 = { + .modes = &avic_tm070ddh03_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 154, + .height = 90, + }, + .delay = { + .prepare = 20, + .enable = 200, + .disable = 200, + }, +}; + static const struct drm_display_mode chunghwa_claa101wa01a_mode = { .clock = 72070, .hdisplay = 1366, @@ -566,6 +594,29 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode giantplus_gpg482739qs5_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 5, + .hsync_end = 480 + 5 + 1, + .htotal = 480 + 5 + 1 + 40, + .vdisplay = 272, + .vsync_start = 272 + 8, + .vsync_end = 272 + 8 + 1, + .vtotal = 272 + 8 + 1 + 8, + .vrefresh = 60, +}; + +static const struct panel_desc giantplus_gpg482739qs5 = { + .modes = &giantplus_gpg482739qs5_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 95, + .height = 54, + }, +}; + static const struct drm_display_mode hannstar_hsd070pww1_mode = { .clock = 71100, .hdisplay = 1280, @@ -745,6 +796,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "auo,b133xtn01", .data = &auo_b133xtn01, }, { + .compatible = "avic,tm070ddh03", + .data = &avic_tm070ddh03, + }, { .compatible = "chunghwa,claa101wa01a", .data = &chunghwa_claa101wa01a }, { @@ -763,6 +817,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "foxlink,fl500wvr00-a0t", .data = &foxlink_fl500wvr00_a0t, }, { + .compatible = "giantplus,gpg482739qs5", + .data = &giantplus_gpg482739qs5 + }, { .compatible = "hannstar,hsd070pww1", .data = &hannstar_hsd070pww1, }, { diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 7d2ff31c35a5..f86eb54e7763 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -845,7 +845,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 4be2bb7cbef3..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git 
a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) return r; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; return radeon_gart_table_ram_alloc(rdev); } @@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) WREG32(RADEON_AIC_HI_ADDR, 0); } +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) +{ + return addr; +} + void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) + uint64_t entry) { u32 *gtt = rdev->gart.ptr; - gtt[i] = cpu_to_le32(lower_32_bits(addr)); + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } void r100_pci_gart_fini(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) #define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_READABLE (1 << 3) -void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = rdev->gart.ptr; - addr = (lower_32_bits(addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24); if (flags & RADEON_GART_PAGE_READ) @@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R300_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) addr |= R300_PTE_UNSNOOPED; + return addr; +} + +void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = rdev->gart.ptr; + /* on x86 we want this to be CPU endian, on powerpc * on powerpc without HW swappers, it'll get swapped on way * into VRAM - so no need for cpu_to_le32 on VRAM tables */ - writel(addr, ((void __iomem *)ptr) + (i * 4)); + writel(entry, ((void __iomem *)ptr) + (i * 4)); } int rv370_pcie_gart_init(struct radeon_device *rdev) @@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; return radeon_gart_table_vram_alloc(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 93e407b7e7a7..5587603b4a89 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); * Dummy page */ struct radeon_dummy_page { + uint64_t entry; struct page *page; dma_addr_t addr; }; @@ -645,7 +646,7 @@ struct radeon_gart { unsigned num_cpu_pages; unsigned table_size; struct page **pages; - dma_addr_t *pages_addr; + uint64_t *pages_entry; bool ready; }; @@ -1858,8 +1859,9 @@ struct radeon_asic { /* gart */ struct { void (*tlb_flush)(struct radeon_device *rdev); + uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); void (*set_page)(struct radeon_device *rdev, unsigned i, - uint64_t addr, 
uint32_t flags); + uint64_t entry); } gart; struct { int (*init)(struct radeon_device *rdev); @@ -2867,7 +2869,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) -#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) +#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) +#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f811ee14a237..c0ecd128b14b 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; rdev->asic->gart.set_page = &r100_pci_gart_set_page; } rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; @@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { .mc_wait_for_idle = &r100_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &r100_pci_gart_tlb_flush, + .get_page_entry = &r100_pci_gart_get_page_entry, .set_page = &r100_pci_gart_set_page, }, .ring = { @@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { .mc_wait_for_idle = &r300_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { .mc_wait_for_idle = &rs400_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { .mc_wait_for_idle = &rs600_mc_wait_for_idle, .gart = { .tlb_flush = &rs600_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = 
&rs600_gart_set_page, }, .ring = { @@ -689,6 +698,7 @@ static struct radeon_asic rs690_asic = { .mc_wait_for_idle = &rs690_mc_wait_for_idle, .gart = { .tlb_flush = &rs400_gart_tlb_flush, + .get_page_entry = &rs400_gart_get_page_entry, .set_page = &rs400_gart_set_page, }, .ring = { @@ -755,6 +765,7 @@ static struct radeon_asic rv515_asic = { .mc_wait_for_idle = &rv515_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -821,6 +832,7 @@ static struct radeon_asic r520_asic = { .mc_wait_for_idle = &r520_mc_wait_for_idle, .gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, + .get_page_entry = &rv370_pcie_gart_get_page_entry, .set_page = &rv370_pcie_gart_set_page, }, .ring = { @@ -915,6 +927,7 @@ static struct radeon_asic r600_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -998,6 +1011,7 @@ static struct radeon_asic rv6xx_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1087,6 +1101,7 @@ static struct radeon_asic rs780_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1189,6 +1204,7 @@ static struct radeon_asic rv770_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1305,6 +1321,7 @@ static struct radeon_asic evergreen_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1395,6 +1412,7 @@ static struct radeon_asic sumo_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1484,6 +1502,7 @@ static struct radeon_asic btc_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .ring = { @@ -1617,6 +1636,7 @@ static struct radeon_asic cayman_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1718,6 +1738,7 @@ static struct radeon_asic trinity_asic = { .get_gpu_clock_counter = &r600_get_gpu_clock_counter, .gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -1849,6 +1870,7 @@ static struct radeon_asic si_asic = { .get_gpu_clock_counter = &si_get_gpu_clock_counter, .gart = { .tlb_flush = &si_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2012,6 +2034,7 @@ static struct radeon_asic ci_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { 
.tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { @@ -2121,6 +2144,7 @@ static struct radeon_asic kv_asic = { .get_gpu_clock_counter = &cik_get_gpu_clock_counter, .gart = { .tlb_flush = &cik_pcie_gart_tlb_flush, + .get_page_entry = &rs600_gart_get_page_entry, .set_page = &rs600_gart_set_page, }, .vm = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 4045a320a424..72bdd3bf0d8e 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); int r100_asic_reset(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); +uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); @@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); extern int r300_cs_parse(struct radeon_cs_parser *p); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev); @@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int rs400_gart_init(struct radeon_device *rdev); @@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags); + uint64_t entry); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) rdev->dummy_page.page = NULL; return -ENOMEM; } + rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, + 
RADEON_GART_PAGE_DUMMY); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) radeon_bo_unpin(rdev->gart.robj); radeon_bo_unreserve(rdev->gart.robj); rdev->gart.table_addr = gpu_addr; + + if (!r) { + int i; + + /* We might have dropped some GART table updates while it wasn't + * mapped, restore all entries + */ + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); + mb(); + radeon_gart_tlb_flush(rdev); + } + return r; } @@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, unsigned t; unsigned p; int i, j; - u64 page_base; if (!rdev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); @@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, for (i = 0; i < pages; i++, p++) { if (rdev->gart.pages[p]) { rdev->gart.pages[p] = NULL; - rdev->gart.pages_addr[p] = rdev->dummy_page.addr; - page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + rdev->gart.pages_entry[t] = rdev->dummy_page.entry; if (rdev->gart.ptr) { - radeon_gart_set_page(rdev, t, page_base, - RADEON_GART_PAGE_DUMMY); + radeon_gart_set_page(rdev, t, + rdev->dummy_page.entry); } - page_base += RADEON_GPU_PAGE_SIZE; } } } @@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, { unsigned t; unsigned p; - uint64_t page_base; + uint64_t page_base, page_entry; int i, j; if (!rdev->gart.ready) { @@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - rdev->gart.pages_addr[p] = dma_addr[i]; rdev->gart.pages[p] = pagelist[i]; - if (rdev->gart.ptr) { - page_base = rdev->gart.pages_addr[p]; - for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, page_base, flags); - page_base += RADEON_GPU_PAGE_SIZE; + page_base = dma_addr[i]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + page_entry = radeon_gart_get_page_entry(page_base, flags); + rdev->gart.pages_entry[t] = page_entry; + if (rdev->gart.ptr) { + radeon_gart_set_page(rdev, t, page_entry); } + page_base += RADEON_GPU_PAGE_SIZE; } } mb(); @@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) radeon_gart_fini(rdev); return -ENOMEM; } - rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * - rdev->gart.num_cpu_pages); - if (rdev->gart.pages_addr == NULL) { + rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * + rdev->gart.num_gpu_pages); + if (rdev->gart.pages_entry == NULL) { radeon_gart_fini(rdev); return -ENOMEM; } /* set GART entry to point to the dummy page by default */ - for (i = 0; i < rdev->gart.num_cpu_pages; i++) { - rdev->gart.pages_addr[i] = rdev->dummy_page.addr; - } + for (i = 0; i < rdev->gart.num_gpu_pages; i++) + rdev->gart.pages_entry[i] = rdev->dummy_page.entry; return 0; } @@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) */ void radeon_gart_fini(struct radeon_device *rdev) { - if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { + if (rdev->gart.ready) { /* unbind pages */ radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); } rdev->gart.ready = false; 
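Illustrative aside, not part of the patch: the radeon GART rework above encodes each page-table entry once (get_page_entry) and caches it in pages_entry[], so the whole table can simply be replayed after it is re-pinned. A rough sketch of that precompute-and-replay pattern, with made-up helper names standing in for the per-ASIC callbacks:

#include <linux/types.h>

/* Toy model of the cached-entry scheme (names are hypothetical). */
struct toy_gart {
	unsigned int num_pages;
	u64 *entries;				/* cached, fully encoded PTEs  */
	u64 (*encode)(u64 dma_addr, u32 flags);	/* per-ASIC entry encoding     */
	void (*write)(unsigned int idx, u64 entry); /* program one table slot  */
};

static void toy_gart_bind_page(struct toy_gart *g, unsigned int idx,
			       u64 dma_addr, u32 flags)
{
	u64 entry = g->encode(dma_addr, flags);

	g->entries[idx] = entry;	/* remember the encoded entry ... */
	g->write(idx, entry);		/* ... and program it right away  */
}

static void toy_gart_replay(struct toy_gart *g)
{
	unsigned int i;

	/* Once the table is accessible again, restore every slot from the
	 * cache instead of re-deriving it from DMA addresses and flags.
	 */
	for (i = 0; i < g->num_pages; i++)
		g->write(i, g->entries[i]);
}

The point of the cache is that the encoded entries stay valid even while the VRAM-backed table itself is unmapped, so no per-page state has to be recomputed on resume.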
vfree(rdev->gart.pages); - vfree(rdev->gart.pages_addr); + vfree(rdev->gart.pages_entry); rdev->gart.pages = NULL; - rdev->gart.pages_addr = NULL; + rdev->gart.pages_entry = NULL; radeon_dummy_page_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 7b274205eeaf..061eaa9c19c7 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -392,7 +392,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t hpd_size, uint64_t hpd_gpu_addr) { - uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; + uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); lock_srbm(kgd, mec, pipe, 0, 0); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..06d2246d07f1 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) uint64_t result; /* page table offset */ - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; + result &= ~RADEON_GPU_PAGE_MASK; return result; } diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) #define RS400_PTE_WRITEABLE (1 << 2) #define RS400_PTE_READABLE (1 << 3) -void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) { uint32_t entry; - u32 *gtt = rdev->gart.ptr; entry = (lower_32_bits(addr) & PAGE_MASK) | ((upper_32_bits(addr) & 0xff) << 4); @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, entry |= RS400_PTE_WRITEABLE; if (!(flags & RADEON_GART_PAGE_SNOOP)) entry |= RS400_PTE_UNSNOOPED; - entry = cpu_to_le32(entry); - gtt[i] = entry; + return entry; +} + +void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + u32 *gtt = rdev->gart.ptr; + gtt[i] = cpu_to_le32(lower_32_bits(entry)); } int rs400_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 68f154a451c0..d81182ad53ec 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -626,11 +626,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) radeon_gart_table_vram_free(rdev); } -void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, - uint64_t addr, uint32_t flags) +uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) { - void __iomem *ptr = (void *)rdev->gart.ptr; - addr = addr & 0xFFFFFFFFFFFFF000ULL; addr |= R600_PTE_SYSTEM; if (flags & RADEON_GART_PAGE_VALID) @@ -641,7 +638,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, addr |= R600_PTE_WRITEABLE; if (flags & RADEON_GART_PAGE_SNOOP) addr |= R600_PTE_SNOOPED; - writeq(addr, ptr + (i * 8)); + return addr; +} + +void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, + uint64_t entry) +{ + void __iomem *ptr = (void *)rdev->gart.ptr; + writeq(entry, ptr + (i * 8)); } int rs600_irq_set(struct 
radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, for (; ndw > 0; ndw -= 2, --count, pe += 8) { if (flags & R600_PTE_SYSTEM) { value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; } else if (flags & R600_PTE_VALID) { value = addr; } else { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 2324a526de65..11485a4a16ae 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -1,6 +1,6 @@ config DRM_RCAR_DU tristate "DRM Support for R-Car Display Unit" - depends on DRM && ARM + depends on DRM && ARM && HAVE_DMA_ATTRS depends on ARCH_SHMOBILE || COMPILE_TEST select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index cb21e3821244..35215f6867d3 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -1,6 +1,7 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU + depends on RESET_CONTROLLER select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index a50fe0eeaa0d..b9202aa6f8ab 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -1,8 +1,10 @@ config DRM_SHMOBILE tristate "DRM Support for SH Mobile" - depends on DRM && ARM + depends on DRM && ARM && HAVE_DMA_ATTRS depends on ARCH_SHMOBILE || COMPILE_TEST + depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM select BACKLIGHT_CLASS_DEVICE + select BACKLIGHT_LCD_SUPPORT select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index d6d6b705b8c1..fbccc105819b 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -1,10 +1,11 @@ config DRM_STI tristate "DRM Support for STMicroelectronics SoC stiH41x Series" - depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) + depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS select RESET_CONTROLLER select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER + select DRM_PANEL select FW_LOADER_USER_HELPER_FALLBACK help Choose this option to enable DRM on STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c index 9fde3ee8b1a5..6029a2e3db1d 100644 --- a/drivers/gpu/drm/sti/sti_awg_utils.c +++ b/drivers/gpu/drm/sti/sti_awg_utils.c @@ -60,8 +60,6 @@ static int awg_generate_instr(enum opcode opcode, * pixel. 
So we transform SKIP into SET * instruction */ opcode = SET; - arg = (arg << 24) >> 24; - arg &= (0x0ff); break; } diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 651afad21f92..aeb5070c8363 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -91,6 +91,7 @@ struct sti_dvo { struct dvo_config *config; bool enabled; struct drm_encoder *encoder; + struct drm_bridge *bridge; }; struct sti_dvo_connector { @@ -272,19 +273,12 @@ static void sti_dvo_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_dvo_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_dvo_bridge_funcs = { .pre_enable = sti_dvo_pre_enable, .enable = sti_dvo_bridge_nope, .disable = sti_dvo_disable, .post_disable = sti_dvo_bridge_nope, .mode_set = sti_dvo_set_mode, - .destroy = sti_dvo_brigde_destroy, }; static int sti_dvo_connector_get_modes(struct drm_connector *connector) @@ -416,8 +410,21 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; bridge->driver_private = dvo; - drm_bridge_init(drm_dev, bridge, &sti_dvo_bridge_funcs); + bridge->funcs = &sti_dvo_bridge_funcs; + bridge->of_node = dvo->dev.of_node; + err = drm_bridge_add(bridge); + if (err) { + DRM_ERROR("Failed to add bridge\n"); + return err; + } + err = drm_bridge_attach(drm_dev, bridge); + if (err) { + DRM_ERROR("Failed to attach bridge\n"); + return err; + } + + dvo->bridge = bridge; encoder->bridge = bridge; connector->encoder = encoder; dvo->encoder = encoder; @@ -446,7 +453,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); + drm_bridge_remove(bridge); drm_connector_cleanup(drm_connector); return -EINVAL; } @@ -454,7 +461,9 @@ err_connector: static void sti_dvo_unbind(struct device *dev, struct device *master, void *data) { - /* do nothing */ + struct sti_dvo *dvo = dev_get_drvdata(dev); + + drm_bridge_remove(dvo->bridge); } static const struct component_ops sti_dvo_ops = { diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 32448d1d1e8f..087906fd8846 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -14,15 +14,19 @@ #include "sti_layer.h" #include "sti_vtg.h" +#define ALPHASWITCH BIT(6) #define ENA_COLOR_FILL BIT(8) +#define BIGNOTLITTLE BIT(23) #define WAIT_NEXT_VSYNC BIT(31) /* GDP color formats */ #define GDP_RGB565 0x00 #define GDP_RGB888 0x01 #define GDP_RGB888_32 0x02 +#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB8565 0x04 #define GDP_ARGB8888 0x05 +#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) #define GDP_ARGB1555 0x06 #define GDP_ARGB4444 0x07 #define GDP_CLUT8 0x0B @@ -103,7 +107,9 @@ struct sti_gdp { static const uint32_t gdp_supported_formats[] = { DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB4444, DRM_FORMAT_ARGB1555, DRM_FORMAT_RGB565, @@ -129,8 +135,12 @@ static int sti_gdp_fourcc2format(int fourcc) switch (fourcc) { case DRM_FORMAT_XRGB8888: return GDP_RGB888_32; + case DRM_FORMAT_XBGR8888: + return GDP_XBGR8888; case DRM_FORMAT_ARGB8888: return GDP_ARGB8888; + case DRM_FORMAT_ABGR8888: + return GDP_ABGR8888; case DRM_FORMAT_ARGB4444: return GDP_ARGB4444; case DRM_FORMAT_ARGB1555: @@ -157,6 +167,7 @@ static int 
sti_gdp_get_alpharange(int format) case GDP_ARGB8565: case GDP_ARGB8888: case GDP_AYCBR8888: + case GDP_ABGR8888: return GAM_GDP_ALPHARANGE_255; } return 0; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 2ae9a9b73666..a9bbb081ecad 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -508,19 +508,12 @@ static void sti_hda_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_hda_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_hda_bridge_funcs = { .pre_enable = sti_hda_pre_enable, .enable = sti_hda_bridge_nope, .disable = sti_hda_disable, .post_disable = sti_hda_bridge_nope, .mode_set = sti_hda_set_mode, - .destroy = sti_hda_brigde_destroy, }; static int sti_hda_connector_get_modes(struct drm_connector *connector) @@ -664,7 +657,8 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; bridge->driver_private = hda; - drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs); + bridge->funcs = &sti_hda_bridge_funcs; + drm_bridge_attach(drm_dev, bridge); encoder->bridge = bridge; connector->encoder = encoder; @@ -693,7 +687,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); drm_connector_cleanup(drm_connector); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index d032e024b0b8..1485ade98710 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -42,8 +42,17 @@ #define HDMI_SW_DI_1_PKT_WORD5 0x0228 #define HDMI_SW_DI_1_PKT_WORD6 0x022C #define HDMI_SW_DI_CFG 0x0230 +#define HDMI_SW_DI_2_HEAD_WORD 0x0600 +#define HDMI_SW_DI_2_PKT_WORD0 0x0604 +#define HDMI_SW_DI_2_PKT_WORD1 0x0608 +#define HDMI_SW_DI_2_PKT_WORD2 0x060C +#define HDMI_SW_DI_2_PKT_WORD3 0x0610 +#define HDMI_SW_DI_2_PKT_WORD4 0x0614 +#define HDMI_SW_DI_2_PKT_WORD5 0x0618 +#define HDMI_SW_DI_2_PKT_WORD6 0x061C #define HDMI_IFRAME_SLOT_AVI 1 +#define HDMI_IFRAME_SLOT_AUDIO 2 #define XCAT(prefix, x, suffix) prefix ## x ## suffix #define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD) @@ -99,6 +108,10 @@ #define HDMI_STA_SW_RST BIT(1) +#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) +#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) +#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) + struct sti_hdmi_connector { struct drm_connector drm_connector; struct drm_encoder *encoder; @@ -228,6 +241,90 @@ static void hdmi_config(struct sti_hdmi *hdmi) } /** + * Helper to concatenate infoframe in 32 bits word + * + * @ptr: pointer on the hdmi internal structure + * @data: infoframe to write + * @size: size to write + */ +static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size) +{ + unsigned long value = 0; + size_t i; + + for (i = size; i > 0; i--) + value = (value << 8) | ptr[i - 1]; + + return value; +} + +/** + * Helper to write info frame + * + * @hdmi: pointer on the hdmi internal structure + * @data: infoframe to write + * @size: size to write + */ +static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data) +{ + const u8 *ptr = data; + u32 val, slot, mode, i; + u32 head_offset, pack_offset; + size_t size; + + switch (*ptr) { + case HDMI_INFOFRAME_TYPE_AVI: + slot = HDMI_IFRAME_SLOT_AVI; + mode = HDMI_IFRAME_FIELD; + head_offset = 
HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI); + pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI); + size = HDMI_AVI_INFOFRAME_SIZE; + break; + + case HDMI_INFOFRAME_TYPE_AUDIO: + slot = HDMI_IFRAME_SLOT_AUDIO; + mode = HDMI_IFRAME_FRAME; + head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO); + pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO); + size = HDMI_AUDIO_INFOFRAME_SIZE; + break; + + default: + DRM_ERROR("unsupported infoframe type: %#x\n", *ptr); + return; + } + + /* Disable transmission slot for updated infoframe */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + + val = HDMI_INFOFRAME_HEADER_TYPE(*ptr++); + val |= HDMI_INFOFRAME_HEADER_VERSION(*ptr++); + val |= HDMI_INFOFRAME_HEADER_LEN(*ptr++); + writel(val, hdmi->regs + head_offset); + + /* + * Each subpack contains 4 bytes + * The First Bytes of the first subpacket must contain the checksum + * Packet size in increase by one. + */ + for (i = 0; i < size; i += sizeof(u32)) { + size_t num; + + num = min_t(size_t, size - i, sizeof(u32)); + val = hdmi_infoframe_subpack(ptr, num); + ptr += sizeof(u32); + writel(val, hdmi->regs + pack_offset + i); + } + + /* Enable transmission slot for updated infoframe */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); +} + +/** * Prepare and configure the AVI infoframe * * AVI infoframe are transmitted at least once per two video field and @@ -243,8 +340,6 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) struct drm_display_mode *mode = &hdmi->mode; struct hdmi_avi_infoframe infoframe; u8 buffer[HDMI_INFOFRAME_SIZE(AVI)]; - u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE; - u32 val; int ret; DRM_DEBUG_DRIVER("\n"); @@ -266,47 +361,43 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) return ret; } - /* Disable transmission slot for AVI infoframe */ - val = hdmi_read(hdmi, HDMI_SW_DI_CFG); - val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI); - hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + hdmi_infoframe_write_infopack(hdmi, buffer); - /* Infoframe header */ - val = buffer[0]; - val |= buffer[1] << 8; - val |= buffer[2] << 16; - hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)); - - /* Infoframe packet bytes */ - val = buffer[3]; - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame++) << 8; - val |= *(frame++) << 16; - val |= *(frame++) << 24; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI)); - - val = *(frame++); - val |= *(frame) << 8; - hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI)); - - /* Enable transmission slot for AVI infoframe - * According to the hdmi specification, AVI infoframe should be - * transmitted at least once per two video fields - */ - val = hdmi_read(hdmi, HDMI_SW_DI_CFG); - val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI); - hdmi_write(hdmi, val, HDMI_SW_DI_CFG); + return 0; +} + +/** + * Prepare and configure the AUDIO infoframe + * + * AUDIO infoframe are transmitted once per frame and + * contains information about HDMI transmission mode such 
as audio codec, + * sample size, ... + * + * @hdmi: pointer on the hdmi internal structure + * + * Return negative value if error occurs + */ +static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi) +{ + struct hdmi_audio_infoframe infofame; + u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)]; + int ret; + + ret = hdmi_audio_infoframe_init(&infofame); + if (ret < 0) { + DRM_ERROR("failed to setup audio infoframe: %d\n", ret); + return ret; + } + + infofame.channels = 2; + + ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer)); + if (ret < 0) { + DRM_ERROR("failed to pack audio infoframe: %d\n", ret); + return ret; + } + + hdmi_infoframe_write_infopack(hdmi, buffer); return 0; } @@ -427,6 +518,10 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge) if (hdmi_avi_infoframe_config(hdmi)) DRM_ERROR("Unable to configure AVI infoframe\n"); + /* Program AUDIO infoframe */ + if (hdmi_audio_infoframe_config(hdmi)) + DRM_ERROR("Unable to configure AUDIO infoframe\n"); + /* Sw reset */ hdmi_swreset(hdmi); } @@ -463,19 +558,12 @@ static void sti_hdmi_bridge_nope(struct drm_bridge *bridge) /* do nothing */ } -static void sti_hdmi_brigde_destroy(struct drm_bridge *bridge) -{ - drm_bridge_cleanup(bridge); - kfree(bridge); -} - static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = { .pre_enable = sti_hdmi_pre_enable, .enable = sti_hdmi_bridge_nope, .disable = sti_hdmi_disable, .post_disable = sti_hdmi_bridge_nope, .mode_set = sti_hdmi_set_mode, - .destroy = sti_hdmi_brigde_destroy, }; static int sti_hdmi_connector_get_modes(struct drm_connector *connector) @@ -635,7 +723,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) goto err_adapt; bridge->driver_private = hdmi; - drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs); + bridge->funcs = &sti_hdmi_bridge_funcs; + drm_bridge_attach(drm_dev, bridge); encoder->bridge = bridge; connector->encoder = encoder; @@ -667,7 +756,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_connector_unregister(drm_connector); err_connector: - drm_bridge_cleanup(bridge); drm_connector_cleanup(drm_connector); err_adapt: put_device(&hdmi->ddc_adapt->dev); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index f3db05dab0ab..b0eb62de1b2e 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1025,7 +1025,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev) /* Get clock resources */ hqvdp->clk = devm_clk_get(dev, "hqvdp"); hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main"); - if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk)) { + if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) { DRM_ERROR("Cannot get clocks\n"); return -ENXIO; } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index ae26cc054fff..3aaa84ae2681 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -18,9 +18,12 @@ #include "drm.h" #include "gem.h" +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> struct tegra_dc_soc_info { + bool supports_border_color; bool supports_interlacing; bool supports_cursor; bool supports_block_linear; @@ -38,63 +41,122 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane) return container_of(plane, struct tegra_plane, base); } -static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index) +struct tegra_dc_state { + struct drm_crtc_state base; + + struct clk *clk; + unsigned long pclk; 
+ unsigned int div; + + u32 planes; +}; + +static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) { - u32 value = WIN_A_ACT_REQ << index; + if (state) + return container_of(state, struct tegra_dc_state, base); - tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); + return NULL; } -static void tegra_dc_cursor_commit(struct tegra_dc *dc) +struct tegra_plane_state { + struct drm_plane_state base; + + struct tegra_bo_tiling tiling; + u32 format; + u32 swap; +}; + +static inline struct tegra_plane_state * +to_tegra_plane_state(struct drm_plane_state *state) { - tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL); + if (state) + return container_of(state, struct tegra_plane_state, base); + + return NULL; } -static void tegra_dc_commit(struct tegra_dc *dc) +/* + * Reads the active copy of a register. This takes the dc->lock spinlock to + * prevent races with the VBLANK processing which also needs access to the + * active copy of some registers. + */ +static u32 tegra_dc_readl_active(struct tegra_dc *dc, unsigned long offset) +{ + unsigned long flags; + u32 value; + + spin_lock_irqsave(&dc->lock, flags); + + tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); + value = tegra_dc_readl(dc, offset); + tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); + + spin_unlock_irqrestore(&dc->lock, flags); + return value; +} + +/* + * Double-buffered registers have two copies: ASSEMBLY and ACTIVE. When the + * *_ACT_REQ bits are set the ASSEMBLY copy is latched into the ACTIVE copy. + * Latching happens mmediately if the display controller is in STOP mode or + * on the next frame boundary otherwise. + * + * Triple-buffered registers have three copies: ASSEMBLY, ARM and ACTIVE. The + * ASSEMBLY copy is latched into the ARM copy immediately after *_UPDATE bits + * are written. When the *_ACT_REQ bits are written, the ARM copy is latched + * into the ACTIVE copy, either immediately if the display controller is in + * STOP mode, or at the next frame boundary otherwise. 
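(An illustrative aside, not part of the patch: the latching sequence described above is what this driver's commit helpers boil down to. A minimal sketch, assuming the DC_CMD_STATE_CONTROL layout used throughout this file, where each *_UPDATE bit sits eight bits above the corresponding *_ACT_REQ bit; example_window_commit is a hypothetical name.)

/*
 * Sketch: push the ASSEMBLY copy of window `index` to the ARM copy, then
 * request promotion of the ARM copy to ACTIVE (immediately in STOP mode,
 * otherwise at the next frame boundary).
 */
static void example_window_commit(struct tegra_dc *dc, unsigned int index)
{
	u32 value = WIN_A_ACT_REQ << index;

	tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); /* *_UPDATE bits */
	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);      /* *_ACT_REQ bits */
}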
+ */ +void tegra_dc_commit(struct tegra_dc *dc) { tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); } -static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap) +static int tegra_dc_format(u32 fourcc, u32 *format, u32 *swap) { /* assume no swapping of fetched data */ if (swap) *swap = BYTE_SWAP_NOSWAP; - switch (format) { + switch (fourcc) { case DRM_FORMAT_XBGR8888: - return WIN_COLOR_DEPTH_R8G8B8A8; + *format = WIN_COLOR_DEPTH_R8G8B8A8; + break; case DRM_FORMAT_XRGB8888: - return WIN_COLOR_DEPTH_B8G8R8A8; + *format = WIN_COLOR_DEPTH_B8G8R8A8; + break; case DRM_FORMAT_RGB565: - return WIN_COLOR_DEPTH_B5G6R5; + *format = WIN_COLOR_DEPTH_B5G6R5; + break; case DRM_FORMAT_UYVY: - return WIN_COLOR_DEPTH_YCbCr422; + *format = WIN_COLOR_DEPTH_YCbCr422; + break; case DRM_FORMAT_YUYV: if (swap) *swap = BYTE_SWAP_SWAP2; - return WIN_COLOR_DEPTH_YCbCr422; + *format = WIN_COLOR_DEPTH_YCbCr422; + break; case DRM_FORMAT_YUV420: - return WIN_COLOR_DEPTH_YCbCr420P; + *format = WIN_COLOR_DEPTH_YCbCr420P; + break; case DRM_FORMAT_YUV422: - return WIN_COLOR_DEPTH_YCbCr422P; + *format = WIN_COLOR_DEPTH_YCbCr422P; + break; default: - break; + return -EINVAL; } - WARN(1, "unsupported pixel format %u, using default\n", format); - return WIN_COLOR_DEPTH_B8G8R8A8; + return 0; } static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar) @@ -121,6 +183,9 @@ static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar) return true; } + if (planar) + *planar = false; + return false; } @@ -164,8 +229,8 @@ static inline u32 compute_initial_dda(unsigned int in) return dfixed_frac(inf); } -static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, - const struct tegra_dc_window *window) +static void tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, + const struct tegra_dc_window *window) { unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; unsigned long value, flags; @@ -274,9 +339,11 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, break; case TEGRA_BO_TILING_MODE_BLOCK: - DRM_ERROR("hardware doesn't support block linear mode\n"); - spin_unlock_irqrestore(&dc->lock, flags); - return -EINVAL; + /* + * No need to handle this here because ->atomic_check + * will already have filtered it out. 
+ */ + break; } tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE); @@ -332,109 +399,245 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index, break; } - tegra_dc_window_commit(dc, index); - spin_unlock_irqrestore(&dc->lock, flags); - - return 0; } -static int tegra_window_plane_disable(struct drm_plane *plane) +static void tegra_plane_destroy(struct drm_plane *plane) { - struct tegra_dc *dc = to_tegra_dc(plane->crtc); struct tegra_plane *p = to_tegra_plane(plane); - unsigned long flags; - u32 value; - if (!plane->crtc) - return 0; + drm_plane_cleanup(plane); + kfree(p); +} - spin_lock_irqsave(&dc->lock, flags); +static const u32 tegra_primary_plane_formats[] = { + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, +}; - value = WINDOW_A_SELECT << p->index; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); +static void tegra_primary_plane_destroy(struct drm_plane *plane) +{ + tegra_plane_destroy(plane); +} - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value &= ~WIN_ENABLE; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); +static void tegra_plane_reset(struct drm_plane *plane) +{ + struct tegra_plane_state *state; - tegra_dc_window_commit(dc, p->index); + if (plane->state && plane->state->fb) + drm_framebuffer_unreference(plane->state->fb); - spin_unlock_irqrestore(&dc->lock, flags); + kfree(plane->state); + plane->state = NULL; - return 0; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + plane->state = &state->base; + plane->state->plane = plane; + } } -static void tegra_plane_destroy(struct drm_plane *plane) +static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_plane *plane) { - struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_plane_state *copy; - drm_plane_cleanup(plane); - kfree(p); + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + if (copy->base.fb) + drm_framebuffer_reference(copy->base.fb); + + return &copy->base; } -static const u32 tegra_primary_plane_formats[] = { - DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_RGB565, +static void tegra_plane_atomic_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + if (state->fb) + drm_framebuffer_unreference(state->fb); + + kfree(state); +} + +static const struct drm_plane_funcs tegra_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = tegra_primary_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, }; -static int tegra_primary_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) +static int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_framebuffer *fb) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; + return 0; +} + +static void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ +} + +static int tegra_plane_state_add(struct tegra_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + struct tegra_dc_state *tegra; + + /* 
Propagate errors from allocation or locking failures. */ + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + tegra = to_dc_state(crtc_state); + + tegra->planes |= WIN_A_ACT_REQ << plane->index; + + return 0; +} + +static int tegra_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_plane_state *plane_state = to_tegra_plane_state(state); + struct tegra_bo_tiling *tiling = &plane_state->tiling; + struct tegra_plane *tegra = to_tegra_plane(plane); + struct tegra_dc *dc = to_tegra_dc(state->crtc); int err; - memset(&window, 0, sizeof(window)); - window.src.x = src_x >> 16; - window.src.y = src_y >> 16; - window.src.w = src_w >> 16; - window.src.h = src_h >> 16; - window.dst.x = crtc_x; - window.dst.y = crtc_y; - window.dst.w = crtc_w; - window.dst.h = crtc_h; - window.format = tegra_dc_format(fb->pixel_format, &window.swap); - window.bits_per_pixel = fb->bits_per_pixel; - window.bottom_up = tegra_fb_is_bottom_up(fb); + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; - err = tegra_fb_get_tiling(fb, &window.tiling); + err = tegra_dc_format(state->fb->pixel_format, &plane_state->format, + &plane_state->swap); if (err < 0) return err; - window.base[0] = bo->paddr + fb->offsets[0]; - window.stride[0] = fb->pitches[0]; + err = tegra_fb_get_tiling(state->fb, tiling); + if (err < 0) + return err; + + if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK && + !dc->soc->supports_block_linear) { + DRM_ERROR("hardware doesn't support block linear mode\n"); + return -EINVAL; + } + + /* + * Tegra doesn't support different strides for U and V planes so we + * error out if the user tries to display a framebuffer with such a + * configuration. 
+ */ + if (drm_format_num_planes(state->fb->pixel_format) > 2) { + if (state->fb->pitches[2] != state->fb->pitches[1]) { + DRM_ERROR("unsupported UV-plane configuration\n"); + return -EINVAL; + } + } - err = tegra_dc_setup_window(dc, p->index, &window); + err = tegra_plane_state_add(tegra, state); if (err < 0) return err; return 0; } -static void tegra_primary_plane_destroy(struct drm_plane *plane) +static void tegra_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { - tegra_window_plane_disable(plane); - tegra_plane_destroy(plane); + struct tegra_plane_state *state = to_tegra_plane_state(plane->state); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_framebuffer *fb = plane->state->fb; + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc_window window; + unsigned int i; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + memset(&window, 0, sizeof(window)); + window.src.x = plane->state->src_x >> 16; + window.src.y = plane->state->src_y >> 16; + window.src.w = plane->state->src_w >> 16; + window.src.h = plane->state->src_h >> 16; + window.dst.x = plane->state->crtc_x; + window.dst.y = plane->state->crtc_y; + window.dst.w = plane->state->crtc_w; + window.dst.h = plane->state->crtc_h; + window.bits_per_pixel = fb->bits_per_pixel; + window.bottom_up = tegra_fb_is_bottom_up(fb); + + /* copy from state */ + window.tiling = state->tiling; + window.format = state->format; + window.swap = state->swap; + + for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { + struct tegra_bo *bo = tegra_fb_get_plane(fb, i); + + window.base[i] = bo->paddr + fb->offsets[i]; + window.stride[i] = fb->pitches[i]; + } + + tegra_dc_setup_window(dc, p->index, &window); } -static const struct drm_plane_funcs tegra_primary_plane_funcs = { - .update_plane = tegra_primary_plane_update, - .disable_plane = tegra_window_plane_disable, - .destroy = tegra_primary_plane_destroy, +static void tegra_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_plane *p = to_tegra_plane(plane); + struct tegra_dc *dc; + unsigned long flags; + u32 value; + + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); + + spin_lock_irqsave(&dc->lock, flags); + + value = WINDOW_A_SELECT << p->index; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); + + value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); + value &= ~WIN_ENABLE; + tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); + + spin_unlock_irqrestore(&dc->lock, flags); +} + +static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, }; static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, struct tegra_dc *dc) { + /* + * Ideally this would use drm_crtc_mask(), but that would require the + * CRTC to already be in the mode_config's list of CRTCs. However, it + * will only be added to that list in the drm_crtc_init_with_planes() + * (in tegra_dc_init()), which in turn requires registration of these + * planes. So we have ourselves a nice little chicken and egg problem + * here. 
+ * + * We work around this by manually creating the mask from the number + * of CRTCs that have been registered, and should therefore always be + * the same as drm_crtc_index() after registration. + */ + unsigned long possible_crtcs = 1 << drm->mode_config.num_crtc; struct tegra_plane *plane; unsigned int num_formats; const u32 *formats; @@ -447,7 +650,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, num_formats = ARRAY_SIZE(tegra_primary_plane_formats); formats = tegra_primary_plane_formats; - err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe, + err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, &tegra_primary_plane_funcs, formats, num_formats, DRM_PLANE_TYPE_PRIMARY); if (err < 0) { @@ -455,6 +658,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm, return ERR_PTR(err); } + drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs); + return &plane->base; } @@ -462,27 +667,49 @@ static const u32 tegra_cursor_plane_formats[] = { DRM_FORMAT_RGBA8888, }; -static int tegra_cursor_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) +static int tegra_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - struct tegra_dc *dc = to_tegra_dc(crtc); - u32 value = CURSOR_CLIP_DISPLAY; + struct tegra_plane *tegra = to_tegra_plane(plane); + int err; + + /* no need for further checks if the plane is being disabled */ + if (!state->crtc) + return 0; /* scaling not supported for cursor */ - if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h)) + if ((state->src_w >> 16 != state->crtc_w) || + (state->src_h >> 16 != state->crtc_h)) return -EINVAL; /* only square cursors supported */ - if (src_w != src_h) + if (state->src_w != state->src_h) + return -EINVAL; + + if (state->crtc_w != 32 && state->crtc_w != 64 && + state->crtc_w != 128 && state->crtc_w != 256) return -EINVAL; - switch (crtc_w) { + err = tegra_plane_state_add(tegra, state); + if (err < 0) + return err; + + return 0; +} + +static void tegra_cursor_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0); + struct tegra_dc *dc = to_tegra_dc(plane->state->crtc); + struct drm_plane_state *state = plane->state; + u32 value = CURSOR_CLIP_DISPLAY; + + /* rien ne va plus */ + if (!plane->state->crtc || !plane->state->fb) + return; + + switch (state->crtc_w) { case 32: value |= CURSOR_SIZE_32x32; break; @@ -500,7 +727,9 @@ static int tegra_cursor_plane_update(struct drm_plane *plane, break; default: - return -EINVAL; + WARN(1, "cursor size %ux%u not supported\n", state->crtc_w, + state->crtc_h); + return; } value |= (bo->paddr >> 10) & 0x3fffff; @@ -526,38 +755,43 @@ static int tegra_cursor_plane_update(struct drm_plane *plane, tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); /* position the cursor */ - value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff); + value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff); tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); - /* apply changes */ - tegra_dc_cursor_commit(dc); - tegra_dc_commit(dc); - - return 0; } -static int tegra_cursor_plane_disable(struct drm_plane *plane) +static void tegra_cursor_atomic_disable(struct drm_plane *plane, + struct 
drm_plane_state *old_state) { - struct tegra_dc *dc = to_tegra_dc(plane->crtc); + struct tegra_dc *dc; u32 value; - if (!plane->crtc) - return 0; + /* rien ne va plus */ + if (!old_state || !old_state->crtc) + return; + + dc = to_tegra_dc(old_state->crtc); value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~CURSOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_cursor_commit(dc); - tegra_dc_commit(dc); - - return 0; } static const struct drm_plane_funcs tegra_cursor_plane_funcs = { - .update_plane = tegra_cursor_plane_update, - .disable_plane = tegra_cursor_plane_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = tegra_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, +}; + +static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_cursor_atomic_check, + .atomic_update = tegra_cursor_atomic_update, + .atomic_disable = tegra_cursor_atomic_disable, }; static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, @@ -572,6 +806,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, if (!plane) return ERR_PTR(-ENOMEM); + /* + * We'll treat the cursor as an overlay plane with index 6 here so + * that the update and activation request bits in DC_CMD_STATE_CONTROL + * match up. + */ + plane->index = 6; + num_formats = ARRAY_SIZE(tegra_cursor_plane_formats); formats = tegra_cursor_plane_formats; @@ -583,71 +824,23 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, return ERR_PTR(err); } - return &plane->base; -} - -static int tegra_overlay_plane_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, int crtc_x, - int crtc_y, unsigned int crtc_w, - unsigned int crtc_h, uint32_t src_x, - uint32_t src_y, uint32_t src_w, - uint32_t src_h) -{ - struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; - unsigned int i; - int err; - - memset(&window, 0, sizeof(window)); - window.src.x = src_x >> 16; - window.src.y = src_y >> 16; - window.src.w = src_w >> 16; - window.src.h = src_h >> 16; - window.dst.x = crtc_x; - window.dst.y = crtc_y; - window.dst.w = crtc_w; - window.dst.h = crtc_h; - window.format = tegra_dc_format(fb->pixel_format, &window.swap); - window.bits_per_pixel = fb->bits_per_pixel; - window.bottom_up = tegra_fb_is_bottom_up(fb); - - err = tegra_fb_get_tiling(fb, &window.tiling); - if (err < 0) - return err; - - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, i); - - window.base[i] = bo->paddr + fb->offsets[i]; + drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs); - /* - * Tegra doesn't support different strides for U and V planes - * so we display a warning if the user tries to display a - * framebuffer with such a configuration. 
- */ - if (i >= 2) { - if (fb->pitches[i] != window.stride[1]) - DRM_ERROR("unsupported UV-plane configuration\n"); - } else { - window.stride[i] = fb->pitches[i]; - } - } - - return tegra_dc_setup_window(dc, p->index, &window); + return &plane->base; } static void tegra_overlay_plane_destroy(struct drm_plane *plane) { - tegra_window_plane_disable(plane); tegra_plane_destroy(plane); } static const struct drm_plane_funcs tegra_overlay_plane_funcs = { - .update_plane = tegra_overlay_plane_update, - .disable_plane = tegra_window_plane_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = tegra_overlay_plane_destroy, + .reset = tegra_plane_reset, + .atomic_duplicate_state = tegra_plane_atomic_duplicate_state, + .atomic_destroy_state = tegra_plane_atomic_destroy_state, }; static const uint32_t tegra_overlay_plane_formats[] = { @@ -660,6 +853,14 @@ static const uint32_t tegra_overlay_plane_formats[] = { DRM_FORMAT_YUV422, }; +static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, + .atomic_check = tegra_plane_atomic_check, + .atomic_update = tegra_plane_atomic_update, + .atomic_disable = tegra_plane_atomic_disable, +}; + static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, struct tegra_dc *dc, unsigned int index) @@ -686,6 +887,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm, return ERR_PTR(err); } + drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs); + return &plane->base; } @@ -703,99 +906,6 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc) return 0; } -static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y, - struct drm_framebuffer *fb) -{ - struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); - unsigned int h_offset = 0, v_offset = 0; - struct tegra_bo_tiling tiling; - unsigned long value, flags; - unsigned int format, swap; - int err; - - err = tegra_fb_get_tiling(fb, &tiling); - if (err < 0) - return err; - - spin_lock_irqsave(&dc->lock, flags); - - tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); - - value = fb->offsets[0] + y * fb->pitches[0] + - x * fb->bits_per_pixel / 8; - - tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR); - tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE); - - format = tegra_dc_format(fb->pixel_format, &swap); - tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH); - tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP); - - if (dc->soc->supports_block_linear) { - unsigned long height = tiling.value; - - switch (tiling.mode) { - case TEGRA_BO_TILING_MODE_PITCH: - value = DC_WINBUF_SURFACE_KIND_PITCH; - break; - - case TEGRA_BO_TILING_MODE_TILED: - value = DC_WINBUF_SURFACE_KIND_TILED; - break; - - case TEGRA_BO_TILING_MODE_BLOCK: - value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) | - DC_WINBUF_SURFACE_KIND_BLOCK; - break; - } - - tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND); - } else { - switch (tiling.mode) { - case TEGRA_BO_TILING_MODE_PITCH: - value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV | - DC_WIN_BUFFER_ADDR_MODE_LINEAR; - break; - - case TEGRA_BO_TILING_MODE_TILED: - value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV | - DC_WIN_BUFFER_ADDR_MODE_TILE; - break; - - case TEGRA_BO_TILING_MODE_BLOCK: - DRM_ERROR("hardware doesn't support block linear mode\n"); - spin_unlock_irqrestore(&dc->lock, flags); - return -EINVAL; - } - - tegra_dc_writel(dc, value, 
DC_WIN_BUFFER_ADDR_MODE); - } - - /* make sure bottom-up buffers are properly displayed */ - if (tegra_fb_is_bottom_up(fb)) { - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value |= V_DIRECTION; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - - v_offset += fb->height - 1; - } else { - value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS); - value &= ~V_DIRECTION; - tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); - } - - tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); - tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); - - value = GENERAL_ACT_REQ | WIN_A_ACT_REQ; - tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); - - spin_unlock_irqrestore(&dc->lock, flags); - - return 0; -} - void tegra_dc_enable_vblank(struct tegra_dc *dc) { unsigned long value, flags; @@ -838,7 +948,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) bo = tegra_fb_get_plane(crtc->primary->fb, 0); - spin_lock_irqsave(&dc->lock, flags); + spin_lock(&dc->lock); /* check if new start address has been latched */ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); @@ -846,7 +956,7 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc) base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); - spin_unlock_irqrestore(&dc->lock, flags); + spin_unlock(&dc->lock); if (base == bo->paddr + crtc->primary->fb->offsets[0]) { drm_crtc_send_vblank_event(crtc, dc->event); @@ -874,64 +984,130 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) spin_unlock_irqrestore(&drm->event_lock, flags); } -static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, uint32_t page_flip_flags) +static void tegra_dc_destroy(struct drm_crtc *crtc) { - unsigned int pipe = drm_crtc_index(crtc); - struct tegra_dc *dc = to_tegra_dc(crtc); - - if (dc->event) - return -EBUSY; + drm_crtc_cleanup(crtc); +} - if (event) { - event->pipe = pipe; - dc->event = event; - drm_crtc_vblank_get(crtc); - } +static void tegra_crtc_reset(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state; - tegra_dc_set_base(dc, 0, 0, fb); - crtc->primary->fb = fb; + kfree(crtc->state); + crtc->state = NULL; - return 0; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + crtc->state = &state->base; } -static void drm_crtc_clear(struct drm_crtc *crtc) +static struct drm_crtc_state * +tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) { - memset(crtc, 0, sizeof(*crtc)); + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + copy->base.mode_changed = false; + copy->base.planes_changed = false; + copy->base.event = NULL; + + return &copy->base; } -static void tegra_dc_destroy(struct drm_crtc *crtc) +static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - drm_crtc_cleanup(crtc); - drm_crtc_clear(crtc); + kfree(state); } static const struct drm_crtc_funcs tegra_crtc_funcs = { - .page_flip = tegra_dc_page_flip, - .set_config = drm_crtc_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, .destroy = tegra_dc_destroy, + .reset = tegra_crtc_reset, + .atomic_duplicate_state = tegra_crtc_atomic_duplicate_state, + .atomic_destroy_state = tegra_crtc_atomic_destroy_state, }; +static void tegra_dc_stop(struct tegra_dc *dc) +{ + u32 value; 
+ + /* stop the display controller */ + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= ~DISP_CTRL_MODE_MASK; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); + + tegra_dc_commit(dc); +} + +static bool tegra_dc_idle(struct tegra_dc *dc) +{ + u32 value; + + value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND); + + return (value & DISP_CTRL_MODE_MASK) == 0; +} + +static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout) +{ + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + if (tegra_dc_idle(dc)) + return 0; + + usleep_range(1000, 2000); + } + + dev_dbg(dc->dev, "timeout waiting for DC to become idle\n"); + return -ETIMEDOUT; +} + static void tegra_crtc_disable(struct drm_crtc *crtc) { struct tegra_dc *dc = to_tegra_dc(crtc); - struct drm_device *drm = crtc->dev; - struct drm_plane *plane; + u32 value; - drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { - if (plane->crtc == crtc) { - tegra_window_plane_disable(plane); - plane->crtc = NULL; + if (!tegra_dc_idle(dc)) { + tegra_dc_stop(dc); - if (plane->fb) { - drm_framebuffer_unreference(plane->fb); - plane->fb = NULL; - } - } + /* + * Ignore the return value, there isn't anything useful to do + * in case this fails. + */ + tegra_dc_wait_idle(dc, 100); + } + + /* + * This should really be part of the RGB encoder driver, but clearing + * these bits has the side-effect of stopping the display controller. + * When that happens no VBLANK interrupts will be raised. At the same + * time the encoder is disabled before the display controller, so the + * above code is always going to timeout waiting for the controller + * to go idle. + * + * Given the close coupling between the RGB encoder and the display + * controller doing it here is still kind of okay. None of the other + * encoder drivers require these bits to be cleared. + * + * XXX: Perhaps given that the display controller is switched off at + * this point anyway maybe clearing these bits isn't even useful for + * the RGB encoder? + */ + if (dc->rgb) { + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); } drm_crtc_vblank_off(crtc); - tegra_dc_commit(dc); } static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, @@ -971,33 +1147,15 @@ static int tegra_dc_set_timings(struct tegra_dc *dc, return 0; } -static int tegra_crtc_setup_clk(struct drm_crtc *crtc, - struct drm_display_mode *mode) +int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, + unsigned long pclk, unsigned int div) { - unsigned long pclk = mode->clock * 1000; - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_output *output = NULL; - struct drm_encoder *encoder; - unsigned int div; u32 value; - long err; - - list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) { - output = encoder_to_output(encoder); - break; - } - - if (!output) - return -ENODEV; + int err; - /* - * This assumes that the parent clock is pll_d_out0 or pll_d2_out - * respectively, each of which divides the base pll_d by 2. 
- */ - err = tegra_output_setup_clock(output, dc->clk, pclk, &div); + err = clk_set_parent(dc->clk, parent); if (err < 0) { - dev_err(dc->dev, "failed to setup clock: %ld\n", err); + dev_err(dc->dev, "failed to set parent clock: %d\n", err); return err; } @@ -1009,26 +1167,69 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc, return 0; } -static int tegra_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted, - int x, int y, struct drm_framebuffer *old_fb) +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div) +{ + struct tegra_dc_state *state = to_dc_state(crtc_state); + + state->clk = clk; + state->pclk = pclk; + state->div = div; + + return 0; +} + +static void tegra_dc_commit_state(struct tegra_dc *dc, + struct tegra_dc_state *state) { - struct tegra_bo *bo = tegra_fb_get_plane(crtc->primary->fb, 0); - struct tegra_dc *dc = to_tegra_dc(crtc); - struct tegra_dc_window window; u32 value; int err; - err = tegra_crtc_setup_clk(crtc, mode); - if (err) { - dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); - return err; + err = clk_set_parent(dc->clk, state->clk); + if (err < 0) + dev_err(dc->dev, "failed to set parent clock: %d\n", err); + + /* + * Outputs may not want to change the parent clock rate. This is only + * relevant to Tegra20 where only a single display PLL is available. + * Since that PLL would typically be used for HDMI, an internal LVDS + * panel would need to be driven by some other clock such as PLL_P + * which is shared with other peripherals. Changing the clock rate + * should therefore be avoided. + */ + if (state->pclk > 0) { + err = clk_set_rate(state->clk, state->pclk); + if (err < 0) + dev_err(dc->dev, + "failed to set clock rate to %lu Hz\n", + state->pclk); } + DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), + state->div); + DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); + + value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); +} + +static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + u32 value; + + tegra_dc_commit_state(dc, state); + /* program display mode */ tegra_dc_set_timings(dc, mode); + if (dc->soc->supports_border_color) + tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + /* interlacing isn't supported yet, so disable it */ if (dc->soc->supports_interlacing) { value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); @@ -1036,35 +1237,17 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc, tegra_dc_writel(dc, value, DC_DISP_INTERLACE_CONTROL); } - /* setup window parameters */ - memset(&window, 0, sizeof(window)); - window.src.x = 0; - window.src.y = 0; - window.src.w = mode->hdisplay; - window.src.h = mode->vdisplay; - window.dst.x = 0; - window.dst.y = 0; - window.dst.w = mode->hdisplay; - window.dst.h = mode->vdisplay; - window.format = tegra_dc_format(crtc->primary->fb->pixel_format, - &window.swap); - window.bits_per_pixel = crtc->primary->fb->bits_per_pixel; - window.stride[0] = crtc->primary->fb->pitches[0]; - window.base[0] = bo->paddr; - - err = tegra_dc_setup_window(dc, 0, &window); - if (err < 0) - dev_err(dc->dev, "failed to enable root plane\n"); - - return 0; -} + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); + value &= 
~DISP_CTRL_MODE_MASK; + value |= DISP_CTRL_MODE_C_DISPLAY; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); -static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) -{ - struct tegra_dc *dc = to_tegra_dc(crtc); + value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); + value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | + PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; + tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - return tegra_dc_set_base(dc, x, y, crtc->primary->fb); + tegra_dc_commit(dc); } static void tegra_crtc_prepare(struct drm_crtc *crtc) @@ -1075,10 +1258,6 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); - /* hardware initialization */ - reset_control_deassert(dc->rst); - usleep_range(10000, 20000); - if (dc->pipe) syncpt = SYNCPT_VBLANK1; else @@ -1113,19 +1292,49 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc) static void tegra_crtc_commit(struct drm_crtc *crtc) { + drm_crtc_vblank_on(crtc); +} + +static int tegra_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return 0; +} + +static void tegra_crtc_atomic_begin(struct drm_crtc *crtc) +{ struct tegra_dc *dc = to_tegra_dc(crtc); - drm_crtc_vblank_on(crtc); - tegra_dc_commit(dc); + if (crtc->state->event) { + crtc->state->event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + dc->event = crtc->state->event; + crtc->state->event = NULL; + } +} + +static void tegra_crtc_atomic_flush(struct drm_crtc *crtc) +{ + struct tegra_dc_state *state = to_dc_state(crtc->state); + struct tegra_dc *dc = to_tegra_dc(crtc); + + tegra_dc_writel(dc, state->planes << 8, DC_CMD_STATE_CONTROL); + tegra_dc_writel(dc, state->planes, DC_CMD_STATE_CONTROL); } static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { .disable = tegra_crtc_disable, .mode_fixup = tegra_crtc_mode_fixup, - .mode_set = tegra_crtc_mode_set, - .mode_set_base = tegra_crtc_mode_set_base, + .mode_set = drm_helper_crtc_mode_set, + .mode_set_nofb = tegra_crtc_mode_set_nofb, + .mode_set_base = drm_helper_crtc_mode_set_base, .prepare = tegra_crtc_prepare, .commit = tegra_crtc_commit, + .atomic_check = tegra_crtc_atomic_check, + .atomic_begin = tegra_crtc_atomic_begin, + .atomic_flush = tegra_crtc_atomic_flush, }; static irqreturn_t tegra_dc_irq(int irq, void *data) @@ -1571,6 +1780,7 @@ static const struct host1x_client_ops dc_client_ops = { }; static const struct tegra_dc_soc_info tegra20_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1579,6 +1789,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1587,6 +1798,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra114_dc_soc_info = { + .supports_border_color = true, .supports_interlacing = false, .supports_cursor = false, .supports_block_linear = false, @@ -1595,6 +1807,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = { }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { + .supports_border_color = false, .supports_interlacing = true, .supports_cursor = true, .supports_block_linear = true, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 
d4f827593dfa..7dd328d77996 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -10,6 +10,9 @@ #include <linux/host1x.h> #include <linux/iommu.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> + #include "drm.h" #include "gem.h" @@ -24,6 +27,92 @@ struct tegra_drm_file { struct list_head contexts; }; +static void tegra_atomic_schedule(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + tegra->commit.state = state; + schedule_work(&tegra->commit.work); +} + +static void tegra_atomic_complete(struct tegra_drm *tegra, + struct drm_atomic_state *state) +{ + struct drm_device *drm = tegra->drm; + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + drm_atomic_helper_commit_pre_planes(drm, state); + drm_atomic_helper_commit_planes(drm, state); + drm_atomic_helper_commit_post_planes(drm, state); + + drm_atomic_helper_wait_for_vblanks(drm, state); + + drm_atomic_helper_cleanup_planes(drm, state); + drm_atomic_state_free(state); +} + +static void tegra_atomic_work(struct work_struct *work) +{ + struct tegra_drm *tegra = container_of(work, struct tegra_drm, + commit.work); + + tegra_atomic_complete(tegra, tegra->commit.state); +} + +static int tegra_atomic_commit(struct drm_device *drm, + struct drm_atomic_state *state, bool async) +{ + struct tegra_drm *tegra = drm->dev_private; + int err; + + err = drm_atomic_helper_prepare_planes(drm, state); + if (err) + return err; + + /* serialize outstanding asynchronous commits */ + mutex_lock(&tegra->commit.lock); + flush_work(&tegra->commit.work); + + /* + * This is the point of no return - everything below never fails except + * when the hw goes bonghits. Which means we can commit the new state on + * the software side now. 
+ */ + + drm_atomic_helper_swap_state(drm, state); + + if (async) + tegra_atomic_schedule(tegra, state); + else + tegra_atomic_complete(tegra, state); + + mutex_unlock(&tegra->commit.lock); + return 0; +} + +static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { + .fb_create = tegra_fb_create, +#ifdef CONFIG_DRM_TEGRA_FBDEV + .output_poll_changed = tegra_fb_output_poll_changed, +#endif + .atomic_check = drm_atomic_helper_check, + .atomic_commit = tegra_atomic_commit, +}; + static int tegra_drm_load(struct drm_device *drm, unsigned long flags) { struct host1x_device *device = to_host1x_device(drm->dev); @@ -36,8 +125,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (iommu_present(&platform_bus_type)) { tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (IS_ERR(tegra->domain)) { - err = PTR_ERR(tegra->domain); + if (!tegra->domain) { + err = -ENOMEM; goto free; } @@ -47,11 +136,23 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) mutex_init(&tegra->clients_lock); INIT_LIST_HEAD(&tegra->clients); + + mutex_init(&tegra->commit.lock); + INIT_WORK(&tegra->commit.work, tegra_atomic_work); + drm->dev_private = tegra; tegra->drm = drm; drm_mode_config_init(drm); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.funcs = &tegra_drm_mode_funcs; + err = tegra_drm_fb_prepare(drm); if (err < 0) goto config; @@ -62,6 +163,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) if (err < 0) goto fbdev; + drm_mode_config_reset(drm); + /* * We don't use the drm_irq_install() helpers provided by the DRM * core, so we need to set this manually in order to allow the @@ -106,8 +209,8 @@ static int tegra_drm_unload(struct drm_device *drm) drm_kms_helper_poll_fini(drm); tegra_drm_fb_exit(drm); - drm_vblank_cleanup(drm); drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); err = host1x_device_exit(device); if (err < 0) @@ -190,7 +293,7 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; - err = get_user(dest->target.offset, &src->cmdbuf.offset); + err = get_user(dest->target.offset, &src->target.offset); if (err < 0) return err; @@ -893,6 +996,30 @@ static int host1x_drm_remove(struct host1x_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int host1x_drm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_disable(drm); + + return 0; +} + +static int host1x_drm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + + drm_kms_helper_poll_enable(drm); + + return 0; +} +#endif + +static const struct dev_pm_ops host1x_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume) +}; + static const struct of_device_id host1x_drm_subdevs[] = { { .compatible = "nvidia,tegra20-dc", }, { .compatible = "nvidia,tegra20-hdmi", }, @@ -912,7 +1039,10 @@ static const struct of_device_id host1x_drm_subdevs[] = { }; static struct host1x_driver host1x_drm_driver = { - .name = "drm", + .driver = { + .name = "drm", + .pm = &host1x_drm_pm_ops, + }, .probe = host1x_drm_probe, .remove = host1x_drm_remove, .subdevs = host1x_drm_subdevs, diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 3a3b2e7b5b3f..8cb2dfeaa957 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -50,6 +50,12 @@ struct tegra_drm { #endif unsigned int pitch_align; + 
+ struct { + struct drm_atomic_state *state; + struct work_struct work; + struct mutex lock; + } commit; }; struct tegra_drm_client; @@ -164,45 +170,31 @@ struct tegra_dc_window { unsigned int h; } dst; unsigned int bits_per_pixel; - unsigned int format; - unsigned int swap; unsigned int stride[2]; unsigned long base[3]; bool bottom_up; struct tegra_bo_tiling tiling; + u32 format; + u32 swap; }; /* from dc.c */ void tegra_dc_enable_vblank(struct tegra_dc *dc); void tegra_dc_disable_vblank(struct tegra_dc *dc); void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file); - -struct tegra_output_ops { - int (*enable)(struct tegra_output *output); - int (*disable)(struct tegra_output *output); - int (*setup_clock)(struct tegra_output *output, struct clk *clk, - unsigned long pclk, unsigned int *div); - int (*check_mode)(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status); - enum drm_connector_status (*detect)(struct tegra_output *output); -}; - -enum tegra_output_type { - TEGRA_OUTPUT_RGB, - TEGRA_OUTPUT_HDMI, - TEGRA_OUTPUT_DSI, - TEGRA_OUTPUT_EDP, -}; +void tegra_dc_commit(struct tegra_dc *dc); +int tegra_dc_setup_clock(struct tegra_dc *dc, struct clk *parent, + unsigned long pclk, unsigned int div); +int tegra_dc_state_setup_clock(struct tegra_dc *dc, + struct drm_crtc_state *crtc_state, + struct clk *clk, unsigned long pclk, + unsigned int div); struct tegra_output { struct device_node *of_node; struct device *dev; - const struct tegra_output_ops *ops; - enum tegra_output_type type; - struct drm_panel *panel; struct i2c_adapter *ddc; const struct edid *edid; @@ -223,42 +215,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c) return container_of(c, struct tegra_output, connector); } -static inline int tegra_output_enable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->enable) - return output->ops->enable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_disable(struct tegra_output *output) -{ - if (output && output->ops && output->ops->disable) - return output->ops->disable(output); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) -{ - if (output && output->ops && output->ops->setup_clock) - return output->ops->setup_clock(output, clk, pclk, div); - - return output ? -ENOSYS : -EINVAL; -} - -static inline int tegra_output_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - if (output && output->ops && output->ops->check_mode) - return output->ops->check_mode(output, mode, status); - - return output ? 
-ENOSYS : -EINVAL; -} - /* from rgb.c */ int tegra_dc_rgb_probe(struct tegra_dc *dc); int tegra_dc_rgb_remove(struct tegra_dc *dc); @@ -267,9 +223,18 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc); /* from output.c */ int tegra_output_probe(struct tegra_output *output); -int tegra_output_remove(struct tegra_output *output); +void tegra_output_remove(struct tegra_output *output); int tegra_output_init(struct drm_device *drm, struct tegra_output *output); -int tegra_output_exit(struct tegra_output *output); +void tegra_output_exit(struct tegra_output *output); + +int tegra_output_connector_get_modes(struct drm_connector *connector); +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector); +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force); +void tegra_output_connector_destroy(struct drm_connector *connector); + +void tegra_output_encoder_destroy(struct drm_encoder *encoder); /* from dpaux.c */ struct tegra_dpaux; @@ -291,12 +256,16 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, struct tegra_bo_tiling *tiling); +struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd); int tegra_drm_fb_prepare(struct drm_device *drm); void tegra_drm_fb_free(struct drm_device *drm); int tegra_drm_fb_init(struct drm_device *drm); void tegra_drm_fb_exit(struct drm_device *drm); #ifdef CONFIG_DRM_TEGRA_FBDEV void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev); +void tegra_fb_output_poll_changed(struct drm_device *drm); #endif extern struct platform_driver tegra_dc_driver; diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 33f67fd601c6..ed970f622903 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -17,6 +17,7 @@ #include <linux/regulator/consumer.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> @@ -27,6 +28,28 @@ #include "dsi.h" #include "mipi-phy.h" +struct tegra_dsi_state { + struct drm_connector_state base; + + struct mipi_dphy_timing timing; + unsigned long period; + + unsigned int vrefresh; + unsigned int lanes; + unsigned long pclk; + unsigned long bclk; + + enum tegra_dsi_format format; + unsigned int mul; + unsigned int div; +}; + +static inline struct tegra_dsi_state * +to_dsi_state(struct drm_connector_state *state) +{ + return container_of(state, struct tegra_dsi_state, base); +} + struct tegra_dsi { struct host1x_client client; struct tegra_output output; @@ -51,7 +74,6 @@ struct tegra_dsi { struct mipi_dsi_host host; struct regulator *vdd; - bool enabled; unsigned int video_fifo_depth; unsigned int host_fifo_depth; @@ -77,13 +99,17 @@ static inline struct tegra_dsi *to_dsi(struct tegra_output *output) return container_of(output, struct tegra_dsi, output); } -static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi, - unsigned long reg) +static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi) +{ + return to_dsi_state(dsi->output.connector.state); +} + +static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg) { return readl(dsi->regs + (reg << 2)); } -static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value, +static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value, unsigned long reg) { writel(value, dsi->regs + (reg 
<< 2)); @@ -95,7 +121,7 @@ static int tegra_dsi_show_regs(struct seq_file *s, void *data) struct tegra_dsi *dsi = node->info_ent->data; #define DUMP_REG(name) \ - seq_printf(s, "%-32s %#05x %08lx\n", #name, name, \ + seq_printf(s, "%-32s %#05x %08x\n", #name, name, \ tegra_dsi_readl(dsi, name)) DUMP_REG(DSI_INCR_SYNCPT); @@ -230,7 +256,7 @@ remove: return err; } -static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) +static void tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) { drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files), dsi->minor); @@ -241,8 +267,6 @@ static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi) debugfs_remove(dsi->debugfs); dsi->debugfs = NULL; - - return 0; } #define PKT_ID0(id) ((((id) & 0x3f) << 3) | (1 << 9)) @@ -338,61 +362,36 @@ static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = { [11] = 0, }; -static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi) +static void tegra_dsi_set_phy_timing(struct tegra_dsi *dsi, + unsigned long period, + const struct mipi_dphy_timing *timing) { - struct mipi_dphy_timing timing; - unsigned long value, period; - long rate; - int err; - - rate = clk_get_rate(dsi->clk); - if (rate < 0) - return rate; - - period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2); - - err = mipi_dphy_timing_get_default(&timing, period); - if (err < 0) - return err; - - err = mipi_dphy_timing_validate(&timing, period); - if (err < 0) { - dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err); - return err; - } - - /* - * The D-PHY timing fields below are expressed in byte-clock cycles, - * so multiply the period by 8. - */ - period *= 8; + u32 value; - value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 | - DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 | - DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 | - DSI_TIMING_FIELD(timing.hsprepare, period, 1); + value = DSI_TIMING_FIELD(timing->hsexit, period, 1) << 24 | + DSI_TIMING_FIELD(timing->hstrail, period, 0) << 16 | + DSI_TIMING_FIELD(timing->hszero, period, 3) << 8 | + DSI_TIMING_FIELD(timing->hsprepare, period, 1); tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0); - value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 | - DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 | - DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 | - DSI_TIMING_FIELD(timing.lpx, period, 1); + value = DSI_TIMING_FIELD(timing->clktrail, period, 1) << 24 | + DSI_TIMING_FIELD(timing->clkpost, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkzero, period, 1) << 8 | + DSI_TIMING_FIELD(timing->lpx, period, 1); tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1); - value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 | - DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 | + value = DSI_TIMING_FIELD(timing->clkprepare, period, 1) << 16 | + DSI_TIMING_FIELD(timing->clkpre, period, 1) << 8 | DSI_TIMING_FIELD(0xff * period, period, 0) << 0; tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2); - value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 | - DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 | - DSI_TIMING_FIELD(timing.tago, period, 1); + value = DSI_TIMING_FIELD(timing->taget, period, 1) << 16 | + DSI_TIMING_FIELD(timing->tasure, period, 1) << 8 | + DSI_TIMING_FIELD(timing->tago, period, 1); tegra_dsi_writel(dsi, value, DSI_BTA_TIMING); if (dsi->slave) - return tegra_dsi_set_phy_timing(dsi->slave); - - return 0; + tegra_dsi_set_phy_timing(dsi->slave, period, timing); } static int tegra_dsi_get_muldiv(enum mipi_dsi_pixel_format format, @@ -484,14 +483,22 @@ static unsigned int 
tegra_dsi_get_lanes(struct tegra_dsi *dsi) return dsi->lanes; } -static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, - const struct drm_display_mode *mode) +static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, + const struct drm_display_mode *mode) { unsigned int hact, hsw, hbp, hfp, i, mul, div; - enum tegra_dsi_format format; + struct tegra_dsi_state *state; const u32 *pkt_seq; u32 value; - int err; + + /* XXX: pass in state into this function? */ + if (dsi->master) + state = tegra_dsi_get_state(dsi->master); + else + state = tegra_dsi_get_state(dsi); + + mul = state->mul; + div = state->div; if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n"); @@ -504,15 +511,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, pkt_seq = pkt_seq_command_mode; } - err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); - if (err < 0) - return err; - - err = tegra_dsi_get_format(dsi->format, &format); - if (err < 0) - return err; - - value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) | + value = DSI_CONTROL_CHANNEL(0) | + DSI_CONTROL_FORMAT(state->format) | DSI_CONTROL_LANES(dsi->lanes - 1) | DSI_CONTROL_SOURCE(pipe); tegra_dsi_writel(dsi, value, DSI_CONTROL); @@ -591,8 +591,8 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, /* set SOL delay */ if (dsi->master || dsi->slave) { - unsigned int lanes = tegra_dsi_get_lanes(dsi); unsigned long delay, bclk, bclk_ganged; + unsigned int lanes = state->lanes; /* SOL to valid, valid to FIFO and FIFO write delay */ delay = 4 + 4 + 2; @@ -612,9 +612,7 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, } if (dsi->slave) { - err = tegra_dsi_configure(dsi->slave, pipe, mode); - if (err < 0) - return err; + tegra_dsi_configure(dsi->slave, pipe, mode); /* * TODO: Support modes other than symmetrical left-right @@ -624,49 +622,6 @@ static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe, tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2, mode->hdisplay / 2); } - - return 0; -} - -static int tegra_output_dsi_enable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - const struct drm_display_mode *mode = &dc->base.mode; - struct tegra_dsi *dsi = to_dsi(output); - u32 value; - int err; - - if (dsi->enabled) - return 0; - - err = tegra_dsi_configure(dsi, dc->pipe, mode); - if (err < 0) - return err; - - /* enable display controller */ - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value |= DSI_ENABLE; - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - /* enable DSI controller */ - tegra_dsi_enable(dsi); - - dsi->enabled = true; - - return 0; } static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout) @@ -705,6 +660,29 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi) tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL); } +static void tegra_dsi_set_timeout(struct 
tegra_dsi *dsi, unsigned long bclk, + unsigned int vrefresh) +{ + unsigned int timeout; + u32 value; + + /* one frame high-speed transmission timeout */ + timeout = (bclk / vrefresh) / 512; + value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0); + + /* 2 ms peripheral timeout for panel */ + timeout = 2 * bclk / 512 * 1000; + value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000); + tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1); + + value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0); + tegra_dsi_writel(dsi, value, DSI_TO_TALLY); + + if (dsi->slave) + tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh); +} + static void tegra_dsi_disable(struct tegra_dsi *dsi) { u32 value; @@ -724,15 +702,149 @@ static void tegra_dsi_disable(struct tegra_dsi *dsi) usleep_range(5000, 10000); } -static int tegra_output_dsi_disable(struct tegra_output *output) +static void tegra_dsi_soft_reset(struct tegra_dsi *dsi) +{ + u32 value; + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value &= ~DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + usleep_range(300, 1000); + + value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL); + value |= DSI_POWER_CONTROL_ENABLE; + tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL); + + usleep_range(300, 1000); + + value = tegra_dsi_readl(dsi, DSI_TRIGGER); + if (value) + tegra_dsi_writel(dsi, 0, DSI_TRIGGER); + + if (dsi->slave) + tegra_dsi_soft_reset(dsi->slave); +} + +static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static void tegra_dsi_connector_reset(struct drm_connector *connector) +{ + struct tegra_dsi_state *state; + + kfree(connector->state); + connector->state = NULL; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) + connector->state = &state->base; +} + +static struct drm_connector_state * +tegra_dsi_connector_duplicate_state(struct drm_connector *connector) +{ + struct tegra_dsi_state *state = to_dsi_state(connector->state); + struct tegra_dsi_state *copy; + + copy = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!copy) + return NULL; + + return &copy->base; +} + +static const struct drm_connector_funcs tegra_dsi_connector_funcs = { + .dpms = tegra_dsi_connector_dpms, + .reset = tegra_dsi_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = tegra_dsi_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_dsi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_dsi_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_dsi_encoder_commit(struct drm_encoder *encoder) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); +} + +static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct 
tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_dsi *dsi = to_dsi(output); + struct tegra_dsi_state *state; + u32 value; + + state = tegra_dsi_get_state(dsi); + + tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh); + + /* + * The D-PHY timing fields are expressed in byte-clock cycles, so + * multiply the period by 8. + */ + tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing); + + if (output->panel) + drm_panel_prepare(output->panel); + + tegra_dsi_configure(dsi, dc->pipe, mode); + + /* enable display controller */ + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= DSI_ENABLE; + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + /* enable DSI controller */ + tegra_dsi_enable(dsi); + + if (output->panel) + drm_panel_enable(output->panel); + + return; +} + +static void tegra_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_dsi *dsi = to_dsi(output); - unsigned long value; + u32 value; int err; - if (!dsi->enabled) - return 0; + if (output->panel) + drm_panel_disable(output->panel); tegra_dsi_video_disable(dsi); @@ -741,85 +853,78 @@ static int tegra_output_dsi_disable(struct tegra_output *output) * sure it's only executed when the output is attached to one. */ if (dc) { - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~DSI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } err = tegra_dsi_wait_idle(dsi, 100); if (err < 0) dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err); - tegra_dsi_disable(dsi); + tegra_dsi_soft_reset(dsi); - dsi->enabled = false; + if (output->panel) + drm_panel_unprepare(output->panel); - return 0; -} - -static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, - unsigned int vrefresh) -{ - unsigned int timeout; - u32 value; - - /* one frame high-speed transmission timeout */ - timeout = (bclk / vrefresh) / 512; - value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout); - tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0); - - /* 2 ms peripheral timeout for panel */ - timeout = 2 * bclk / 512 * 1000; - value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000); - tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1); - - value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0); - tegra_dsi_writel(dsi, value, DSI_TO_TALLY); + tegra_dsi_disable(dsi); - if (dsi->slave) - tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh); + return; } -static int tegra_output_dsi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *divp) +static int +tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = 
encoder_to_output(encoder); + struct tegra_dsi_state *state = to_dsi_state(conn_state); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); struct tegra_dsi *dsi = to_dsi(output); - unsigned int mul, div, vrefresh, lanes; - unsigned long bclk, plld; + unsigned int scdiv; + unsigned long plld; int err; - lanes = tegra_dsi_get_lanes(dsi); + state->pclk = crtc_state->mode.clock * 1000; + + err = tegra_dsi_get_muldiv(dsi->format, &state->mul, &state->div); + if (err < 0) + return err; + + state->lanes = tegra_dsi_get_lanes(dsi); - err = tegra_dsi_get_muldiv(dsi->format, &mul, &div); + err = tegra_dsi_get_format(dsi->format, &state->format); if (err < 0) return err; - DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes); - vrefresh = drm_mode_vrefresh(mode); - DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh); + state->vrefresh = drm_mode_vrefresh(&crtc_state->mode); /* compute byte clock */ - bclk = (pclk * mul) / (div * lanes); + state->bclk = (state->pclk * state->mul) / (state->div * state->lanes); + + DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", state->mul, state->div, + state->lanes); + DRM_DEBUG_KMS("format: %u, vrefresh: %u\n", state->format, + state->vrefresh); + DRM_DEBUG_KMS("bclk: %lu\n", state->bclk); /* * Compute bit clock and round up to the next MHz. */ - plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC; + plld = DIV_ROUND_UP(state->bclk * 8, USEC_PER_SEC) * USEC_PER_SEC; + state->period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, plld); + + err = mipi_dphy_timing_get_default(&state->timing, state->period); + if (err < 0) + return err; + + err = mipi_dphy_timing_validate(&state->timing, state->period); + if (err < 0) { + dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err); + return err; + } /* * We divide the frequency by two here, but we make up for that by @@ -828,19 +933,6 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output, */ plld /= 2; - err = clk_set_parent(clk, dsi->clk_parent); - if (err < 0) { - dev_err(dsi->dev, "failed to set parent clock: %d\n", err); - return err; - } - - err = clk_set_rate(dsi->clk_parent, plld); - if (err < 0) { - dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n", - plld); - return err; - } - /* * Derive pixel clock from bit clock using the shift clock divider. * Note that this is only half of what we would expect, but we need @@ -851,44 +943,30 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output, * not working properly otherwise. Perhaps the PLLs cannot generate * frequencies sufficiently high. */ - *divp = ((8 * mul) / (div * lanes)) - 2; + scdiv = ((8 * state->mul) / (state->div * state->lanes)) - 2; - /* - * XXX: Move the below somewhere else so that we don't need to have - * access to the vrefresh in this function? - */ - tegra_dsi_set_timeout(dsi, bclk, vrefresh); - - err = tegra_dsi_set_phy_timing(dsi); - if (err < 0) + err = tegra_dc_state_setup_clock(dc, crtc_state, dsi->clk_parent, + plld, scdiv); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; + } - return 0; -} - -static int tegra_output_dsi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. 
- */ - - *status = MODE_OK; - - return 0; + return err; } -static const struct tegra_output_ops dsi_ops = { - .enable = tegra_output_dsi_enable, - .disable = tegra_output_dsi_disable, - .setup_clock = tegra_output_dsi_setup_clock, - .check_mode = tegra_output_dsi_check_mode, +static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = { + .dpms = tegra_dsi_encoder_dpms, + .prepare = tegra_dsi_encoder_prepare, + .commit = tegra_dsi_encoder_commit, + .mode_set = tegra_dsi_encoder_mode_set, + .disable = tegra_dsi_encoder_disable, + .atomic_check = tegra_dsi_encoder_atomic_check, }; static int tegra_dsi_pad_enable(struct tegra_dsi *dsi) { - unsigned long value; + u32 value; value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0); tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0); @@ -923,17 +1001,44 @@ static int tegra_dsi_init(struct host1x_client *client) struct tegra_dsi *dsi = host1x_client_to_dsi(client); int err; + reset_control_deassert(dsi->rst); + + err = tegra_dsi_pad_calibrate(dsi); + if (err < 0) { + dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); + goto reset; + } + /* Gangsters must not register their own outputs. */ if (!dsi->master) { - dsi->output.type = TEGRA_OUTPUT_DSI; dsi->output.dev = client->dev; - dsi->output.ops = &dsi_ops; + + drm_connector_init(drm, &dsi->output.connector, + &tegra_dsi_connector_funcs, + DRM_MODE_CONNECTOR_DSI); + drm_connector_helper_add(&dsi->output.connector, + &tegra_dsi_connector_helper_funcs); + dsi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &dsi->output.encoder, + &tegra_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI); + drm_encoder_helper_add(&dsi->output.encoder, + &tegra_dsi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&dsi->output.connector, + &dsi->output.encoder); + drm_connector_register(&dsi->output.connector); err = tegra_output_init(drm, &dsi->output); if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); - return err; + dev_err(client->dev, + "failed to initialize output: %d\n", + err); + goto reset; } + + dsi->output.encoder.possible_crtcs = 0x3; } if (IS_ENABLED(CONFIG_DEBUG_FS)) { @@ -943,34 +1048,22 @@ static int tegra_dsi_init(struct host1x_client *client) } return 0; + +reset: + reset_control_assert(dsi->rst); + return err; } static int tegra_dsi_exit(struct host1x_client *client) { struct tegra_dsi *dsi = host1x_client_to_dsi(client); - int err; - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_dsi_debugfs_exit(dsi); - if (err < 0) - dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err); - } + tegra_output_exit(&dsi->output); - if (!dsi->master) { - err = tegra_output_disable(&dsi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", - err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_dsi_debugfs_exit(dsi); - err = tegra_output_exit(&dsi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", - err); - return err; - } - } + reset_control_assert(dsi->rst); return 0; } @@ -1398,13 +1491,6 @@ static int tegra_dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->rst)) return PTR_ERR(dsi->rst); - err = reset_control_deassert(dsi->rst); - if (err < 0) { - dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n", - err); - return err; - } - dsi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dsi->clk)) { dev_err(&pdev->dev, "cannot get DSI clock\n"); @@ -1470,12 +1556,6 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto disable_vdd; } - err = 
tegra_dsi_pad_calibrate(dsi); - if (err < 0) { - dev_err(dsi->dev, "MIPI calibration failed: %d\n", err); - goto mipi_free; - } - dsi->host.ops = &tegra_dsi_host_ops; dsi->host.dev = &pdev->dev; @@ -1527,6 +1607,8 @@ static int tegra_dsi_remove(struct platform_device *pdev) return err; } + tegra_output_remove(&dsi->output); + mipi_dsi_host_unregister(&dsi->host); tegra_mipi_free(dsi->mipi); @@ -1535,12 +1617,6 @@ static int tegra_dsi_remove(struct platform_device *pdev) clk_disable_unprepare(dsi->clk); reset_control_assert(dsi->rst); - err = tegra_output_remove(&dsi->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to remove output: %d\n", err); - return err; - } - return 0; } diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index e9c715d89261..397fb34d5d5b 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -129,9 +129,9 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, return fb; } -static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, - struct drm_file *file, - struct drm_mode_fb_cmd2 *cmd) +struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd) { unsigned int hsub, vsub, i; struct tegra_bo *planes[4]; @@ -377,7 +377,7 @@ void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base); } -static void tegra_fb_output_poll_changed(struct drm_device *drm) +void tegra_fb_output_poll_changed(struct drm_device *drm) { struct tegra_drm *tegra = drm->dev_private; @@ -386,28 +386,11 @@ static void tegra_fb_output_poll_changed(struct drm_device *drm) } #endif -static const struct drm_mode_config_funcs tegra_drm_mode_funcs = { - .fb_create = tegra_fb_create, -#ifdef CONFIG_DRM_TEGRA_FBDEV - .output_poll_changed = tegra_fb_output_poll_changed, -#endif -}; - int tegra_drm_fb_prepare(struct drm_device *drm) { #ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_drm *tegra = drm->dev_private; -#endif - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.funcs = &tegra_drm_mode_funcs; - -#ifdef CONFIG_DRM_TEGRA_FBDEV tegra->fbdev = tegra_fbdev_create(drm); if (IS_ERR(tegra->fbdev)) return PTR_ERR(tegra->fbdev); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 8777b7f75791..cfb481943b6b 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -92,36 +92,6 @@ static const struct host1x_bo_ops tegra_bo_ops = { .kunmap = tegra_bo_kunmap, }; -/* - * A generic iommu_map_sg() function is being reviewed and will hopefully be - * merged soon. At that point this function can be dropped in favour of the - * one provided by the IOMMU API. 
- */ -static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, - int prot) -{ - struct scatterlist *s; - size_t offset = 0; - unsigned int i; - int err; - - for_each_sg(sg, s, nents, i) { - phys_addr_t phys = page_to_phys(sg_page(s)); - size_t length = s->offset + s->length; - - err = iommu_map(domain, iova + offset, phys, length, prot); - if (err < 0) { - iommu_unmap(domain, iova, offset); - return err; - } - - offset += length; - } - - return offset; -} - static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) { int prot = IOMMU_READ | IOMMU_WRITE; @@ -144,8 +114,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) bo->paddr = bo->mm->start; - err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, - bo->sgt->nents, prot); + err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->sgt->nents, prot); if (err < 0) { dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err); goto remove; @@ -244,10 +214,8 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) for_each_sg(sgt->sgl, s, sgt->nents, i) sg_dma_address(s) = sg_phys(s); - if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) { - sgt = ERR_PTR(-ENOMEM); + if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) goto release_sgt; - } bo->sgt = sgt; @@ -256,6 +224,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) release_sgt: sg_free_table(sgt); kfree(sgt); + sgt = ERR_PTR(-ENOMEM); put_pages: drm_gem_put_pages(&bo->gem, bo->pages, false, false); return PTR_ERR(sgt); diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index ffe26547328d..7e06657ae58b 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -9,10 +9,15 @@ #include <linux/clk.h> #include <linux/debugfs.h> +#include <linux/gpio.h> #include <linux/hdmi.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> + #include "hdmi.h" #include "drm.h" #include "dc.h" @@ -31,7 +36,7 @@ struct tegra_hdmi_config { unsigned int num_tmds; unsigned long fuse_override_offset; - unsigned long fuse_override_value; + u32 fuse_override_value; bool has_sor_io_peak_current; }; @@ -40,7 +45,6 @@ struct tegra_hdmi { struct host1x_client client; struct tegra_output output; struct device *dev; - bool enabled; struct regulator *hdmi; struct regulator *pll; @@ -85,16 +89,16 @@ enum { HDA, }; -static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi, - unsigned long reg) +static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi, + unsigned long offset) { - return readl(hdmi->regs + (reg << 2)); + return readl(hdmi->regs + (offset << 2)); } -static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val, - unsigned long reg) +static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value, + unsigned long offset) { - writel(val, hdmi->regs + (reg << 2)); + writel(value, hdmi->regs + (offset << 2)); } struct tegra_hdmi_audio_config { @@ -455,8 +459,8 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) for (i = 0; i < ARRAY_SIZE(freqs); i++) { unsigned int f = freqs[i]; unsigned int eight_half; - unsigned long value; unsigned int delta; + u32 value; if (f > 96000) delta = 2; @@ -477,7 +481,7 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) struct 
device_node *node = hdmi->dev->of_node; const struct tegra_hdmi_audio_config *config; unsigned int offset = 0; - unsigned long value; + u32 value; switch (hdmi->audio_source) { case HDA: @@ -571,9 +575,9 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) return 0; } -static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size) +static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size) { - unsigned long value = 0; + u32 value = 0; size_t i; for (i = size; i > 0; i--) @@ -587,8 +591,8 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, { const u8 *ptr = data; unsigned long offset; - unsigned long value; size_t i, j; + u32 value; switch (ptr[0]) { case HDMI_INFOFRAME_TYPE_AVI: @@ -707,9 +711,9 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) { struct hdmi_vendor_infoframe frame; - unsigned long value; u8 buffer[10]; ssize_t err; + u32 value; if (!hdmi->stereo) { value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); @@ -738,7 +742,7 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, const struct tmds_config *tmds) { - unsigned long value; + u32 value; tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); @@ -768,21 +772,78 @@ static bool tegra_output_is_hdmi(struct tegra_output *output) return drm_detect_hdmi_monitor(edid); } -static int tegra_output_hdmi_enable(struct tegra_output *output) +static void tegra_hdmi_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { + .dpms = tegra_hdmi_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_hdmi_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_hdmi *hdmi = to_hdmi(output); + unsigned long pclk = mode->clock * 1000; + enum drm_mode_status status = MODE_OK; + struct clk *parent; + long err; + + parent = clk_get_parent(hdmi->clk_parent); + + err = clk_round_rate(parent, pclk * 4); + if (err <= 0) + status = MODE_NOCLOCK; + + return status; +} + +static const struct drm_connector_helper_funcs +tegra_hdmi_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_hdmi_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) { unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; - struct tegra_dc *dc = 
to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct device_node *node = output->dev->of_node; struct tegra_hdmi *hdmi = to_hdmi(output); - struct device_node *node = hdmi->dev->of_node; unsigned int pulse_start, div82, pclk; - unsigned long value; int retries = 1000; + u32 value; int err; - if (hdmi->enabled) - return 0; - hdmi->dvi = !tegra_output_is_hdmi(output); pclk = mode->clock * 1000; @@ -790,32 +851,6 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; - err = regulator_enable(hdmi->pll); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); - return err; - } - - err = regulator_enable(hdmi->vdd); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); - return err; - } - - err = clk_set_rate(hdmi->clk, pclk); - if (err < 0) - return err; - - err = clk_prepare_enable(hdmi->clk); - if (err < 0) { - dev_err(hdmi->dev, "failed to enable clock: %d\n", err); - return err; - } - - reset_control_assert(hdmi->rst); - usleep_range(1000, 2000); - reset_control_deassert(hdmi->rst); - /* power up sequence */ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); value &= ~SOR_PLL_PDBG; @@ -987,123 +1022,57 @@ static int tegra_output_hdmi_enable(struct tegra_output *output) value |= HDMI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); /* TODO: add HDCP support */ - - hdmi->enabled = true; - - return 0; } -static int tegra_output_hdmi_disable(struct tegra_output *output) +static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long value; - - if (!hdmi->enabled) - return 0; + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + u32 value; /* * The following accesses registers of the display controller, so make * sure it's only executed when the output is attached to one. */ if (dc) { - /* - * XXX: We can't do this here because it causes HDMI to go - * into an erroneous state with the result that HDMI won't - * properly work once disabled. See also a similar symptom - * for the SOR output. 
- */ - /* - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - */ - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~HDMI_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } - - clk_disable_unprepare(hdmi->clk); - reset_control_assert(hdmi->rst); - regulator_disable(hdmi->vdd); - regulator_disable(hdmi->pll); - - hdmi->enabled = false; - - return 0; } -static int tegra_output_hdmi_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_hdmi *hdmi = to_hdmi(output); int err; - err = clk_set_parent(clk, hdmi->clk_parent); + err = tegra_dc_state_setup_clock(dc, crtc_state, hdmi->clk_parent, + pclk, 0); if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; } - err = clk_set_rate(hdmi->clk_parent, pclk); - if (err < 0) - dev_err(output->dev, "failed to set clock rate to %lu Hz\n", - pclk); - - *div = 0; - - return 0; -} - -static int tegra_output_hdmi_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - struct tegra_hdmi *hdmi = to_hdmi(output); - unsigned long pclk = mode->clock * 1000; - struct clk *parent; - long err; - - parent = clk_get_parent(hdmi->clk_parent); - - err = clk_round_rate(parent, pclk * 4); - if (err <= 0) - *status = MODE_NOCLOCK; - else - *status = MODE_OK; - - return 0; + return err; } -static const struct tegra_output_ops hdmi_ops = { - .enable = tegra_output_hdmi_enable, - .disable = tegra_output_hdmi_disable, - .setup_clock = tegra_output_hdmi_setup_clock, - .check_mode = tegra_output_hdmi_check_mode, +static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = { + .dpms = tegra_hdmi_encoder_dpms, + .prepare = tegra_hdmi_encoder_prepare, + .commit = tegra_hdmi_encoder_commit, + .mode_set = tegra_hdmi_encoder_mode_set, + .disable = tegra_hdmi_encoder_disable, + .atomic_check = tegra_hdmi_encoder_atomic_check, }; static int tegra_hdmi_show_regs(struct seq_file *s, void *data) @@ -1117,8 +1086,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data) return err; #define DUMP_REG(name) \ - seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ - tegra_hdmi_readl(hdmi, name)) + seq_printf(s, "%-56s %#05x %08x\n", #name, name, \ + tegra_hdmi_readl(hdmi, name)) DUMP_REG(HDMI_CTXSW); DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); @@ -1330,7 +1299,7 @@ remove: return err; } -static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) +static void tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi) { drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files), hdmi->minor); @@ -1341,8 +1310,6 @@ static int tegra_hdmi_debugfs_exit(struct 
tegra_hdmi *hdmi) debugfs_remove(hdmi->debugfs); hdmi->debugfs = NULL; - - return 0; } static int tegra_hdmi_init(struct host1x_client *client) @@ -1351,16 +1318,32 @@ static int tegra_hdmi_init(struct host1x_client *client) struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); int err; - hdmi->output.type = TEGRA_OUTPUT_HDMI; hdmi->output.dev = client->dev; - hdmi->output.ops = &hdmi_ops; + + drm_connector_init(drm, &hdmi->output.connector, + &tegra_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + drm_connector_helper_add(&hdmi->output.connector, + &tegra_hdmi_connector_helper_funcs); + hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&hdmi->output.encoder, + &tegra_hdmi_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&hdmi->output.connector, + &hdmi->output.encoder); + drm_connector_register(&hdmi->output.connector); err = tegra_output_init(drm, &hdmi->output); if (err < 0) { - dev_err(client->dev, "output setup failed: %d\n", err); + dev_err(client->dev, "failed to initialize output: %d\n", err); return err; } + hdmi->output.encoder.possible_crtcs = 0x3; + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_hdmi_debugfs_init(hdmi, drm->primary); if (err < 0) @@ -1374,34 +1357,44 @@ static int tegra_hdmi_init(struct host1x_client *client) return err; } + err = regulator_enable(hdmi->pll); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); + return err; + } + + err = regulator_enable(hdmi->vdd); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); + return err; + } + + err = clk_prepare_enable(hdmi->clk); + if (err < 0) { + dev_err(hdmi->dev, "failed to enable clock: %d\n", err); + return err; + } + + reset_control_deassert(hdmi->rst); + return 0; } static int tegra_hdmi_exit(struct host1x_client *client) { struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client); - int err; - regulator_disable(hdmi->hdmi); + tegra_output_exit(&hdmi->output); - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_hdmi_debugfs_exit(hdmi); - if (err < 0) - dev_err(client->dev, "debugfs cleanup failed: %d\n", - err); - } + clk_disable_unprepare(hdmi->clk); + reset_control_assert(hdmi->rst); - err = tegra_output_disable(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output failed to disable: %d\n", err); - return err; - } + regulator_disable(hdmi->vdd); + regulator_disable(hdmi->pll); + regulator_disable(hdmi->hdmi); - err = tegra_output_exit(&hdmi->output); - if (err < 0) { - dev_err(client->dev, "output cleanup failed: %d\n", err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_hdmi_debugfs_exit(hdmi); return 0; } @@ -1559,11 +1552,7 @@ static int tegra_hdmi_remove(struct platform_device *pdev) return err; } - err = tegra_output_remove(&hdmi->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to remove output: %d\n", err); - return err; - } + tegra_output_remove(&hdmi->output); clk_disable_unprepare(hdmi->clk_parent); clk_disable_unprepare(hdmi->clk); diff --git a/drivers/gpu/drm/tegra/mipi-phy.c b/drivers/gpu/drm/tegra/mipi-phy.c index 486d19d589c8..ba2ae6511957 100644 --- a/drivers/gpu/drm/tegra/mipi-phy.c +++ b/drivers/gpu/drm/tegra/mipi-phy.c @@ -12,9 +12,9 @@ #include "mipi-phy.h" /* - * Default D-PHY timings based on MIPI D-PHY specification. Derived from - * the valid ranges specified in Section 5.9 of the D-PHY specification - * with minor adjustments. 
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from the + * valid ranges specified in Section 6.9, Table 14, Page 40 of the D-PHY + * specification (v1.2) with minor adjustments. */ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, unsigned long period) @@ -34,7 +34,20 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, timing->hszero = 145 + 5 * period; timing->hssettle = 85 + 6 * period; timing->hsskip = 40; - timing->hstrail = max(8 * period, 60 + 4 * period); + + /* + * The MIPI D-PHY specification (Section 6.9, v1.2, Table 14, Page 40) + * contains this formula as: + * + * T_HS-TRAIL = max(n * 8 * period, 60 + n * 4 * period) + * + * where n = 1 for forward-direction HS mode and n = 4 for reverse- + * direction HS mode. There's only one setting and this function does + * not parameterize on anything other than period, so this code + * assumes that reverse-direction HS mode is supported and uses n = 4. + */ + timing->hstrail = max(4 * 8 * period, 60 + 4 * 4 * period); + timing->init = 100000; timing->lpx = 60; timing->taget = 5 * timing->lpx; @@ -46,8 +59,8 @@ int mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing, } /* - * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY, - * Section 5.9 "Global Operation Timing Parameters". + * Validate D-PHY timing according to MIPI D-PHY specification (v1.2, + * Section 6.9 "Global Operation Timing Parameters"). */ int mipi_dphy_timing_validate(struct mipi_dphy_timing *timing, unsigned long period) diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 6a5c7b81fbc5..37db47975d48 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -9,10 +9,11 @@ #include <linux/of_gpio.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_panel.h> #include "drm.h" -static int tegra_connector_get_modes(struct drm_connector *connector) +int tegra_output_connector_get_modes(struct drm_connector *connector) { struct tegra_output *output = connector_to_output(connector); struct edid *edid = NULL; @@ -43,43 +44,20 @@ static int tegra_connector_get_modes(struct drm_connector *connector) return err; } -static int tegra_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct tegra_output *output = connector_to_output(connector); - enum drm_mode_status status = MODE_OK; - int err; - - err = tegra_output_check_mode(output, mode, &status); - if (err < 0) - return MODE_ERROR; - - return status; -} - -static struct drm_encoder * -tegra_connector_best_encoder(struct drm_connector *connector) +struct drm_encoder * +tegra_output_connector_best_encoder(struct drm_connector *connector) { struct tegra_output *output = connector_to_output(connector); return &output->encoder; } -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = tegra_connector_get_modes, - .mode_valid = tegra_connector_mode_valid, - .best_encoder = tegra_connector_best_encoder, -}; - -static enum drm_connector_status -tegra_connector_detect(struct drm_connector *connector, bool force) +enum drm_connector_status +tegra_output_connector_detect(struct drm_connector *connector, bool force) { struct tegra_output *output = connector_to_output(connector); enum drm_connector_status status = connector_status_unknown; - if (output->ops->detect) - return output->ops->detect(output); - if (gpio_is_valid(output->hpd_gpio)) { if (gpio_get_value(output->hpd_gpio) == 0) status = 
connector_status_disconnected; @@ -90,95 +68,22 @@ tegra_connector_detect(struct drm_connector *connector, bool force) status = connector_status_disconnected; else status = connector_status_connected; - - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) - status = connector_status_connected; } return status; } -static void drm_connector_clear(struct drm_connector *connector) -{ - memset(connector, 0, sizeof(*connector)); -} - -static void tegra_connector_destroy(struct drm_connector *connector) +void tegra_output_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); - drm_connector_clear(connector); } -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_helper_connector_dpms, - .detect = tegra_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = tegra_connector_destroy, -}; - -static void drm_encoder_clear(struct drm_encoder *encoder) -{ - memset(encoder, 0, sizeof(*encoder)); -} - -static void tegra_encoder_destroy(struct drm_encoder *encoder) +void tegra_output_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); - drm_encoder_clear(encoder); } -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = tegra_encoder_destroy, -}; - -static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct drm_panel *panel = output->panel; - - if (mode != DRM_MODE_DPMS_ON) { - drm_panel_disable(panel); - tegra_output_disable(output); - drm_panel_unprepare(panel); - } else { - drm_panel_prepare(panel); - tegra_output_enable(output); - drm_panel_enable(panel); - } -} - -static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ - return true; -} - -static void tegra_encoder_prepare(struct drm_encoder *encoder) -{ - tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); -} - -static void tegra_encoder_commit(struct drm_encoder *encoder) -{ - tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON); -} - -static void tegra_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) -{ -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .dpms = tegra_encoder_dpms, - .mode_fixup = tegra_encoder_mode_fixup, - .prepare = tegra_encoder_prepare, - .commit = tegra_encoder_commit, - .mode_set = tegra_encoder_mode_set, -}; - static irqreturn_t hpd_irq(int irq, void *data) { struct tegra_output *output = data; @@ -268,7 +173,7 @@ int tegra_output_probe(struct tegra_output *output) return 0; } -int tegra_output_remove(struct tegra_output *output) +void tegra_output_remove(struct tegra_output *output) { if (gpio_is_valid(output->hpd_gpio)) { free_irq(output->hpd_irq, output); @@ -277,56 +182,17 @@ int tegra_output_remove(struct tegra_output *output) if (output->ddc) put_device(&output->ddc->dev); - - return 0; } int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { - int connector, encoder; - - switch (output->type) { - case TEGRA_OUTPUT_RGB: - connector = DRM_MODE_CONNECTOR_LVDS; - encoder = DRM_MODE_ENCODER_LVDS; - break; - - case TEGRA_OUTPUT_HDMI: - connector = DRM_MODE_CONNECTOR_HDMIA; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - case TEGRA_OUTPUT_DSI: - connector = DRM_MODE_CONNECTOR_DSI; - encoder = DRM_MODE_ENCODER_DSI; - break; - - case TEGRA_OUTPUT_EDP: - connector = 
DRM_MODE_CONNECTOR_eDP; - encoder = DRM_MODE_ENCODER_TMDS; - break; - - default: - connector = DRM_MODE_CONNECTOR_Unknown; - encoder = DRM_MODE_ENCODER_NONE; - break; - } - - drm_connector_init(drm, &output->connector, &connector_funcs, - connector); - drm_connector_helper_add(&output->connector, &connector_helper_funcs); - output->connector.dpms = DRM_MODE_DPMS_OFF; - - if (output->panel) - drm_panel_attach(output->panel, &output->connector); - - drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder); - drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs); - - drm_mode_connector_attach_encoder(&output->connector, &output->encoder); - drm_connector_register(&output->connector); + int err; - output->encoder.possible_crtcs = 0x3; + if (output->panel) { + err = drm_panel_attach(output->panel, &output->connector); + if (err < 0) + return err; + } /* * The connector is now registered and ready to receive hotplug events @@ -338,7 +204,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) return 0; } -int tegra_output_exit(struct tegra_output *output) +void tegra_output_exit(struct tegra_output *output) { /* * The connector is going away, so the interrupt must be disabled to @@ -349,6 +215,4 @@ int tegra_output_exit(struct tegra_output *output) if (output->panel) drm_panel_detach(output->panel); - - return 0; } diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index d6af9be48f42..7cd833f5b5b5 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -9,6 +9,9 @@ #include <linux/clk.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_panel.h> + #include "drm.h" #include "dc.h" @@ -85,13 +88,65 @@ static void tegra_dc_write_regs(struct tegra_dc *dc, tegra_dc_writel(dc, table[i].value, table[i].offset); } -static int tegra_output_rgb_enable(struct tegra_output *output) +static void tegra_rgb_connector_dpms(struct drm_connector *connector, + int mode) +{ +} + +static const struct drm_connector_funcs tegra_rgb_connector_funcs = { + .dpms = tegra_rgb_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_output_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static enum drm_mode_status +tegra_rgb_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + /* + * FIXME: For now, always assume that the mode is okay. There are + * unresolved issues with clk_round_rate(), which doesn't always + * reliably report whether a frequency can be set or not. 
+ */ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { + .get_modes = tegra_output_connector_get_modes, + .mode_valid = tegra_rgb_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder) { +} + +static void tegra_rgb_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - unsigned long value; + u32 value; - if (rgb->enabled) - return 0; + if (output->panel) + drm_panel_prepare(output->panel); tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); @@ -113,64 +168,39 @@ static int tegra_output_rgb_enable(struct tegra_output *output) value = SC0_H_QUALIFIER_NONE | SC1_H_QUALIFIER_NONE; tegra_dc_writel(rgb->dc, value, DC_DISP_SHIFT_CLOCK_OPTIONS); - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - value |= DISP_CTRL_MODE_C_DISPLAY; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); + tegra_dc_commit(rgb->dc); - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - - rgb->enabled = true; - - return 0; + if (output->panel) + drm_panel_enable(output->panel); } -static int tegra_output_rgb_disable(struct tegra_output *output) +static void tegra_rgb_encoder_disable(struct drm_encoder *encoder) { + struct tegra_output *output = encoder_to_output(encoder); struct tegra_rgb *rgb = to_rgb(output); - unsigned long value; - - if (!rgb->enabled) - return 0; - - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(rgb->dc, value, DC_CMD_DISPLAY_COMMAND); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + if (output->panel) + drm_panel_disable(output->panel); tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + tegra_dc_commit(rgb->dc); - rgb->enabled = false; - - return 0; + if (output->panel) + drm_panel_unprepare(output->panel); } -static int tegra_output_rgb_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_rgb *rgb = to_rgb(output); + unsigned int div; int err; - err = 
clk_set_parent(clk, rgb->clk_parent); - if (err < 0) { - dev_err(output->dev, "failed to set parent: %d\n", err); - return err; - } - /* * We may not want to change the frequency of the parent clock, since * it may be a parent for other peripherals. This is due to the fact @@ -187,32 +217,26 @@ static int tegra_output_rgb_setup_clock(struct tegra_output *output, * and hope that the desired frequency can be matched (or at least * matched sufficiently close that the panel will still work). */ + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; - *div = ((clk_get_rate(clk) * 2) / pclk) - 2; - - return 0; -} - -static int tegra_output_rgb_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. There are - * unresolved issues with clk_round_rate(), which doesn't always - * reliably report whether a frequency can be set or not. - */ - - *status = MODE_OK; + err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, + pclk, div); + if (err < 0) { + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); + return err; + } - return 0; + return err; } -static const struct tegra_output_ops rgb_ops = { - .enable = tegra_output_rgb_enable, - .disable = tegra_output_rgb_disable, - .setup_clock = tegra_output_rgb_setup_clock, - .check_mode = tegra_output_rgb_check_mode, +static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = { + .dpms = tegra_rgb_encoder_dpms, + .prepare = tegra_rgb_encoder_prepare, + .commit = tegra_rgb_encoder_commit, + .mode_set = tegra_rgb_encoder_mode_set, + .disable = tegra_rgb_encoder_disable, + .atomic_check = tegra_rgb_encoder_atomic_check, }; int tegra_dc_rgb_probe(struct tegra_dc *dc) @@ -262,64 +286,58 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc) int tegra_dc_rgb_remove(struct tegra_dc *dc) { - int err; - if (!dc->rgb) return 0; - err = tegra_output_remove(dc->rgb); - if (err < 0) - return err; + tegra_output_remove(dc->rgb); + dc->rgb = NULL; return 0; } int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc) { - struct tegra_rgb *rgb = to_rgb(dc->rgb); + struct tegra_output *output = dc->rgb; int err; if (!dc->rgb) return -ENODEV; - rgb->output.type = TEGRA_OUTPUT_RGB; - rgb->output.ops = &rgb_ops; + drm_connector_init(drm, &output->connector, &tegra_rgb_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_connector_helper_add(&output->connector, + &tegra_rgb_connector_helper_funcs); + output->connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs, + DRM_MODE_ENCODER_LVDS); + drm_encoder_helper_add(&output->encoder, + &tegra_rgb_encoder_helper_funcs); - err = tegra_output_init(dc->base.dev, &rgb->output); + drm_mode_connector_attach_encoder(&output->connector, + &output->encoder); + drm_connector_register(&output->connector); + + err = tegra_output_init(drm, output); if (err < 0) { - dev_err(dc->dev, "output setup failed: %d\n", err); + dev_err(output->dev, "failed to initialize output: %d\n", err); return err; } /* - * By default, outputs can be associated with each display controller. - * RGB outputs are an exception, so we make sure they can be attached - * to only their parent display controller. + * Other outputs can be attached to either display controller. The RGB + * outputs are an exception and work only with their parent display + * controller. 
*/ - rgb->output.encoder.possible_crtcs = drm_crtc_mask(&dc->base); + output->encoder.possible_crtcs = drm_crtc_mask(&dc->base); return 0; } int tegra_dc_rgb_exit(struct tegra_dc *dc) { - if (dc->rgb) { - int err; - - err = tegra_output_disable(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output failed to disable: %d\n", err); - return err; - } - - err = tegra_output_exit(dc->rgb); - if (err < 0) { - dev_err(dc->dev, "output cleanup failed: %d\n", err); - return err; - } - - dc->rgb = NULL; - } + if (dc->rgb) + tegra_output_exit(dc->rgb); return 0; } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 7829e81f065d..2afe478ded3b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -8,13 +8,16 @@ #include <linux/clk.h> #include <linux/debugfs.h> +#include <linux/gpio.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <soc/tegra/pmc.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> +#include <drm/drm_panel.h> #include "dc.h" #include "drm.h" @@ -258,18 +261,8 @@ static int tegra_sor_attach(struct tegra_sor *sor) static int tegra_sor_wakeup(struct tegra_sor *sor) { - struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc); unsigned long value, timeout; - /* enable display controller outputs */ - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value |= PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); - timeout = jiffies + msecs_to_jiffies(250); /* wait for head to wake up */ @@ -482,10 +475,317 @@ static int tegra_sor_calc_config(struct tegra_sor *sor, return 0; } -static int tegra_output_sor_enable(struct tegra_output *output) +static int tegra_sor_detach(struct tegra_sor *sor) +{ + unsigned long value, timeout; + + /* switch to safe mode */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_MODE_NORMAL; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if (value & SOR_PWR_MODE_SAFE) + break; + } + + if ((value & SOR_PWR_MODE_SAFE) == 0) + return -ETIMEDOUT; + + /* go to sleep */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + /* detach */ + value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); + value &= ~SOR_SUPER_STATE_ATTACHED; + tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); + tegra_sor_super_update(sor); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_TEST); + if ((value & SOR_TEST_ATTACHED) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_TEST_ATTACHED) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down(struct tegra_sor *sor) +{ + unsigned long value, timeout; + int err; + + value = tegra_sor_readl(sor, SOR_PWR); + value &= ~SOR_PWR_NORMAL_STATE_PU; + value |= SOR_PWR_TRIGGER; + tegra_sor_writel(sor, value, SOR_PWR); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_PWR); + if ((value & SOR_PWR_TRIGGER) == 0) + 
return 0; + + usleep_range(25, 100); + } + + if ((value & SOR_PWR_TRIGGER) != 0) + return -ETIMEDOUT; + + err = clk_set_parent(sor->clk, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); + tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); + + /* stop lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + value = tegra_sor_readl(sor, SOR_PLL_0); + value |= SOR_PLL_0_POWER_OFF; + value |= SOR_PLL_0_VCOPD; + tegra_sor_writel(sor, value, SOR_PLL_0); + + value = tegra_sor_readl(sor, SOR_PLL_2); + value |= SOR_PLL_2_SEQ_PLLCAPPD; + value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; + tegra_sor_writel(sor, value, SOR_PLL_2); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_crc_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static int tegra_sor_crc_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) +{ + u32 value; + + timeout = jiffies + msecs_to_jiffies(timeout); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_CRC_A); + if (value & SOR_CRC_A_VALID) + return 0; + + usleep_range(100, 200); + } + + return -ETIMEDOUT; +} + +static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, + size_t size, loff_t *ppos) +{ + struct tegra_sor *sor = file->private_data; + ssize_t num, err; + char buf[10]; + u32 value; + + mutex_lock(&sor->lock); + + if (!sor->enabled) { + err = -EAGAIN; + goto unlock; + } + + value = tegra_sor_readl(sor, SOR_STATE_1); + value &= ~SOR_STATE_ASY_CRC_MODE_MASK; + tegra_sor_writel(sor, value, SOR_STATE_1); + + value = tegra_sor_readl(sor, SOR_CRC_CNTRL); + value |= SOR_CRC_CNTRL_ENABLE; + tegra_sor_writel(sor, value, SOR_CRC_CNTRL); + + value = tegra_sor_readl(sor, SOR_TEST); + value &= ~SOR_TEST_CRC_POST_SERIALIZE; + tegra_sor_writel(sor, value, SOR_TEST); + + err = tegra_sor_crc_wait(sor, 100); + if (err < 0) + goto unlock; + + tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); + value = tegra_sor_readl(sor, SOR_CRC_B); + + num = scnprintf(buf, sizeof(buf), "%08x\n", value); + + err = simple_read_from_buffer(buffer, size, ppos, buf, num); + +unlock: + mutex_unlock(&sor->lock); + return err; +} + +static const struct file_operations tegra_sor_crc_fops = { + .owner = THIS_MODULE, + .open = tegra_sor_crc_open, + .read = tegra_sor_crc_read, + .release = tegra_sor_crc_release, +}; + +static int tegra_sor_debugfs_init(struct tegra_sor *sor, + struct drm_minor *minor) +{ + struct dentry *entry; + int err = 0; + + sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); + if (!sor->debugfs) + return -ENOMEM; + + entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, + &tegra_sor_crc_fops); + if (!entry) 
{ + dev_err(sor->dev, + "cannot create /sys/kernel/debug/dri/%s/sor/crc\n", + minor->debugfs_root->d_name.name); + err = -ENOMEM; + goto remove; + } + + return err; + +remove: + debugfs_remove(sor->debugfs); + sor->debugfs = NULL; + return err; +} + +static void tegra_sor_debugfs_exit(struct tegra_sor *sor) +{ + debugfs_remove_recursive(sor->debugfs); + sor->debugfs = NULL; +} + +static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode) +{ +} + +static enum drm_connector_status +tegra_sor_connector_detect(struct drm_connector *connector, bool force) +{ + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + + if (sor->dpaux) + return tegra_dpaux_detect(sor->dpaux); + + return connector_status_unknown; +} + +static const struct drm_connector_funcs tegra_sor_connector_funcs = { + .dpms = tegra_sor_connector_dpms, + .reset = drm_atomic_helper_connector_reset, + .detect = tegra_sor_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = tegra_output_connector_destroy, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int tegra_sor_connector_get_modes(struct drm_connector *connector) { - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); - struct drm_display_mode *mode = &dc->base.mode; + struct tegra_output *output = connector_to_output(connector); + struct tegra_sor *sor = to_sor(output); + int err; + + if (sor->dpaux) + tegra_dpaux_enable(sor->dpaux); + + err = tegra_output_connector_get_modes(connector); + + if (sor->dpaux) + tegra_dpaux_disable(sor->dpaux); + + return err; +} + +static enum drm_mode_status +tegra_sor_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { + .get_modes = tegra_sor_connector_get_modes, + .mode_valid = tegra_sor_connector_mode_valid, + .best_encoder = tegra_output_connector_best_encoder, +}; + +static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { + .destroy = tegra_output_encoder_destroy, +}; + +static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static void tegra_sor_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_commit(struct drm_encoder *encoder) +{ +} + +static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); unsigned int vbe, vse, hbe, hse, vbs, hbs, i; struct tegra_sor *sor = to_sor(output); struct tegra_sor_config config; @@ -505,6 +805,9 @@ static int tegra_output_sor_enable(struct tegra_output *output) reset_control_deassert(sor->rst); + if (output->panel) + drm_panel_prepare(output->panel); + /* FIXME: properly convert to struct drm_dp_aux */ aux = (struct drm_dp_aux *)sor->dpaux; @@ -800,18 +1103,6 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } - /* start display controller in continuous mode */ - value = tegra_dc_readl(dc, DC_CMD_STATE_ACCESS); - value |= WRITE_MUX; - tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS); - - tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS); - tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND); - - value = 
tegra_dc_readl(dc, DC_CMD_STATE_ACCESS); - value &= ~WRITE_MUX; - tegra_dc_writel(dc, value, DC_CMD_STATE_ACCESS); - /* * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete * raster, associate with display controller) @@ -886,11 +1177,13 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } + tegra_sor_update(sor); + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value |= SOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_sor_update(sor); + tegra_dc_commit(dc); err = tegra_sor_attach(sor); if (err < 0) { @@ -904,145 +1197,31 @@ static int tegra_output_sor_enable(struct tegra_output *output) goto unlock; } + if (output->panel) + drm_panel_enable(output->panel); + sor->enabled = true; unlock: mutex_unlock(&sor->lock); - return err; } -static int tegra_sor_detach(struct tegra_sor *sor) +static void tegra_sor_encoder_disable(struct drm_encoder *encoder) { - unsigned long value, timeout; - - /* switch to safe mode */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_MODE_NORMAL; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_PWR); - if (value & SOR_PWR_MODE_SAFE) - break; - } - - if ((value & SOR_PWR_MODE_SAFE) == 0) - return -ETIMEDOUT; - - /* go to sleep */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - /* detach */ - value = tegra_sor_readl(sor, SOR_SUPER_STATE_1); - value &= ~SOR_SUPER_STATE_ATTACHED; - tegra_sor_writel(sor, value, SOR_SUPER_STATE_1); - tegra_sor_super_update(sor); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_TEST); - if ((value & SOR_TEST_ATTACHED) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_TEST_ATTACHED) != 0) - return -ETIMEDOUT; - - return 0; -} - -static int tegra_sor_power_down(struct tegra_sor *sor) -{ - unsigned long value, timeout; - int err; - - value = tegra_sor_readl(sor, SOR_PWR); - value &= ~SOR_PWR_NORMAL_STATE_PU; - value |= SOR_PWR_TRIGGER; - tegra_sor_writel(sor, value, SOR_PWR); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_PWR); - if ((value & SOR_PWR_TRIGGER) == 0) - return 0; - - usleep_range(25, 100); - } - - if ((value & SOR_PWR_TRIGGER) != 0) - return -ETIMEDOUT; - - err = clk_set_parent(sor->clk, sor->clk_safe); - if (err < 0) - dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - - value = tegra_sor_readl(sor, SOR_DP_PADCTL_0); - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | - SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, SOR_DP_PADCTL_0); - - /* stop lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | - SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) - return -ETIMEDOUT; - - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_PORT_POWERDOWN; - 
tegra_sor_writel(sor, value, SOR_PLL_2); - - usleep_range(20, 100); - - value = tegra_sor_readl(sor, SOR_PLL_0); - value |= SOR_PLL_0_POWER_OFF; - value |= SOR_PLL_0_VCOPD; - tegra_sor_writel(sor, value, SOR_PLL_0); - - value = tegra_sor_readl(sor, SOR_PLL_2); - value |= SOR_PLL_2_SEQ_PLLCAPPD; - value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, SOR_PLL_2); - - usleep_range(20, 100); - - return 0; -} - -static int tegra_output_sor_disable(struct tegra_output *output) -{ - struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); struct tegra_sor *sor = to_sor(output); - unsigned long value; - int err = 0; + u32 value; + int err; mutex_lock(&sor->lock); if (!sor->enabled) goto unlock; + if (output->panel) + drm_panel_disable(output->panel); + err = tegra_sor_detach(sor); if (err < 0) { dev_err(sor->dev, "failed to detach SOR: %d\n", err); @@ -1057,31 +1236,11 @@ static int tegra_output_sor_disable(struct tegra_output *output) * sure it's only executed when the output is attached to one. */ if (dc) { - /* - * XXX: We can't do this here because it causes the SOR to go - * into an erroneous state and the output will look scrambled - * the next time it is enabled. Presumably this is because we - * should be doing this only on the next VBLANK. A possible - * solution would be to queue a "power-off" event to trigger - * this code to be run during the next VBLANK. - */ - /* - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL); - value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | - PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); - */ - - value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND); - value &= ~DISP_CTRL_MODE_MASK; - tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); value &= ~SOR_ENABLE; tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); - tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + tegra_dc_commit(dc); } err = tegra_sor_power_down(sor); @@ -1104,187 +1263,48 @@ static int tegra_output_sor_disable(struct tegra_output *output) goto unlock; } - reset_control_assert(sor->rst); + if (output->panel) + drm_panel_unprepare(output->panel); + clk_disable_unprepare(sor->clk); + reset_control_assert(sor->rst); sor->enabled = false; unlock: mutex_unlock(&sor->lock); - return err; } -static int tegra_output_sor_setup_clock(struct tegra_output *output, - struct clk *clk, unsigned long pclk, - unsigned int *div) +static int +tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); + unsigned long pclk = crtc_state->mode.clock * 1000; struct tegra_sor *sor = to_sor(output); int err; - err = clk_set_parent(clk, sor->clk_parent); - if (err < 0) { - dev_err(sor->dev, "failed to set parent clock: %d\n", err); - return err; - } - - err = clk_set_rate(sor->clk_parent, pclk); + err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, + pclk, 0); if (err < 0) { - dev_err(sor->dev, "failed to set clock rate to %lu Hz\n", pclk); + dev_err(output->dev, "failed to setup CRTC state: %d\n", err); return err; } - *div = 0; - return 0; } -static int 
tegra_output_sor_check_mode(struct tegra_output *output, - struct drm_display_mode *mode, - enum drm_mode_status *status) -{ - /* - * FIXME: For now, always assume that the mode is okay. - */ - - *status = MODE_OK; - - return 0; -} - -static enum drm_connector_status -tegra_output_sor_detect(struct tegra_output *output) -{ - struct tegra_sor *sor = to_sor(output); - - if (sor->dpaux) - return tegra_dpaux_detect(sor->dpaux); - - return connector_status_unknown; -} - -static const struct tegra_output_ops sor_ops = { - .enable = tegra_output_sor_enable, - .disable = tegra_output_sor_disable, - .setup_clock = tegra_output_sor_setup_clock, - .check_mode = tegra_output_sor_check_mode, - .detect = tegra_output_sor_detect, +static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = { + .dpms = tegra_sor_encoder_dpms, + .prepare = tegra_sor_encoder_prepare, + .commit = tegra_sor_encoder_commit, + .mode_set = tegra_sor_encoder_mode_set, + .disable = tegra_sor_encoder_disable, + .atomic_check = tegra_sor_encoder_atomic_check, }; -static int tegra_sor_crc_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - - return 0; -} - -static int tegra_sor_crc_release(struct inode *inode, struct file *file) -{ - return 0; -} - -static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout) -{ - u32 value; - - timeout = jiffies + msecs_to_jiffies(timeout); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_CRC_A); - if (value & SOR_CRC_A_VALID) - return 0; - - usleep_range(100, 200); - } - - return -ETIMEDOUT; -} - -static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer, - size_t size, loff_t *ppos) -{ - struct tegra_sor *sor = file->private_data; - ssize_t num, err; - char buf[10]; - u32 value; - - mutex_lock(&sor->lock); - - if (!sor->enabled) { - err = -EAGAIN; - goto unlock; - } - - value = tegra_sor_readl(sor, SOR_STATE_1); - value &= ~SOR_STATE_ASY_CRC_MODE_MASK; - tegra_sor_writel(sor, value, SOR_STATE_1); - - value = tegra_sor_readl(sor, SOR_CRC_CNTRL); - value |= SOR_CRC_CNTRL_ENABLE; - tegra_sor_writel(sor, value, SOR_CRC_CNTRL); - - value = tegra_sor_readl(sor, SOR_TEST); - value &= ~SOR_TEST_CRC_POST_SERIALIZE; - tegra_sor_writel(sor, value, SOR_TEST); - - err = tegra_sor_crc_wait(sor, 100); - if (err < 0) - goto unlock; - - tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A); - value = tegra_sor_readl(sor, SOR_CRC_B); - - num = scnprintf(buf, sizeof(buf), "%08x\n", value); - - err = simple_read_from_buffer(buffer, size, ppos, buf, num); - -unlock: - mutex_unlock(&sor->lock); - return err; -} - -static const struct file_operations tegra_sor_crc_fops = { - .owner = THIS_MODULE, - .open = tegra_sor_crc_open, - .read = tegra_sor_crc_read, - .release = tegra_sor_crc_release, -}; - -static int tegra_sor_debugfs_init(struct tegra_sor *sor, - struct drm_minor *minor) -{ - struct dentry *entry; - int err = 0; - - sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root); - if (!sor->debugfs) - return -ENOMEM; - - entry = debugfs_create_file("crc", 0644, sor->debugfs, sor, - &tegra_sor_crc_fops); - if (!entry) { - dev_err(sor->dev, - "cannot create /sys/kernel/debug/dri/%s/sor/crc\n", - minor->debugfs_root->d_name.name); - err = -ENOMEM; - goto remove; - } - - return err; - -remove: - debugfs_remove(sor->debugfs); - sor->debugfs = NULL; - return err; -} - -static int tegra_sor_debugfs_exit(struct tegra_sor *sor) -{ - debugfs_remove_recursive(sor->debugfs); - sor->debugfs = NULL; - - return 0; 
-} - static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); @@ -1294,17 +1314,32 @@ static int tegra_sor_init(struct host1x_client *client) if (!sor->dpaux) return -ENODEV; - sor->output.type = TEGRA_OUTPUT_EDP; - sor->output.dev = sor->dev; - sor->output.ops = &sor_ops; + + drm_connector_init(drm, &sor->output.connector, + &tegra_sor_connector_funcs, + DRM_MODE_CONNECTOR_eDP); + drm_connector_helper_add(&sor->output.connector, + &tegra_sor_connector_helper_funcs); + sor->output.connector.dpms = DRM_MODE_DPMS_OFF; + + drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs, + DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(&sor->output.encoder, + &tegra_sor_encoder_helper_funcs); + + drm_mode_connector_attach_encoder(&sor->output.connector, + &sor->output.encoder); + drm_connector_register(&sor->output.connector); err = tegra_output_init(drm, &sor->output); if (err < 0) { - dev_err(sor->dev, "output setup failed: %d\n", err); + dev_err(client->dev, "failed to initialize output: %d\n", err); return err; } + sor->output.encoder.possible_crtcs = 0x3; + if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_sor_debugfs_init(sor, drm->primary); if (err < 0) @@ -1319,6 +1354,20 @@ static int tegra_sor_init(struct host1x_client *client) } } + err = clk_prepare_enable(sor->clk); + if (err < 0) { + dev_err(sor->dev, "failed to enable clock: %d\n", err); + return err; + } + + err = clk_prepare_enable(sor->clk_safe); + if (err < 0) + return err; + + err = clk_prepare_enable(sor->clk_dp); + if (err < 0) + return err; + return 0; } @@ -1327,11 +1376,7 @@ static int tegra_sor_exit(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int err; - err = tegra_output_disable(&sor->output); - if (err < 0) { - dev_err(sor->dev, "output failed to disable: %d\n", err); - return err; - } + tegra_output_exit(&sor->output); if (sor->dpaux) { err = tegra_dpaux_detach(sor->dpaux); @@ -1341,17 +1386,12 @@ static int tegra_sor_exit(struct host1x_client *client) } } - if (IS_ENABLED(CONFIG_DEBUG_FS)) { - err = tegra_sor_debugfs_exit(sor); - if (err < 0) - dev_err(sor->dev, "debugfs cleanup failed: %d\n", err); - } + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk_dp); + clk_disable_unprepare(sor->clk); - err = tegra_output_exit(&sor->output); - if (err < 0) { - dev_err(sor->dev, "output cleanup failed: %d\n", err); - return err; - } + if (IS_ENABLED(CONFIG_DEBUG_FS)) + tegra_sor_debugfs_exit(sor); return 0; } @@ -1404,26 +1444,14 @@ static int tegra_sor_probe(struct platform_device *pdev) if (IS_ERR(sor->clk_parent)) return PTR_ERR(sor->clk_parent); - err = clk_prepare_enable(sor->clk_parent); - if (err < 0) - return err; - sor->clk_safe = devm_clk_get(&pdev->dev, "safe"); if (IS_ERR(sor->clk_safe)) return PTR_ERR(sor->clk_safe); - err = clk_prepare_enable(sor->clk_safe); - if (err < 0) - return err; - sor->clk_dp = devm_clk_get(&pdev->dev, "dp"); if (IS_ERR(sor->clk_dp)) return PTR_ERR(sor->clk_dp); - err = clk_prepare_enable(sor->clk_dp); - if (err < 0) - return err; - INIT_LIST_HEAD(&sor->client.list); sor->client.ops = &sor_client_ops; sor->client.dev = &pdev->dev; @@ -1454,10 +1482,7 @@ static int tegra_sor_remove(struct platform_device *pdev) return err; } - clk_disable_unprepare(sor->clk_parent); - clk_disable_unprepare(sor->clk_safe); - clk_disable_unprepare(sor->clk_dp); - clk_disable_unprepare(sor->clk); + tegra_output_remove(&sor->output); return 0; } diff --git 
a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 7c3ef79fcb37..8394a0b3993e 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -1,6 +1,6 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" - depends on DRM && OF && ARM + depends on DRM && OF && ARM && HAVE_DMA_ATTRS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 1701f1dfb23f..677190a65e82 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -340,11 +340,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, wrptr = udl_dummy_render(wrptr); - ufb->active_16 = true; if (old_fb) { struct udl_framebuffer *uold_fb = to_udl_fb(old_fb); uold_fb->active_16 = false; } + ufb->active_16 = true; udl->mode_buf_len = wrptr - buf; /* damage all of it */ @@ -373,6 +373,13 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; unsigned long flags; + struct drm_framebuffer *old_fb = crtc->primary->fb; + if (old_fb) { + struct udl_framebuffer *uold_fb = to_udl_fb(old_fb); + uold_fb->active_16 = false; + } + ufb->active_16 = true; + udl_handle_damage(ufb, 0, 0, fb->width, fb->height); spin_lock_irqsave(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index f343db73e095..917dcb978c2c 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -82,12 +82,14 @@ static inline u16 pixel32_to_be16(const uint32_t pixel) ((pixel >> 8) & 0xf800)); } -static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp) +static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp) { + u16 pixel_val16 = 0; if (bpp == 2) - return *(const uint16_t *)pixel == repeat; - else - return *(const uint32_t *)pixel == repeat; + pixel_val16 = *(const uint16_t *)pixel; + else if (bpp == 4) + pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel); + return pixel_val16; } /* @@ -134,6 +136,7 @@ static void udl_compress_hline16( uint8_t *cmd_pixels_count_byte = NULL; const u8 *raw_pixel_start = NULL; const u8 *cmd_pixel_start, *cmd_pixel_end = NULL; + uint16_t pixel_val16; prefetchw((void *) cmd); /* pull in one cache line at least */ @@ -154,33 +157,29 @@ static void udl_compress_hline16( (int)(cmd_buffer_end - cmd) / 2))) * bpp; prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); + pixel_val16 = get_pixel_val16(pixel, bpp); while (pixel < cmd_pixel_end) { const u8 *const start = pixel; - u32 repeating_pixel; - - if (bpp == 2) { - repeating_pixel = *(uint16_t *)pixel; - *(uint16_t *)cmd = cpu_to_be16(repeating_pixel); - } else { - repeating_pixel = *(uint32_t *)pixel; - *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel)); - } + const uint16_t repeating_pixel_val16 = pixel_val16; + + *(uint16_t *)cmd = cpu_to_be16(pixel_val16); cmd += 2; pixel += bpp; - if (unlikely((pixel < cmd_pixel_end) && - (pixel_repeats(pixel, repeating_pixel, bpp)))) { + while (pixel < cmd_pixel_end) { + pixel_val16 = get_pixel_val16(pixel, bpp); + if (pixel_val16 != repeating_pixel_val16) + break; + pixel += bpp; + } + + if (unlikely(pixel > start + bpp)) { /* go back and fill in raw pixel count */ *raw_pixels_count_byte = (((start - raw_pixel_start) / bpp) + 1) & 0xFF; - while ((pixel < cmd_pixel_end) && - (pixel_repeats(pixel, repeating_pixel, bpp))) { - pixel += bpp; - } - /* immediately after raw data is repeat 
byte */ *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); @@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, mutex_lock(&dev_priv->release_mutex); if (unlikely(--dev_priv->num_3d_resources == 0)) vmw_release_device(dev_priv); - else if (hide_svga) { - mutex_lock(&dev_priv->hw_mutex); + else if (hide_svga) vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) | SVGA_REG_ENABLE_HIDE); - mutex_unlock(&dev_priv->hw_mutex); - } n3d = (int32_t) dev_priv->num_3d_resources; mutex_unlock(&dev_priv->release_mutex); @@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; - mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); ttm_lock_init(&dev_priv->reservation_sem); + spin_lock_init(&dev_priv->hw_lock); + spin_lock_init(&dev_priv->waiter_lock); + spin_lock_init(&dev_priv->cap_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); @@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->enable_fb = enable_fbdev; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); - mutex_unlock(&dev_priv->hw_mutex); goto out_err0; } @@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = dev_priv->vram_size; ret = vmw_dma_masks(dev_priv); - if (unlikely(ret != 0)) { - mutex_unlock(&dev_priv->hw_mutex); + if (unlikely(ret != 0)) goto out_err0; - } /* * Limit back buffer size to VRAM size. 
Remove this once @@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (dev_priv->prim_bb_mem > dev_priv->vram_size) dev_priv->prim_bb_mem = dev_priv->vram_size; - mutex_unlock(&dev_priv->hw_mutex); - vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR2) { @@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); - mutex_unlock(&dev_priv->hw_mutex); } if (active) { @@ -1196,9 +1184,7 @@ out_no_active_lock: if (!dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } return ret; } @@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, DRM_ERROR("Unable to clean VRAM on master drop.\n"); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); - mutex_unlock(&dev_priv->hw_mutex); } dev_priv->active_master = &dev_priv->fbdev_master; @@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); - mutex_unlock(&dev_priv->hw_mutex); /** * Reclaim 3d reference held by fbdev and potentially diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -399,7 +399,8 @@ struct vmw_private { uint32_t memory_size; bool has_gmr; bool has_mob; - struct mutex hw_mutex; + spinlock_t hw_lock; + spinlock_t cap_lock; /* * VGA registers. @@ -449,8 +450,9 @@ struct vmw_private { atomic_t marker_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; - int fence_queue_waiters; /* Protected by hw_mutex */ - int goal_queue_waiters; /* Protected by hw_mutex */ + spinlock_t waiter_lock; + int fence_queue_waiters; /* Protected by waiter_lock */ + int goal_queue_waiters; /* Protected by waiter_lock */ atomic_t fifo_queue_waiters; uint32_t last_read_seqno; spinlock_t irq_lock; @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) return (struct vmw_master *) master->driver_priv; } +/* + * The locking here is fine-grained, so that it is performed once + * for every read- and write operation. This is of course costly, but we + * don't perform much register access in the timing critical paths anyway. + * Instead we have the extra benefit of being sure that we don't forget + * the hw lock around register accesses. 
+ */ static inline void vmw_write(struct vmw_private *dev_priv, unsigned int offset, uint32_t value) { + unsigned long irq_flags; + + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); } static inline uint32_t vmw_read(struct vmw_private *dev_priv, unsigned int offset) { - uint32_t val; + unsigned long irq_flags; + u32 val; + spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); + spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); + return val; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -35,7 +35,7 @@ struct vmw_fence_manager { struct vmw_private *dev_priv; spinlock_t lock; struct list_head fence_list; - struct work_struct work, ping_work; + struct work_struct work; u32 user_fence_size; u32 fence_size; u32 event_fence_action_size; @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) return "svga"; } -static void vmw_fence_ping_func(struct work_struct *work) -{ - struct vmw_fence_manager *fman = - container_of(work, struct vmw_fence_manager, ping_work); - - vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); -} - static bool vmw_fence_enable_signaling(struct fence *f) { struct vmw_fence_obj *fence = @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; - if (mutex_trylock(&dev_priv->hw_mutex)) { - vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); - mutex_unlock(&dev_priv->hw_mutex); - } else - schedule_work(&fman->ping_work); + vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); return true; } @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->fence_list); INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); - INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); fman->fifo_down = true; fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) bool lists_empty; (void) cancel_work_sync(&fman->work); - (void) cancel_work_sync(&fman->ping_work); spin_lock_irqsave(&fman->lock, irq_flags); lists_empty = list_empty(&fman->fence_list) && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (!dev_priv->has_mob) return false; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return (result != 0); } @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); - mutex_lock(&dev_priv->hw_mutex); 
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mb(); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); - mutex_unlock(&dev_priv->hw_mutex); max = ioread32(fifo_mem + SVGA_FIFO_MAX); min = ioread32(fifo_mem + SVGA_FIFO_MIN); @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) return vmw_fifo_send_fence(dev_priv, &dummy); } -void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) +void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; + static DEFINE_SPINLOCK(ping_lock); + unsigned long irq_flags; + /* + * The ping_lock is needed because we don't have an atomic + * test-and-set of the SVGA_FIFO_BUSY register. + */ + spin_lock_irqsave(&ping_lock, irq_flags); if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); vmw_write(dev_priv, SVGA_REG_SYNC, reason); } -} - -void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) -{ - mutex_lock(&dev_priv->hw_mutex); - - vmw_fifo_ping_host_locked(dev_priv, reason); - - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock_irqrestore(&ping_lock, irq_flags); } void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ; @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state); - mutex_unlock(&dev_priv->hw_mutex); vmw_marker_queue_takedown(&fifo->marker_queue); if (likely(fifo->static_buffer != NULL)) { @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return vmw_fifo_wait_noirq(dev_priv, bytes, interruptible, timeout); - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); outl(SVGA_IRQFLAG_FIFO_PROGRESS, @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); if (interruptible) ret = wait_event_interruptible_timeout @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, else if (likely(ret > 0)) ret = 0; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void 
*bounce, (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < max_size; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); compat_cap->pairs[i][0] = i; compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); return 0; } @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, if (num > SVGA3D_DEVCAP_MAX) num = SVGA3D_DEVCAP_MAX; - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->cap_lock); for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->cap_lock); } else if (gb_objects) { ret = vmw_fill_compat_cap(dev_priv, bounce, size); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) { - uint32_t busy; - mutex_lock(&dev_priv->hw_mutex); - busy = vmw_read(dev_priv, SVGA_REG_BUSY); - mutex_unlock(&dev_priv->hw_mutex); - - return (busy == 0); + return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); } void vmw_update_seqno(struct vmw_private *dev_priv, @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, void vmw_seqno_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->fence_queue_waiters++ == 0) { unsigned long irq_flags; @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->fence_queue_waiters == 0) { unsigned long irq_flags; @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_add(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (dev_priv->goal_queue_waiters++ == 0) { unsigned long irq_flags; @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } void vmw_goal_waiter_remove(struct vmw_private *dev_priv) { - mutex_lock(&dev_priv->hw_mutex); + spin_lock(&dev_priv->waiter_lock); if (--dev_priv->goal_queue_waiters == 0) { unsigned long irq_flags; @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); } - mutex_unlock(&dev_priv->hw_mutex); + spin_unlock(&dev_priv->waiter_lock); } int 
vmw_wait_seqno(struct vmw_private *dev_priv, @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) return; - mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); - mutex_unlock(&dev_priv->hw_mutex); status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_display_unit *du = vmw_connector_to_du(connector); - mutex_lock(&dev_priv->hw_mutex); num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); - mutex_unlock(&dev_priv->hw_mutex); return ((vmw_connector_to_du(connector)->unit < num_displays && du->pref_active) ? diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index aaf54859adb0..4a99c6416e6a 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -72,13 +72,14 @@ static void host1x_subdev_del(struct host1x_subdev *subdev) /** * host1x_device_parse_dt() - scan device tree and add matching subdevices */ -static int host1x_device_parse_dt(struct host1x_device *device) +static int host1x_device_parse_dt(struct host1x_device *device, + struct host1x_driver *driver) { struct device_node *np; int err; for_each_child_of_node(device->dev.parent->of_node, np) { - if (of_match_node(device->driver->subdevs, np) && + if (of_match_node(driver->subdevs, np) && of_device_is_available(np)) { err = host1x_subdev_add(device, np); if (err < 0) @@ -109,14 +110,12 @@ static void host1x_subdev_register(struct host1x_device *device, mutex_unlock(&device->clients_lock); mutex_unlock(&device->subdevs_lock); - /* - * When all subdevices have been registered, the composite device is - * ready to be probed. - */ if (list_empty(&device->subdevs)) { - err = device->driver->probe(device); + err = device_add(&device->dev); if (err < 0) - dev_err(&device->dev, "probe failed: %d\n", err); + dev_err(&device->dev, "failed to add: %d\n", err); + else + device->registered = true; } } @@ -124,16 +123,16 @@ static void __host1x_subdev_unregister(struct host1x_device *device, struct host1x_subdev *subdev) { struct host1x_client *client = subdev->client; - int err; /* * If all subdevices have been activated, we're about to remove the * first active subdevice, so unload the driver first. 
*/ if (list_empty(&device->subdevs)) { - err = device->driver->remove(device); - if (err < 0) - dev_err(&device->dev, "remove failed: %d\n", err); + if (device->registered) { + device->registered = false; + device_del(&device->dev); + } } /* @@ -260,24 +259,113 @@ static int host1x_del_client(struct host1x *host1x, return -ENODEV; } -static struct bus_type host1x_bus_type = { - .name = "host1x", -}; +static int host1x_device_match(struct device *dev, struct device_driver *drv) +{ + return strcmp(dev_name(dev), drv->name) == 0; +} + +static int host1x_device_probe(struct device *dev) +{ + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->probe) + return driver->probe(device); + + return 0; +} -int host1x_bus_init(void) +static int host1x_device_remove(struct device *dev) { - return bus_register(&host1x_bus_type); + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->remove) + return driver->remove(device); + + return 0; +} + +static void host1x_device_shutdown(struct device *dev) +{ + struct host1x_driver *driver = to_host1x_driver(dev->driver); + struct host1x_device *device = to_host1x_device(dev); + + if (driver->shutdown) + driver->shutdown(device); } -void host1x_bus_exit(void) +static const struct dev_pm_ops host1x_device_pm_ops = { + .suspend = pm_generic_suspend, + .resume = pm_generic_resume, + .freeze = pm_generic_freeze, + .thaw = pm_generic_thaw, + .poweroff = pm_generic_poweroff, + .restore = pm_generic_restore, +}; + +struct bus_type host1x_bus_type = { + .name = "host1x", + .match = host1x_device_match, + .probe = host1x_device_probe, + .remove = host1x_device_remove, + .shutdown = host1x_device_shutdown, + .pm = &host1x_device_pm_ops, +}; + +static void __host1x_device_del(struct host1x_device *device) { - bus_unregister(&host1x_bus_type); + struct host1x_subdev *subdev, *sd; + struct host1x_client *client, *cl; + + mutex_lock(&device->subdevs_lock); + + /* unregister subdevices */ + list_for_each_entry_safe(subdev, sd, &device->active, list) { + /* + * host1x_subdev_unregister() will remove the client from + * any lists, so we'll need to manually add it back to the + * list of idle clients. + * + * XXX: Alternatively, perhaps don't remove the client from + * any lists in host1x_subdev_unregister() and instead do + * that explicitly from host1x_unregister_client()? 
+ */ + client = subdev->client; + + __host1x_subdev_unregister(device, subdev); + + /* add the client to the list of idle clients */ + mutex_lock(&clients_lock); + list_add_tail(&client->list, &clients); + mutex_unlock(&clients_lock); + } + + /* remove subdevices */ + list_for_each_entry_safe(subdev, sd, &device->subdevs, list) + host1x_subdev_del(subdev); + + mutex_unlock(&device->subdevs_lock); + + /* move clients to idle list */ + mutex_lock(&clients_lock); + mutex_lock(&device->clients_lock); + + list_for_each_entry_safe(client, cl, &device->clients, list) + list_move_tail(&client->list, &clients); + + mutex_unlock(&device->clients_lock); + mutex_unlock(&clients_lock); + + /* finally remove the device */ + list_del_init(&device->list); } static void host1x_device_release(struct device *dev) { struct host1x_device *device = to_host1x_device(dev); + __host1x_device_del(device); kfree(device); } @@ -293,6 +381,8 @@ static int host1x_device_add(struct host1x *host1x, if (!device) return -ENOMEM; + device_initialize(&device->dev); + mutex_init(&device->subdevs_lock); INIT_LIST_HEAD(&device->subdevs); INIT_LIST_HEAD(&device->active); @@ -303,24 +393,18 @@ static int host1x_device_add(struct host1x *host1x, device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; device->dev.dma_mask = &device->dev.coherent_dma_mask; + dev_set_name(&device->dev, "%s", driver->driver.name); device->dev.release = host1x_device_release; - dev_set_name(&device->dev, "%s", driver->name); device->dev.bus = &host1x_bus_type; device->dev.parent = host1x->dev; - err = device_register(&device->dev); - if (err < 0) - return err; - - err = host1x_device_parse_dt(device); + err = host1x_device_parse_dt(device, driver); if (err < 0) { - device_unregister(&device->dev); + kfree(device); return err; } - mutex_lock(&host1x->devices_lock); list_add_tail(&device->list, &host1x->devices); - mutex_unlock(&host1x->devices_lock); mutex_lock(&clients_lock); @@ -347,51 +431,12 @@ static int host1x_device_add(struct host1x *host1x, static void host1x_device_del(struct host1x *host1x, struct host1x_device *device) { - struct host1x_subdev *subdev, *sd; - struct host1x_client *client, *cl; - - mutex_lock(&device->subdevs_lock); - - /* unregister subdevices */ - list_for_each_entry_safe(subdev, sd, &device->active, list) { - /* - * host1x_subdev_unregister() will remove the client from - * any lists, so we'll need to manually add it back to the - * list of idle clients. - * - * XXX: Alternatively, perhaps don't remove the client from - * any lists in host1x_subdev_unregister() and instead do - * that explicitly from host1x_unregister_client()? 
- */ - client = subdev->client; - - __host1x_subdev_unregister(device, subdev); - - /* add the client to the list of idle clients */ - mutex_lock(&clients_lock); - list_add_tail(&client->list, &clients); - mutex_unlock(&clients_lock); + if (device->registered) { + device->registered = false; + device_del(&device->dev); } - /* remove subdevices */ - list_for_each_entry_safe(subdev, sd, &device->subdevs, list) - host1x_subdev_del(subdev); - - mutex_unlock(&device->subdevs_lock); - - /* move clients to idle list */ - mutex_lock(&clients_lock); - mutex_lock(&device->clients_lock); - - list_for_each_entry_safe(client, cl, &device->clients, list) - list_move_tail(&client->list, &clients); - - mutex_unlock(&device->clients_lock); - mutex_unlock(&clients_lock); - - /* finally remove the device */ - list_del_init(&device->list); - device_unregister(&device->dev); + put_device(&device->dev); } static void host1x_attach_driver(struct host1x *host1x, @@ -409,11 +454,11 @@ static void host1x_attach_driver(struct host1x *host1x, } } - mutex_unlock(&host1x->devices_lock); - err = host1x_device_add(host1x, driver); if (err < 0) dev_err(host1x->dev, "failed to allocate device: %d\n", err); + + mutex_unlock(&host1x->devices_lock); } static void host1x_detach_driver(struct host1x *host1x, @@ -466,7 +511,8 @@ int host1x_unregister(struct host1x *host1x) return 0; } -int host1x_driver_register(struct host1x_driver *driver) +int host1x_driver_register_full(struct host1x_driver *driver, + struct module *owner) { struct host1x *host1x; @@ -483,9 +529,12 @@ int host1x_driver_register(struct host1x_driver *driver) mutex_unlock(&devices_lock); - return 0; + driver->driver.bus = &host1x_bus_type; + driver->driver.owner = owner; + + return driver_register(&driver->driver); } -EXPORT_SYMBOL(host1x_driver_register); +EXPORT_SYMBOL(host1x_driver_register_full); void host1x_driver_unregister(struct host1x_driver *driver) { diff --git a/drivers/gpu/host1x/bus.h b/drivers/gpu/host1x/bus.h index 4099e99212c8..88fb1c4aac68 100644 --- a/drivers/gpu/host1x/bus.h +++ b/drivers/gpu/host1x/bus.h @@ -18,10 +18,10 @@ #ifndef HOST1X_BUS_H #define HOST1X_BUS_H +struct bus_type; struct host1x; -int host1x_bus_init(void); -void host1x_bus_exit(void); +extern struct bus_type host1x_bus_type; int host1x_register(struct host1x *host1x); int host1x_unregister(struct host1x *host1x); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 2529908d304b..53d3d1d45b48 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -216,7 +216,7 @@ static int __init tegra_host1x_init(void) { int err; - err = host1x_bus_init(); + err = bus_register(&host1x_bus_type); if (err < 0) return err; @@ -233,7 +233,7 @@ static int __init tegra_host1x_init(void) unregister_host1x: platform_driver_unregister(&tegra_host1x_driver); unregister_bus: - host1x_bus_exit(); + bus_unregister(&host1x_bus_type); return err; } module_init(tegra_host1x_init); @@ -242,7 +242,7 @@ static void __exit tegra_host1x_exit(void) { platform_driver_unregister(&tegra_mipi_driver); platform_driver_unregister(&tegra_host1x_driver); - host1x_bus_exit(); + bus_unregister(&host1x_bus_type); } module_exit(tegra_host1x_exit); diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index f707d25ae78f..67bab5c36056 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -742,7 +742,7 @@ static struct ipu_devtype ipu_type_imx51 = { .tpm_ofs = 0x1f060000, .csi0_ofs = 0x1f030000, .csi1_ofs = 0x1f038000, - .ic_ofs = 
0x1f020000, + .ic_ofs = 0x1e020000, .disp0_ofs = 0x1e040000, .disp1_ofs = 0x1e048000, .dc_tmpl_ofs = 0x1f080000, @@ -758,7 +758,7 @@ static struct ipu_devtype ipu_type_imx53 = { .tpm_ofs = 0x07060000, .csi0_ofs = 0x07030000, .csi1_ofs = 0x07038000, - .ic_ofs = 0x07020000, + .ic_ofs = 0x06020000, .disp0_ofs = 0x06040000, .disp1_ofs = 0x06048000, .dc_tmpl_ofs = 0x07080000, diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index 323203d0503a..4864f8300797 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -277,7 +277,8 @@ static irqreturn_t dc_irq_handler(int irq, void *dev_id) void ipu_dc_disable_channel(struct ipu_dc *dc) { struct ipu_dc_priv *priv = dc->priv; - int irq, ret; + int irq; + unsigned long ret; u32 val; /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */ @@ -292,7 +293,7 @@ void ipu_dc_disable_channel(struct ipu_dc *dc) enable_irq(irq); ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50)); disable_irq(irq); - if (ret <= 0) { + if (ret == 0) { dev_warn(priv->dev, "DC stop timeout after 50 ms\n"); val = readl(dc->base + DC_WR_CH_CONF); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6529c09c46f0..a7de26d1ac80 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON for those channels specified in the map. This map can be provided either via platform data or the device tree bindings. +config SENSORS_I5500 + tristate "Intel 5500/5520/X58 temperature sensor" + depends on X86 && PCI + help + If you say yes here you get support for the temperature + sensor inside the Intel 5500, 5520 and X58 chipsets. + + This driver can also be built as a module. If so, the module + will be called i5500_temp. + config SENSORS_CORETEMP tristate "Intel Core/Core2/Atom temperature sensor" depends on X86 diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 67280643bcf0..6c941472e707 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o obj-$(CONFIG_SENSORS_HTU21) += htu21.o obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o +obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c new file mode 100644 index 000000000000..3e3ccbf18b4e --- /dev/null +++ b/drivers/hwmon/i5500_temp.c @@ -0,0 +1,149 @@ +/* + * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor + * + * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
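One note on the ipu-dc change above: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and otherwise the remaining jiffies, so storing the result in an int and testing "ret <= 0" was wrong. A minimal usage sketch (the completion and messages are illustrative):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static DECLARE_COMPLETION(demo_done);

    static int demo_wait_for_stop(void)
    {
            unsigned long left;

            /* 0 means the 50 ms timeout expired; non-zero is jiffies left. */
            left = wait_for_completion_timeout(&demo_done,
                                               msecs_to_jiffies(50));
            if (left == 0) {
                    pr_warn("demo: stop timeout after 50 ms\n");
                    return -ETIMEDOUT;
            }

            return 0;
    }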
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> + +/* Register definitions from datasheet */ +#define REG_TSTHRCATA 0xE2 +#define REG_TSCTRL 0xE8 +#define REG_TSTHRRPEX 0xEB +#define REG_TSTHRLO 0xEC +#define REG_TSTHRHI 0xEE +#define REG_CTHINT 0xF0 +#define REG_TSFSC 0xF3 +#define REG_CTSTS 0xF4 +#define REG_TSTHRRQPI 0xF5 +#define REG_CTCTRL 0xF7 +#define REG_TSTIMER 0xF8 + +/* + * Sysfs stuff + */ + +/* Sensor resolution : 0.5 degree C */ +static ssize_t show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + long temp; + u16 tsthrhi; + s8 tsfsc; + + pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi); + pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); + temp = ((long)tsthrhi - tsfsc) * 500; + + return sprintf(buf, "%ld\n", temp); +} + +static ssize_t show_thresh(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + int reg = to_sensor_dev_attr(devattr)->index; + long temp; + u16 tsthr; + + pci_read_config_word(pdev, reg, &tsthr); + temp = tsthr * 500; + + return sprintf(buf, "%ld\n", temp); +} + +static ssize_t show_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev->parent); + int nr = to_sensor_dev_attr(devattr)->index; + u8 ctsts; + + pci_read_config_byte(pdev, REG_CTSTS, &ctsts); + return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr)); +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2); +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE); +static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); + +static struct attribute *i5500_temp_attrs[] = { + &dev_attr_temp1_input.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(i5500_temp); + +static const struct pci_device_id i5500_temp_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(pci, i5500_temp_ids); + +static int i5500_temp_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct device *hwmon_dev; + u32 tstimer; + s8 tsfsc; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable device\n"); + return err; + } + + pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); + pci_read_config_dword(pdev, REG_TSTIMER, &tstimer); + if (tsfsc == 0x7F && tstimer == 0x07D30D40) { + dev_notice(&pdev->dev, "Sensor seems to be disabled\n"); + return -ENODEV; + } + + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, + "intel5500", NULL, + i5500_temp_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static struct pci_driver i5500_temp_driver = { + .name = "i5500_temp", + .id_table = i5500_temp_ids, + .probe = i5500_temp_probe, +}; + +module_pci_driver(i5500_temp_driver); + +MODULE_AUTHOR("Jean Delvare 
<jdelvare@suse.de>"); +MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index d111ac779c40..63cd031b2c28 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -28,7 +28,7 @@ #define AT91_AIC_IRQ_MIN_PRIORITY 0 #define AT91_AIC_IRQ_MAX_PRIORITY 7 -#define AT91_AIC_SRCTYPE GENMASK(7, 6) +#define AT91_AIC_SRCTYPE GENMASK(6, 5) #define AT91_AIC_SRCTYPE_LOW (0 << 5) #define AT91_AIC_SRCTYPE_FALLING (1 << 5) #define AT91_AIC_SRCTYPE_HIGH (2 << 5) @@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val) return -EINVAL; } - *val &= AT91_AIC_SRCTYPE; + *val &= ~AT91_AIC_SRCTYPE; *val |= aic_type; return 0; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 86e4684adeb1..d8996bdf0f61 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, * of two entries. No, the architecture doesn't let you * express an ITT with a single entry. */ - nr_ites = max(2, roundup_pow_of_two(nvecs)); + nr_ites = max(2UL, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kmalloc(sz, GFP_KERNEL); diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 29b8f21b74d0..6bc2deb73d53 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent) * It will be refined as each CPU probes its ID. */ for (i = 0; i < NR_HIP04_CPU_IF; i++) - hip04_cpu_map[i] = 0xff; + hip04_cpu_map[i] = 0xffff; /* * Find out how many interrupts are supported. 
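On the irq-atmel-aic-common fix further up: the source-type values are encoded as (x << 5), i.e. the field lives in bits 6:5, so the mask has to be GENMASK(6, 5), and the old field must be cleared with "&= ~mask" before the new type is OR-ed in. The generic read-modify-write pattern, sketched with illustrative register/field names:

    #include <linux/bitops.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_SRCTYPE            GENMASK(6, 5)   /* field occupies bits 6:5 */
    #define DEMO_SRCTYPE_HIGH       (2 << 5)

    /* Classic read-modify-write of a single register field. */
    static void demo_set_srctype(void __iomem *reg, u32 type)
    {
            u32 val = readl_relaxed(reg);

            val &= ~DEMO_SRCTYPE;           /* clear the old field ...     */
            val |= type & DEMO_SRCTYPE;     /* ... then insert the new one */
            writel_relaxed(val, reg);
    }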
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 7e342df6a62f..0b0d2c00a2df 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node, return -ENOMEM; chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol"); - if (!chip_data->intpol_base) { + if (IS_ERR(chip_data->intpol_base)) { pr_err("mtk_sysirq: unable to map sysirq register\n"); - ret = -ENOMEM; + ret = PTR_ERR(chip_data->intpol_base); goto out_free; } diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index 28718d3e8281..c03f140acbae 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node) return ret; } -static int __init omap_init_irq_legacy(u32 base) +static int __init omap_init_irq_legacy(u32 base, struct device_node *node) { int j, irq_base; @@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base) irq_base = 0; } - domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0, + domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, &irq_domain_simple_ops, NULL); omap_irq_soft_reset(); @@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node) { int ret; - if (node) + /* + * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c + * depends is still not ready for linear IRQ domains; because of that + * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using + * linear IRQ Domain until that driver is finally fixed. + */ + if (of_device_is_compatible(node, "ti,omap2-intc") || + of_device_is_compatible(node, "ti,omap3-intc")) { + struct resource res; + + if (of_address_to_resource(node, 0, &res)) + return -ENOMEM; + + base = res.start; + ret = omap_init_irq_legacy(base, node); + } else if (node) { ret = omap_init_irq_of(node); - else - ret = omap_init_irq_legacy(base); + } else { + ret = omap_init_irq_legacy(base, NULL); + } if (ret == 0) omap_irq_enable_protection(); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9fc616c2755e..21b156242e42 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -94,6 +94,9 @@ struct cache_disk_superblock { } __packed; struct dm_cache_metadata { + atomic_t ref_count; + struct list_head list; + struct block_device *bdev; struct dm_block_manager *bm; struct dm_space_map *metadata_sm; @@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags) /*----------------------------------------------------------------*/ -struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, - sector_t data_block_size, - bool may_format_device, - size_t policy_hint_size) +static struct dm_cache_metadata *metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) { int r; struct dm_cache_metadata *cmd; @@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, return NULL; } + atomic_set(&cmd->ref_count, 1); init_rwsem(&cmd->root_lock); cmd->bdev = bdev; cmd->data_block_size = data_block_size; @@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, return cmd; } +/* + * We keep a little list of ref counted metadata objects to prevent two + * different target instances creating separate bufio 
instances. This is + * an issue if a table is reloaded before the suspend. + */ +static DEFINE_MUTEX(table_lock); +static LIST_HEAD(table); + +static struct dm_cache_metadata *lookup(struct block_device *bdev) +{ + struct dm_cache_metadata *cmd; + + list_for_each_entry(cmd, &table, list) + if (cmd->bdev == bdev) { + atomic_inc(&cmd->ref_count); + return cmd; + } + + return NULL; +} + +static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd, *cmd2; + + mutex_lock(&table_lock); + cmd = lookup(bdev); + mutex_unlock(&table_lock); + + if (cmd) + return cmd; + + cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); + if (cmd) { + mutex_lock(&table_lock); + cmd2 = lookup(bdev); + if (cmd2) { + mutex_unlock(&table_lock); + __destroy_persistent_data_objects(cmd); + kfree(cmd); + return cmd2; + } + list_add(&cmd->list, &table); + mutex_unlock(&table_lock); + } + + return cmd; +} + +static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) +{ + if (cmd->data_block_size != data_block_size) { + DMERR("data_block_size (%llu) different from that in metadata (%llu)\n", + (unsigned long long) data_block_size, + (unsigned long long) cmd->data_block_size); + return false; + } + + return true; +} + +struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, + may_format_device, policy_hint_size); + if (cmd && !same_params(cmd, data_block_size)) { + dm_cache_metadata_close(cmd); + return NULL; + } + + return cmd; +} + void dm_cache_metadata_close(struct dm_cache_metadata *cmd) { - __destroy_persistent_data_objects(cmd); - kfree(cmd); + if (atomic_dec_and_test(&cmd->ref_count)) { + mutex_lock(&table_lock); + list_del(&cmd->list); + mutex_unlock(&table_lock); + + __destroy_persistent_data_objects(cmd); + kfree(cmd); + } } /* diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1e96d7889f51..e1650539cc2f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -221,7 +221,13 @@ struct cache { struct list_head need_commit_migrations; sector_t migration_threshold; wait_queue_head_t migration_wait; - atomic_t nr_migrations; + atomic_t nr_allocated_migrations; + + /* + * The number of in flight migrations that are performing + * background io. eg, promotion, writeback. 
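The dm-cache-metadata change above keeps every open metadata object on a global, mutex-protected list keyed by the block device, so a table reload finds and re-uses the existing object (taking a reference) instead of creating a second bufio client; the close path drops the reference and only frees on the last put. A stripped-down sketch of that lookup-or-create pattern with hypothetical names (locking slightly simplified relative to the patch):

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct demo_md {
            atomic_t ref_count;
            struct list_head list;
            void *key;                      /* e.g. the underlying bdev */
    };

    static DEFINE_MUTEX(demo_table_lock);
    static LIST_HEAD(demo_table);

    static struct demo_md *demo_lookup(void *key)   /* table lock held */
    {
            struct demo_md *md;

            list_for_each_entry(md, &demo_table, list)
                    if (md->key == key) {
                            atomic_inc(&md->ref_count);
                            return md;
                    }
            return NULL;
    }

    static struct demo_md *demo_get(void *key)
    {
            struct demo_md *md, *winner;

            mutex_lock(&demo_table_lock);
            md = demo_lookup(key);
            mutex_unlock(&demo_table_lock);
            if (md)
                    return md;

            md = kzalloc(sizeof(*md), GFP_KERNEL);
            if (!md)
                    return NULL;
            atomic_set(&md->ref_count, 1);
            md->key = key;

            mutex_lock(&demo_table_lock);
            winner = demo_lookup(key);      /* lost a race? use the winner */
            if (winner) {
                    mutex_unlock(&demo_table_lock);
                    kfree(md);
                    return winner;
            }
            list_add(&md->list, &demo_table);
            mutex_unlock(&demo_table_lock);
            return md;
    }

    static void demo_put(struct demo_md *md)
    {
            mutex_lock(&demo_table_lock);
            if (atomic_dec_and_test(&md->ref_count)) {
                    list_del(&md->list);
                    kfree(md);
            }
            mutex_unlock(&demo_table_lock);
    }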
+ */ + atomic_t nr_io_migrations; wait_queue_head_t quiescing_wait; atomic_t quiescing; @@ -258,7 +264,6 @@ struct cache { struct dm_deferred_set *all_io_ds; mempool_t *migration_pool; - struct dm_cache_migration *next_migration; struct dm_cache_policy *policy; unsigned policy_nr_args; @@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel dm_bio_prison_free_cell(cache->prison, cell); } +static struct dm_cache_migration *alloc_migration(struct cache *cache) +{ + struct dm_cache_migration *mg; + + mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + if (mg) { + mg->cache = cache; + atomic_inc(&mg->cache->nr_allocated_migrations); + } + + return mg; +} + +static void free_migration(struct dm_cache_migration *mg) +{ + if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) + wake_up(&mg->cache->migration_wait); + + mempool_free(mg, mg->cache->migration_pool); +} + static int prealloc_data_structs(struct cache *cache, struct prealloc *p) { if (!p->mg) { - p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + p->mg = alloc_migration(cache); if (!p->mg) return -ENOMEM; } @@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p) free_prison_cell(cache, p->cell1); if (p->mg) - mempool_free(p->mg, cache->migration_pool); + free_migration(p->mg); } static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) @@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, * Migration covers moving data from the origin device to the cache, or * vice versa. *--------------------------------------------------------------*/ -static void free_migration(struct dm_cache_migration *mg) -{ - mempool_free(mg, mg->cache->migration_pool); -} - -static void inc_nr_migrations(struct cache *cache) +static void inc_io_migrations(struct cache *cache) { - atomic_inc(&cache->nr_migrations); + atomic_inc(&cache->nr_io_migrations); } -static void dec_nr_migrations(struct cache *cache) +static void dec_io_migrations(struct cache *cache) { - atomic_dec(&cache->nr_migrations); - - /* - * Wake the worker in case we're suspending the target. 
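The dm-cache-target rework above splits the old nr_migrations counter in two: nr_allocated_migrations covers every migration structure that exists (suspend waits for it to reach zero), while nr_io_migrations only counts migrations doing background I/O and is what the bandwidth throttle looks at. The alloc/free accounting with a wake-up on the last free, reduced to its core (names illustrative):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t demo_nr_allocated = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(demo_migration_wait);

    static void demo_migration_alloc_account(void)
    {
            atomic_inc(&demo_nr_allocated);
    }

    static void demo_migration_free_account(void)
    {
            /* wake a suspending caller once the last migration is gone */
            if (atomic_dec_and_test(&demo_nr_allocated))
                    wake_up(&demo_migration_wait);
    }

    static void demo_wait_for_migrations(void)
    {
            wait_event(demo_migration_wait,
                       !atomic_read(&demo_nr_allocated));
    }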
- */ - wake_up(&cache->migration_wait); + atomic_dec(&cache->nr_io_migrations); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, @@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, wake_worker(cache); } -static void cleanup_migration(struct dm_cache_migration *mg) +static void free_io_migration(struct dm_cache_migration *mg) { - struct cache *cache = mg->cache; + dec_io_migrations(mg->cache); free_migration(mg); - dec_nr_migrations(cache); } static void migration_failure(struct dm_cache_migration *mg) @@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg) cell_defer(cache, mg->new_ocell, true); } - cleanup_migration(mg); + free_io_migration(mg); } static void migration_success_pre_commit(struct dm_cache_migration *mg) @@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) if (mg->writeback) { clear_dirty(cache, mg->old_oblock, mg->cblock); cell_defer(cache, mg->old_ocell, false); - cleanup_migration(mg); + free_io_migration(mg); return; } else if (mg->demote) { @@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) mg->old_oblock); if (mg->promote) cell_defer(cache, mg->new_ocell, true); - cleanup_migration(mg); + free_io_migration(mg); return; } } else { if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); policy_remove_mapping(cache->policy, mg->new_oblock); - cleanup_migration(mg); + free_io_migration(mg); return; } } @@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) } else { if (mg->invalidate) policy_remove_mapping(cache->policy, mg->old_oblock); - cleanup_migration(mg); + free_io_migration(mg); } } else { @@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) bio_endio(mg->new_ocell->holder, 0); cell_defer(cache, mg->new_ocell, false); } - cleanup_migration(mg); + free_io_migration(mg); } } @@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs, mg->new_ocell = cell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs, mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs, mg->new_ocell = new_ocell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs, mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs, static bool spare_migration_bandwidth(struct cache *cache) { - sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * + sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * cache->sectors_per_block; return current_volume < cache->migration_threshold; } @@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache) static void wait_for_migrations(struct cache *cache) { - wait_event(cache->migration_wait, 
!atomic_read(&cache->nr_migrations)); + wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); } static void stop_worker(struct cache *cache) @@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache) { unsigned i; - if (cache->next_migration) - mempool_free(cache->next_migration, cache->migration_pool); - if (cache->migration_pool) mempool_destroy(cache->migration_pool); @@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - atomic_set(&cache->nr_migrations, 0); + atomic_set(&cache->nr_allocated_migrations, 0); + atomic_set(&cache->nr_io_migrations, 0); init_waitqueue_head(&cache->migration_wait); init_waitqueue_head(&cache->quiescing_wait); @@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->next_migration = NULL; - cache->need_tick_bio = true; cache->sized = false; cache->invalidate = false; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b98cd9d84435..2caf5b374649 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -206,6 +206,9 @@ struct mapped_device { /* zero-length flush that will be cloned and submitted to targets */ struct bio flush_bio; + /* the number of internal suspends */ + unsigned internal_suspend_count; + struct dm_stats stats; }; @@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla { struct dm_table *map = NULL; - if (dm_suspended_internally_md(md)) + if (md->internal_suspend_count++) return; /* nested internal suspend */ if (dm_suspended_md(md)) { @@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla static void __dm_internal_resume(struct mapped_device *md) { - if (!dm_suspended_internally_md(md)) + BUG_ON(!md->internal_suspend_count); + + if (--md->internal_suspend_count) return; /* resume from nested internal suspend */ if (dm_suspended_md(md)) diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index db99ca2613ba..06931f6fa26c 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c @@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = { .portb = CX23885_MPEG_DVB, }, [CX23885_BOARD_HAUPPAUGE_HVR4400] = { - .name = "Hauppauge WinTV-HVR4400", + .name = "Hauppauge WinTV-HVR4400/HVR5500", .porta = CX23885_ANALOG_VIDEO, .portb = CX23885_MPEG_DVB, .portc = CX23885_MPEG_DVB, @@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = { .tuner_addr = 0x60, /* 0xc0 >> 1 */ .tuner_bus = 1, }, + [CX23885_BOARD_HAUPPAUGE_STARBURST] = { + .name = "Hauppauge WinTV Starburst", + .portb = CX23885_MPEG_DVB, + }, [CX23885_BOARD_AVERMEDIA_HC81R] = { .name = "AVerTV Hybrid Express Slim HC81R", .tuner_type = TUNER_XC2028, @@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = { }, { .subvendor = 0x0070, .subdevice = 0xc108, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc138, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc12a, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* 
Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */ }, { .subvendor = 0x0070, .subdevice = 0xc1f8, - .card = CX23885_BOARD_HAUPPAUGE_HVR4400, + .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ }, { .subvendor = 0x1461, .subdevice = 0xd939, @@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ break; case CX23885_BOARD_HAUPPAUGE_HVR4400: + case CX23885_BOARD_HAUPPAUGE_STARBURST: /* GPIO-8 tda10071 demod reset */ - /* GPIO-9 si2165 demod reset */ + /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/ /* Put the parts into reset and back */ cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1); @@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_HAUPPAUGE_HVR1850: case CX23885_BOARD_HAUPPAUGE_HVR1290: case CX23885_BOARD_HAUPPAUGE_HVR4400: + case CX23885_BOARD_HAUPPAUGE_STARBURST: case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: if (dev->i2c_bus[0].i2c_rc == 0) hauppauge_eeprom(dev, eeprom+0xc0); @@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev) ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; break; + case CX23885_BOARD_HAUPPAUGE_STARBURST: + ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ + ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ + ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; + break; case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_T982: ts1->gen_ctrl_val = 0x5; /* Parallel */ diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 1d9d0f86ca8c..1ad49946d7fa 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev) cx23885_shutdown(dev); - pci_disable_device(pci_dev); - /* unregister stuff */ free_irq(pci_dev->irq, dev); + pci_disable_device(pci_dev); + cx23885_dev_unregister(dev); vb2_dma_sg_cleanup_ctx(dev->alloc_ctx); v4l2_ctrl_handler_free(&dev->ctrl_handler); diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index c47d18270cfc..a9c450d4b54e 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c @@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port) break; } break; + case CX23885_BOARD_HAUPPAUGE_STARBURST: + i2c_bus = &dev->i2c_bus[0]; + fe0->dvb.frontend = dvb_attach(tda10071_attach, + &hauppauge_tda10071_config, + &i2c_bus->i2c_adap); + if (fe0->dvb.frontend != NULL) { + dvb_attach(a8293_attach, fe0->dvb.frontend, + &i2c_bus->i2c_adap, + &hauppauge_a8293_config); + } + break; case CX23885_BOARD_DVBSKY_T9580: case CX23885_BOARD_DVBSKY_S950: i2c_bus = &dev->i2c_bus[0]; diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index f55cd12da0fd..36f2f96c40e4 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -99,6 +99,7 @@ #define CX23885_BOARD_DVBSKY_S950 49 #define CX23885_BOARD_DVBSKY_S952 50 #define CX23885_BOARD_DVBSKY_T982 51 +#define CX23885_BOARD_HAUPPAUGE_STARBURST 52 #define GPIO_0 0x00000001 #define GPIO_1 0x00000002 diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index b463fe172d16..3fe9047ef466 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -602,10 
+602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) strlcpy(cap->card, video->video.name, sizeof(cap->card)); strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT + | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS; + if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; else - cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index 8efe40337608..6d885239b16a 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c @@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici, { strcpy(cap->driver, "atmel-isi"); strcpy(cap->card, "Atmel Image Sensor Interface"); - cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_STREAMING); + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c index ce72bd26a6ac..192377f55840 100644 --- a/drivers/media/platform/soc_camera/mx2_camera.c +++ b/drivers/media/platform/soc_camera/mx2_camera.c @@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c index a60c3bb0e4cc..0b3299dee05d 100644 --- a/drivers/media/platform/soc_camera/mx3_camera.c +++ b/drivers/media/platform/soc_camera/mx3_camera.c @@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the firendly caller:-> */ strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index e6b93281f246..16f65ecb70a3 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c @@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici, { /* cap->name is set by the friendly caller:-> */ strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c index 951226af0eba..8d6e343fec0f 100644 --- a/drivers/media/platform/soc_camera/pxa_camera.c +++ b/drivers/media/platform/soc_camera/pxa_camera.c @@ -1576,7 +1576,8 @@ static int 
pxa_camera_querycap(struct soc_camera_host *ici, { /* cap->name is set by the firendly caller:-> */ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index 0c1f55648106..9f1473c0a0cf 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c @@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 8b27b3eb2b25..71787702d4a2 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c @@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + return 0; } diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 0f345b1f9014..f327c49d7e09 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = { { "Mygica T230 DVB-T/T2/C", { NULL }, - { &cxusb_table[22], NULL }, + { &cxusb_table[20], NULL }, }, } }; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 1b158f1167ed..536210b39428 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... 
PVR_NUM-1] = -1}; module_param_array(vbi_nr, int, NULL, 0444); MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); -static struct v4l2_capability pvr_capability ={ - .driver = "pvrusb2", - .card = "Hauppauge WinTV pvr-usb2", - .bus_info = "usb", - .version = LINUX_VERSION_CODE, - .capabilities = (V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | - V4L2_CAP_READWRITE), -}; - static struct v4l2_fmtdesc pvr_fmtdesc [] = { { .index = 0, @@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability * struct pvr2_v4l2_fh *fh = file->private_data; struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; - memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); + strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver)); strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw), sizeof(cap->bus_info)); strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card)); + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | + V4L2_CAP_AUDIO | V4L2_CAP_RADIO | + V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS; + switch (fh->pdi->devbase.vfl_type) { + case VFL_TYPE_GRABBER: + cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO; + break; + case VFL_TYPE_RADIO: + cap->device_caps = V4L2_CAP_RADIO; + break; + } + cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE; return 0; } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index d09a8916e940..bc08a829bc13 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -3146,27 +3146,26 @@ static int vb2_thread(void *data) prequeue--; } else { call_void_qop(q, wait_finish, q); - ret = vb2_internal_dqbuf(q, &fileio->b, 0); + if (!threadio->stop) + ret = vb2_internal_dqbuf(q, &fileio->b, 0); call_void_qop(q, wait_prepare, q); dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); } - if (threadio->stop) - break; - if (ret) + if (ret || threadio->stop) break; try_to_freeze(); vb = q->bufs[fileio->b.index]; if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) - ret = threadio->fnc(vb, threadio->priv); - if (ret) - break; + if (threadio->fnc(vb, threadio->priv)) + break; call_void_qop(q, wait_finish, q); if (set_timestamp) v4l2_get_timestamp(&fileio->b.timestamp); - ret = vb2_internal_qbuf(q, &fileio->b); + if (!threadio->stop) + ret = vb2_internal_qbuf(q, &fileio->b); call_void_qop(q, wait_prepare, q); - if (ret) + if (ret || threadio->stop) break; } @@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q) threadio->stop = true; vb2_internal_streamoff(q, q->type); call_void_qop(q, wait_prepare, q); + err = kthread_stop(threadio->thread); q->fileio = NULL; fileio->req.count = 0; vb2_reqbufs(q, &fileio->req); kfree(fileio); - err = kthread_stop(threadio->thread); threadio->thread = NULL; kfree(threadio); q->fileio = NULL; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev) c_can_irq_control(priv, false); + /* put ctrl to init on stop to end ongoing transmission */ + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); + /* deactivate pins */ pinctrl_pm_select_sleep_state(dev->dev.parent); priv->can.state = CAN_STATE_STOPPED; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index c32cd61073bc..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ 
b/drivers/net/can/usb/kvaser_usb.c @@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), buf, msg->len, - kvaser_usb_simple_msg_callback, priv); + kvaser_usb_simple_msg_callback, netdev); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv = dev->nets[channel]; stats = &priv->netdev->stats; - if (status & M16C_STATE_BUS_RESET) { - kvaser_usb_unlink_tx_urbs(priv); - return; - } - skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; @@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); - if (status & M16C_STATE_BUS_OFF) { + if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; @@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, } new_state = CAN_STATE_ERROR_PASSIVE; - } - - if (status == M16C_STATE_BUS_ERROR) { + } else if (status & M16C_STATE_BUS_ERROR) { if ((priv->can.state < CAN_STATE_ERROR_WARNING) && ((txerr >= 96) || (rxerr >= 96))) { cf->can_id |= CAN_ERR_CRTL; @@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv->can.can_stats.error_warning++; new_state = CAN_STATE_ERROR_WARNING; - } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { + } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && + ((txerr < 96) && (rxerr < 96))) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; @@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, { struct kvaser_usb *dev; int err = -ENOMEM; - int i; + int i, retry = 3; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) @@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); - err = kvaser_usb_get_software_info(dev); + /* On some x86 laptops, plugging a Kvaser device again after + * an unplug makes the firmware always ignore the very first + * command. For such a case, provide some room for retries + * instead of completely exiting the driver. 
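The kvaser_usb probe change above retries the first firmware query a few times because some hosts make the device ignore the very first command after a replug. The same bounded-retry idea as a generic helper (purely illustrative, not part of the patch):

    #include <linux/errno.h>

    /* Retry an operation that may spuriously time out right after plug-in. */
    static int demo_retry_on_timeout(int (*op)(void *arg), void *arg, int tries)
    {
            int err;

            do {
                    err = op(arg);
            } while (err == -ETIMEDOUT && --tries);

            return err;
    }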
+ */ + do { + err = kvaser_usb_get_software_info(dev); + } while (--retry && err == -ETIMEDOUT); + if (err) { dev_err(&intf->dev, "Cannot get software infos, error %d\n", err); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -767,16 +767,17 @@ #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 #define MTL_Q_RQDR 0x4c +#define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQFCR_RFA_INDEX 1 +#define MTL_Q_RQFCR_RFA_WIDTH 6 +#define MTL_Q_RQFCR_RFD_INDEX 17 +#define MTL_Q_RQFCR_RFD_WIDTH 6 #define MTL_Q_RQOMR_EHFC_INDEX 7 #define MTL_Q_RQOMR_EHFC_WIDTH 1 -#define MTL_Q_RQOMR_RFA_INDEX 8 -#define MTL_Q_RQOMR_RFA_WIDTH 3 -#define MTL_Q_RQOMR_RFD_INDEX 13 -#define MTL_Q_RQOMR_RFD_WIDTH 3 #define MTL_Q_RQOMR_RQS_INDEX 16 #define MTL_Q_RQOMR_RQS_WIDTH 9 #define MTL_Q_RQOMR_RSF_INDEX 5 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->rx_q_count; i++) { /* Activate flow control when less than 4k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); /* De-activate flow control when more than 6k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) } #endif if (!bnx2x_fp_lock_napi(fp)) - return work_done; + return budget; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) int err; if (!enic_poll_lock_napi(&enic->rq[rq])) - return work_done; + return budget; /* Service RQ */ diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +#define DESC_DMA_MAP_SINGLE 0 +#define DESC_DMA_MAP_PAGE 1 + /* * RX/TX descriptors. 
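The mv643xx_eth fix above records, per TX descriptor, whether the buffer was mapped with dma_map_single() or with skb_frag_dma_map() (a page mapping), so that reclaim can call the matching unmap routine. A condensed sketch of that bookkeeping, with the structure trimmed to the relevant fields:

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    #define DEMO_MAP_SINGLE 0
    #define DEMO_MAP_PAGE   1

    struct demo_txq {
            struct device *dma_dev;
            dma_addr_t *buf_dma;    /* per-descriptor DMA address       */
            u32 *buf_len;           /* per-descriptor buffer length     */
            char *map_type;         /* DEMO_MAP_SINGLE or DEMO_MAP_PAGE */
    };

    /* Unmap one reclaimed descriptor with the API that mapped it. */
    static void demo_unmap_desc(struct demo_txq *txq, int i)
    {
            if (txq->map_type[i] == DEMO_MAP_PAGE)
                    dma_unmap_page(txq->dma_dev, txq->buf_dma[i],
                                   txq->buf_len[i], DMA_TO_DEVICE);
            else
                    dma_unmap_single(txq->dma_dev, txq->buf_dma[i],
                                     txq->buf_len[i], DMA_TO_DEVICE);
    }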
*/ @@ -362,6 +366,7 @@ struct tx_queue { dma_addr_t tso_hdrs_dma; struct tx_desc *tx_desc_area; + char *tx_desc_mapping; /* array to track the type of the dma mapping */ dma_addr_t tx_desc_dma; int tx_desc_area_size; @@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; desc->l4i_chk = 0; desc->byte_cnt = length; @@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) skb_frag_t *this_frag; int tx_index; struct tx_desc *desc; - void *addr; this_frag = &skb_shinfo(skb)->frags[frag]; - addr = page_address(this_frag->page.p) + this_frag->page_offset; tx_index = txq->tx_curr_desc++; if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; /* * The last fragment will generate an interrupt @@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) desc->l4i_chk = 0; desc->byte_cnt = skb_frag_size(this_frag); - desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, - desc->byte_cnt, DMA_TO_DEVICE); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, desc->byte_cnt, + DMA_TO_DEVICE); } } @@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, if (txq->tx_curr_desc == txq->tx_ring_size) txq->tx_curr_desc = 0; desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; if (nr_frags) { txq_submit_frag_skb(txq, skb); @@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) int tx_index; struct tx_desc *desc; u32 cmd_sts; + char desc_dma_map; tx_index = txq->tx_used_desc; desc = &txq->tx_desc_area[tx_index]; + desc_dma_map = txq->tx_desc_mapping[tx_index]; + cmd_sts = desc->cmd_sts; if (cmd_sts & BUFFER_OWNED_BY_DMA) { @@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) reclaimed++; txq->tx_desc_count--; - if (!IS_TSO_HEADER(txq, desc->buf_ptr)) - dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, - desc->byte_cnt, DMA_TO_DEVICE); + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { + + if (desc_dma_map == DESC_DMA_MAP_PAGE) + dma_unmap_page(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + } if (cmd_sts & TX_ENABLE_INTERRUPT) { struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); @@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) struct tx_queue *txq = mp->txq + index; struct tx_desc *tx_desc; int size; + int ret; int i; txq->index = index; @@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) nexti * sizeof(struct tx_desc); } + txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), + GFP_KERNEL); + if (!txq->tx_desc_mapping) { + ret = -ENOMEM; + goto err_free_desc_area; + } + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (txq->tso_hdrs == NULL) { - dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, - txq->tx_desc_area, txq->tx_desc_dma); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_desc_mapping; } skb_queue_head_init(&txq->tx_skb); return 
0; + +err_free_desc_mapping: + kfree(txq->tx_desc_mapping); +err_free_desc_area: + if (index == 0 && size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + return ret; } static void txq_deinit(struct tx_queue *txq) @@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq) else dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, txq->tx_desc_area, txq->tx_desc_dma); + kfree(txq->tx_desc_mapping); + if (txq->tso_hdrs) dma_free_coherent(mp->dev->dev.parent, txq->tx_ring_size * TSO_HEADER_SIZE, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) work_done = netxen_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6576243222af..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [TSU_ADRL31] = 0x01fc, }; +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev) int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; mdp->cur_rx = 0; mdp->cur_tx = 0; @@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev) /* skb */ mdp->rx_skbuff[i] = NULL; skb = netdev_alloc_skb(ndev, skbuff_size); - mdp->rx_skbuff[i] = skb; if (skb == NULL) break; sh_eth_set_receive_align(skb); @@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev) rxdesc = &mdp->rx_ring[i]; /* The size of the buffer is a multiple of 16 bytes. 
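The bnx2x, enic and netxen poll changes in this batch all enforce the same NAPI rule: if the poll callback could not finish its work (or could not even take its lock), it must report the full budget so the core keeps polling, and napi_complete() may only be called when work_done < budget. A schematic poll routine (the demo_* helpers are placeholders):

    #include <linux/netdevice.h>
    #include <linux/types.h>

    int demo_process_rx(struct napi_struct *napi, int budget);  /* placeholder */
    bool demo_tx_done(struct napi_struct *napi);                /* placeholder */
    void demo_enable_irqs(struct napi_struct *napi);            /* placeholder */

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int work_done = demo_process_rx(napi, budget);

            if (!demo_tx_done(napi))        /* TX not drained: keep polling */
                    work_done = budget;

            if (work_done < budget) {
                    napi_complete(napi);
                    demo_enable_irqs(napi);
            }

            return work_done;
    }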
*/ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); - dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, - DMA_FROM_DEVICE); - rxdesc->addr = virt_to_phys(skb->data); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[i] = skb; + rxdesc->addr = dma_addr; rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* Rx descriptor address set */ @@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) RFLR); sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); - if (start) + if (start) { + mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + } /* PAUSE Prohibition */ val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | @@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) return ret; } +static void sh_eth_dev_exit(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Deactivate all TX descriptors, so DMA should stop at next + * packet boundary if it's currently running + */ + for (i = 0; i < mdp->num_tx_ring; i++) + mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + + /* Disable TX FIFO egress to MAC */ + sh_eth_rcv_snd_disable(ndev); + + /* Stop RX DMA at next packet boundary */ + sh_eth_write(ndev, 0, EDRRR); + + /* Aside from TX DMA, we can't tell when the hardware is + * really stopped, so we need to reset to make sure. + * Before doing that, wait for long enough to *probably* + * finish transmitting the last packet and poll stats. + */ + msleep(2); /* max frame time at 10 Mbps < 1250 us */ + sh_eth_get_stats(ndev); + sh_eth_reset(ndev); +} + /* free Tx skb function */ static int sh_eth_txfree(struct net_device *ndev) { @@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) u16 pkt_len = 0; u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; boguscnt = min(boguscnt, *quota); limit = boguscnt; @@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); - dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, - ALIGN(mdp->rx_buf_sz, 16), - DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, rxdesc->addr, + ALIGN(mdp->rx_buf_sz, 16), + DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); @@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (mdp->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(ndev, skbuff_size); - mdp->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. 
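The sh_eth ring setup and refill above now map the receive buffer first, check dma_mapping_error(), and only then publish the skb in rx_skbuff[] and its address in the descriptor; on a mapping failure the skb is freed and refill stops. The order of operations, reduced to one slot (names simplified):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Refill one RX slot; returns -ENOMEM if it should be retried later. */
    static int demo_refill_rx_slot(struct net_device *ndev,
                                   struct sk_buff **slot, dma_addr_t *desc_addr,
                                   unsigned int buf_len)
    {
            struct sk_buff *skb = netdev_alloc_skb(ndev, buf_len);
            dma_addr_t dma;

            if (!skb)
                    return -ENOMEM;

            dma = dma_map_single(&ndev->dev, skb->data, buf_len,
                                 DMA_FROM_DEVICE);
            if (dma_mapping_error(&ndev->dev, dma)) {
                    kfree_skb(skb);         /* never publish a bad mapping */
                    return -ENOMEM;
            }

            *slot = skb;                    /* only now does the ring own it */
            *desc_addr = dma;
            return 0;
    }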
*/ sh_eth_set_receive_align(skb); - dma_map_single(&ndev->dev, skb->data, - rxdesc->buffer_length, DMA_FROM_DEVICE); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[entry] = skb; skb_checksum_none_assert(skb); - rxdesc->addr = virt_to_phys(skb->data); + rxdesc->addr = dma_addr; } if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= @@ -1573,7 +1617,6 @@ ignore_link: if (intr_status & EESR_RFRMER) { /* Receive Frame Overflow int */ ndev->stats.rx_frame_errors++; - netif_err(mdp, rx_err, ndev, "Receive Abort\n"); } } @@ -1592,13 +1635,11 @@ ignore_link: if (intr_status & EESR_RDE) { /* Receive Descriptor Empty int */ ndev->stats.rx_over_errors++; - netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); } if (intr_status & EESR_RFE) { /* Receive FIFO Overflow int */ ndev->stats.rx_fifo_errors++; - netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); } if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { @@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) ret = IRQ_HANDLED; else - goto other_irq; + goto out; + + if (!likely(mdp->irq_enabled)) { + sh_eth_write(ndev, 0, EESIPR); + goto out; + } if (intr_status & EESR_RX_CHECK) { if (napi_schedule_prep(&mdp->napi)) { @@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) sh_eth_error(ndev, intr_status); } -other_irq: +out: spin_unlock(&mdp->lock); return ret; @@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* Reenable Rx interrupts */ - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + if (mdp->irq_enabled) + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); out: return budget - quota; } @@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev, return -EINVAL; if (netif_running(ndev)) { + netif_device_detach(ndev); netif_tx_disable(ndev); - /* Disable interrupts by clearing the interrupt mask. */ - sh_eth_write(ndev, 0x0000, EESIPR); - /* Stop the chip's Tx and Rx processes. */ - sh_eth_write(ndev, 0, EDTRR); - sh_eth_write(ndev, 0, EDRRR); + + /* Serialise with the interrupt handler and NAPI, then + * disable interrupts. We have to clear the + * irq_enabled flag first to ensure that interrupts + * won't be re-enabled. + */ + mdp->irq_enabled = false; synchronize_irq(ndev->irq); - } + napi_synchronize(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); - /* Free all the skbuffs in the Rx queue. */ - sh_eth_ring_free(ndev); - /* Free DMA buffer */ - sh_eth_free_dma_buffer(mdp); + sh_eth_dev_exit(ndev); + + /* Free all the skbuffs in the Rx queue. 
*/ + sh_eth_ring_free(ndev); + /* Free DMA buffer */ + sh_eth_free_dma_buffer(mdp); + } /* Set new parameters */ mdp->num_rx_ring = ring->rx_pending; mdp->num_tx_ring = ring->tx_pending; - ret = sh_eth_ring_init(ndev); - if (ret < 0) { - netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); - return ret; - } - ret = sh_eth_dev_init(ndev, false); - if (ret < 0) { - netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); - return ret; - } - if (netif_running(ndev)) { + ret = sh_eth_ring_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", + __func__); + return ret; + } + ret = sh_eth_dev_init(ndev, false); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", + __func__); + return ret; + } + + mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); /* Setting the Rx mode will start the Rx process. */ sh_eth_write(ndev, EDRRR_R, EDRRR); - netif_wake_queue(ndev); + netif_device_attach(ndev); } return 0; @@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } spin_unlock_irqrestore(&mdp->lock, flags); + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + entry = mdp->cur_tx % mdp->num_tx_ring; mdp->tx_skbuff[entry] = skb; txdesc = &mdp->tx_ring[entry]; @@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) skb->len + 2); txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); - if (skb->len < ETH_ZLEN) - txdesc->buffer_length = ETH_ZLEN; - else - txdesc->buffer_length = skb->len; + if (dma_mapping_error(&ndev->dev, txdesc->addr)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + txdesc->buffer_length = skb->len; if (entry >= mdp->num_tx_ring - 1) txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); @@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev) netif_stop_queue(ndev); - /* Disable interrupts by clearing the interrupt mask. */ + /* Serialise with the interrupt handler and NAPI, then disable + * interrupts. We have to clear the irq_enabled flag first to + * ensure that interrupts won't be re-enabled. + */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); sh_eth_write(ndev, 0x0000, EESIPR); - /* Stop the chip's Tx and Rx processes. */ - sh_eth_write(ndev, 0, EDTRR); - sh_eth_write(ndev, 0, EDRRR); + sh_eth_dev_exit(ndev); - sh_eth_get_stats(ndev); /* PHY Disconnect */ if (mdp->phydev) { phy_stop(mdp->phydev); @@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev) free_irq(ndev->irq, ndev); - napi_disable(&mdp->napi); - /* Free all the skbuffs in the Rx queue. */ sh_eth_ring_free(ndev); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -513,6 +513,7 @@ struct sh_eth_private { u32 rx_buf_sz; /* Based on MTU+slack. */ int edmac_endian; struct napi_struct napi; + bool irq_enabled; /* MII transceiver section. 
*/ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) * @addr: iobase memory address * Description: this is the main probe function used to * call the alloc_etherdev, allocate the priv structure. + * Return: + * on success the new private structure is returned, otherwise the error + * pointer. */ struct stmmac_priv *stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, @@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, ndev = alloc_etherdev(sizeof(struct stmmac_priv)); if (!ndev) - return NULL; + return ERR_PTR(-ENOMEM); SET_NETDEV_DEV(ndev, device); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e068d48b0f21..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN interfaces as this can break the dual + * EMAC port separation + */ + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); return cpsw_add_vlan_ale_entry(priv, vid); } @@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(priv->ale, vid, 0); if (ret != 0) diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) }; dst = ip6_route_output(dev_net(dev), NULL, &fl6); - if (IS_ERR(dst)) + if (dst->error) { + ret = dst->error; + dst_release(dst); goto err; - + } skb_dst_drop(skb); skb_dst_set(skb, dst); err = ip6_local_out(skb); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) __ath_cancel_work(sc); + disable_irq(sc->irq); tasklet_disable(&sc->intr_tq); tasklet_disable(&sc->bcon_tasklet); spin_lock_bh(&sc->sc_pcu_lock); @@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) r = -EIO; out: + enable_irq(sc->irq); spin_unlock_bh(&sc->sc_pcu_lock); tasklet_enable(&sc->bcon_tasklet); tasklet_enable(&sc->intr_tq); @@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) return IRQ_NONE; - if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) - return IRQ_NONE; - /* shared irq, 
not for us */ if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; @@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev) ath9k_debug_sync_cause(sc, sync_cause); status &= ah->imask; /* discard unasked-for bits */ - if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) + if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) return IRQ_HANDLED; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, * regardless of the band or the number of the probes. FW will calculate * the actual dwell time. + * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), + IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -653,8 +653,11 @@ enum iwl_scan_channel_flags { }; /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S - * @flags: enum iwl_scan_channel_flgs - * @non_ebs_ratio: how many regular scan iteration before EBS + * @flags: enum iwl_scan_channel_flags + * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is + * involved. + * 1 - EBS is disabled. + * 2 - every second scan will be full scan(and so on). 
*/ struct iwl_scan_channel_opt { __le16 flags; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, msk |= mvmsta->tfd_queue_msk; } - if (drop) { - if (iwl_mvm_flush_tx_path(mvm, msk, true)) - IWL_ERR(mvm, "flush request fail\n"); - mutex_unlock(&mvm->mutex); - } else { - mutex_unlock(&mvm->mutex); + msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); - /* this can take a while, and we may need/want other operations - * to succeed while doing this, so do it without the mutex held - */ - iwl_trans_wait_tx_queue_empty(mvm->trans, msk); - } + if (iwl_mvm_flush_tx_path(mvm, msk, true)) + IWL_ERR(mvm, "flush request fail\n"); + mutex_unlock(&mvm->mutex); + + /* this can take a while, and we may need/want other operations + * to succeed while doing this, so do it without the mutex held + */ + iwl_trans_wait_tx_queue_empty(mvm->trans, msk); } const struct ieee80211_ops iwl_mvm_hw_ops = { diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -72,6 +72,8 @@ #define IWL_PLCP_QUIET_THRESH 1 #define IWL_ACTIVE_QUIET_TIME 10 +#define IWL_DENSE_EBS_SCAN_RATIO 5 +#define IWL_SPARSE_EBS_SCAN_RATIO 1 struct iwl_mvm_scan_params { u32 max_out_time; @@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, notify); + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) + goto out; + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || mvm->scan_status != IWL_MVM_SCAN_OS)) { @@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_OS) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); +out: mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); cmd->iter_num = cpu_to_le32(1); - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && - mvm->last_ebs_successful) { - cmd->channel_opt[0].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - cmd->channel_opt[1].flags = - cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); - } - if (iwl_mvm_rrm_scan_needed(mvm)) cmd->scan_flags |= cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); @@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, cmd->schedule[1].iterations = 0; cmd->schedule[1].full_scan_mul = 0; + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && + mvm->last_ebs_successful) { + cmd->channel_opt[0].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[0].non_ebs_ratio = + cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); + cmd->channel_opt[1].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[1].non_ebs_ratio = + 
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); + } + for (i = 1; i <= req->req.n_ssids; i++) ssid_bitmap |= BIT(i); @@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, cmd->schedule[1].iterations = 0xff; cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && + mvm->last_ebs_successful) { + cmd->channel_opt[0].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[0].non_ebs_ratio = + cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); + cmd->channel_opt[1].flags = + cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); + cmd->channel_opt[1].non_ebs_ratio = + cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); + } + iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, ssid_bitmap, cmd); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; - else if (ieee80211_is_back_req(fc)) - tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; @@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; + } else if (ieee80211_is_back_req(fc)) { + struct ieee80211_bar *bar = (void *)skb->data; + u16 control = le16_to_cpu(bar->control); + + tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; + tx_cmd->tid_tspec = (control & + IEEE80211_BAR_CTRL_TID_INFO_MASK) >> + IEEE80211_BAR_CTRL_TID_INFO_SHIFT; + WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); } else { tx_cmd->tid_tspec = IWL_TID_NON_QOS; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index ea63fbd228ed..352b4f28f82c 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, ret = of_overlay_apply_one(ov, tchild, child); if (ret) return ret; - - /* The properties are already copied, now do the child nodes */ - for_each_child_of_node(child, grandchild) { - ret = of_overlay_apply_single_device_node(ov, tchild, grandchild); - if (ret) { - pr_err("%s: Failed to apply single node @%s/%s\n", - __func__, tchild->full_name, - grandchild->name); - return ret; - } - } } return ret; diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 5b33c6a21807..b0d50d70a8a1 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev) size = dev->coherent_dma_mask; } else { offset = PFN_DOWN(paddr - dma_addr); - dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); + dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } dev->dma_pfn_offset = offset; @@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb, if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) return NOTIFY_OK; /* not for us */ + /* already populated? 
(driver using of_populate manually) */ + if (of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + /* pdev_parent may be NULL when no bus platform device */ pdev_parent = of_find_device_by_node(rd->dn->parent); pdev = of_platform_device_create(rd->dn, NULL, @@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb, break; case OF_RECONFIG_CHANGE_REMOVE: + + /* already depopulated? */ + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + /* find our device by node */ pdev = of_find_device_by_node(rd->dn); if (pdev == NULL) diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi index 75976da22b2e..a2b687d5f324 100644 --- a/drivers/of/unittest-data/tests-overlay.dtsi +++ b/drivers/of/unittest-data/tests-overlay.dtsi @@ -176,5 +176,60 @@ }; }; + overlay10 { + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus"; + __overlay__ { + + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-selftest10 { + compatible = "selftest"; + status = "okay"; + reg = <10>; + + #address-cells = <1>; + #size-cells = <0>; + + test-selftest101 { + compatible = "selftest"; + status = "okay"; + reg = <1>; + }; + + }; + }; + }; + }; + + overlay11 { + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus"; + __overlay__ { + + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-selftest11 { + compatible = "selftest"; + status = "okay"; + reg = <11>; + + #address-cells = <1>; + #size-cells = <0>; + + test-selftest111 { + compatible = "selftest"; + status = "okay"; + reg = <1>; + }; + + }; + }; + }; + }; }; }; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 844838e11ef1..41a4a138f53b 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev) } dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); + + of_platform_populate(np, NULL, NULL, &pdev->dev); + return 0; } @@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void) selftest(1, "overlay test %d passed\n", 8); } +/* test insertion of a bus with parent devices */ +static void of_selftest_overlay_10(void) +{ + int ret; + char *child_path; + + /* device should disable */ + ret = of_selftest_apply_overlay_check(10, 10, 0, 1); + if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10)) + return; + + child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101", + selftest_path(10)); + if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10)) + return; + + ret = of_path_platform_device_exists(child_path); + kfree(child_path); + if (selftest(ret, "overlay test %d failed; no child device\n", 10)) + return; +} + +/* test insertion of a bus with parent devices (and revert) */ +static void of_selftest_overlay_11(void) +{ + int ret; + + /* device should disable */ + ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1); + if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11)) + return; +} + static void __init of_selftest_overlay(void) { struct device_node *bus_np = NULL; @@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void) of_selftest_overlay_6(); of_selftest_overlay_8(); + of_selftest_overlay_10(); + of_selftest_overlay_11(); + out: of_node_put(bus_np); } diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 37e71ff6408d..dceb9ddfd99a 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -694,9 
+694,8 @@ lba_fixup_bus(struct pci_bus *bus) int i; /* PCI-PCI Bridge */ pci_read_bridge_bases(bus); - for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { - pci_claim_resource(bus->self, i); - } + for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) + pci_claim_bridge_resource(bus->self, i); } else { /* Host-PCI Bridge */ int err; diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 73aef51a28f0..8fb16188cd82 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, } EXPORT_SYMBOL(pci_bus_alloc_resource); +/* + * The @idx resource of @dev should be a PCI-PCI bridge window. If this + * resource fits inside a window of an upstream bridge, do nothing. If it + * overlaps an upstream window but extends outside it, clip the resource so + * it fits completely inside. + */ +bool pci_bus_clip_resource(struct pci_dev *dev, int idx) +{ + struct pci_bus *bus = dev->bus; + struct resource *res = &dev->resource[idx]; + struct resource orig_res = *res; + struct resource *r; + int i; + + pci_bus_for_each_resource(bus, r, i) { + resource_size_t start, end; + + if (!r) + continue; + + if (resource_type(res) != resource_type(r)) + continue; + + start = max(r->start, res->start); + end = min(r->end, res->end); + + if (start > end) + continue; /* no overlap */ + + if (res->start == start && res->end == end) + return false; /* no change */ + + res->start = start; + res->end = end; + dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", + &orig_res, res); + + return true; + } + + return false; +} + void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } /** diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index cab05f31223f..e9d4fd861ba1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) { struct pci_dev *pdev; - if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) + if (pci_is_root_bus(dev->bus) || dev->subordinate || + !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) @@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) { struct pci_dev *pdev; - if (dev->subordinate || !dev->slot) + if (dev->subordinate || !dev->slot || + dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) @@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_try_reset_function); +/* Do any devices on or below this bus prevent a bus reset? */ +static bool pci_bus_resetable(struct pci_bus *bus) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || + (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + return false; + } + + return true; +} + /* Lock devices from the top of the tree down */ static void pci_bus_lock(struct pci_bus *bus) { @@ -3607,6 +3623,22 @@ unlock: return 0; } +/* Do any devices on or below this slot prevent a bus reset? 
*/ +static bool pci_slot_resetable(struct pci_slot *slot) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &slot->bus->devices, bus_list) { + if (!dev->slot || dev->slot != slot) + continue; + if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || + (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + return false; + } + + return true; +} + /* Lock devices from the top of the tree down */ static void pci_slot_lock(struct pci_slot *slot) { @@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe) { int rc; - if (!slot) + if (!slot || !pci_slot_resetable(slot)) return -ENOTTY; if (!probe) @@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot); static int pci_bus_reset(struct pci_bus *bus, int probe) { - if (!bus->self) + if (!bus->self || !pci_bus_resetable(bus)) return -ENOTTY; if (probe) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 8aff29a804ff..d54632a1db43 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head); +bool pci_bus_clip_resource(struct pci_dev *dev, int idx); /** * pci_ari_enabled - query ARI forwarding status diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ed6f89b6efe5..e52356aa09b8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, quirk_broken_intx_masking); +static void quirk_no_bus_reset(struct pci_dev *dev) +{ + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; +} + +/* + * Atheros AR93xx chips do not behave after a bus reset. The device will + * throw a Link Down error on AER-capable systems and regardless of AER, + * config space of the device is never accessible again and typically + * causes the system to hang or reset when access is attempted. + * http://www.spinics.net/lists/linux-pci/msg34797.html + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); + #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 0482235eee92..e3e17f3c0f0f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus); config space writes, so it's quite possible that an I/O window of the bridge will have some undesirable address (e.g. 0) after the first write. Ditto 64-bit prefetchable MMIO. */ -static void pci_setup_bridge_io(struct pci_bus *bus) +static void pci_setup_bridge_io(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; unsigned long io_mask; @@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus) io_mask = PCI_IO_1K_RANGE_MASK; /* Set up the top and bottom of the PCI I/O segment for this bus. 
*/ - res = bus->resource[0]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_IO) { pci_read_config_word(bridge, PCI_IO_BASE, &l); @@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus) pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); } -static void pci_setup_bridge_mmio(struct pci_bus *bus) +static void pci_setup_bridge_mmio(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus. */ - res = bus->resource[1]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; @@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus) pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); } -static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) +static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) { - struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; u32 l, bu, lu; @@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) /* Set up PREF base/limit. */ bu = lu = 0; - res = bus->resource[2]; + res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; pcibios_resource_to_bus(bridge->bus, ®ion, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; @@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) &bus->busn_res); if (type & IORESOURCE_IO) - pci_setup_bridge_io(bus); + pci_setup_bridge_io(bridge); if (type & IORESOURCE_MEM) - pci_setup_bridge_mmio(bus); + pci_setup_bridge_mmio(bridge); if (type & IORESOURCE_PREFETCH) - pci_setup_bridge_mmio_pref(bus); + pci_setup_bridge_mmio_pref(bridge); pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } @@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus) __pci_setup_bridge(bus, type); } + +int pci_claim_bridge_resource(struct pci_dev *bridge, int i) +{ + if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END) + return 0; + + if (pci_claim_resource(bridge, i) == 0) + return 0; /* claimed the window */ + + if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) + return 0; + + if (!pci_bus_clip_resource(bridge, i)) + return -EINVAL; /* clipping didn't change anything */ + + switch (i - PCI_BRIDGE_RESOURCES) { + case 0: + pci_setup_bridge_io(bridge); + break; + case 1: + pci_setup_bridge_mmio(bridge); + break; + case 2: + pci_setup_bridge_mmio_pref(bridge); + break; + default: + return -EINVAL; + } + + if (pci_claim_resource(bridge, i) == 0) + return 0; /* claimed a smaller window */ + + return -EINVAL; +} + /* Check whether the bridge supports optional I/O and prefetchable memory ranges. If not, the respective base/limit registers must be read-only and read as 0. */ diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 9411eae39a4e..3d21efe11d7b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -2,11 +2,9 @@ * Driver for Dell laptop extras * * Copyright (c) Red Hat <mjg@redhat.com> - * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com> - * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com> * - * Based on documentation in the libsmbios package: - * Copyright (C) 2005-2014 Dell Inc. 
+ * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell + * Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -34,13 +32,6 @@ #include "../../firmware/dcdbas.h" #define BRIGHTNESS_TOKEN 0x7d -#define KBD_LED_OFF_TOKEN 0x01E1 -#define KBD_LED_ON_TOKEN 0x01E2 -#define KBD_LED_AUTO_TOKEN 0x01E3 -#define KBD_LED_AUTO_25_TOKEN 0x02EA -#define KBD_LED_AUTO_50_TOKEN 0x02EB -#define KBD_LED_AUTO_75_TOKEN 0x02EC -#define KBD_LED_AUTO_100_TOKEN 0x02F6 /* This structure will be modified by the firmware when we enter * system management mode, hence the volatiles */ @@ -71,13 +62,6 @@ struct calling_interface_structure { struct quirk_entry { u8 touchpad_led; - - int needs_kbd_timeouts; - /* - * Ordered list of timeouts expressed in seconds. - * The list must end with -1 - */ - int kbd_timeouts[]; }; static struct quirk_entry *quirks; @@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi) return 1; } -/* - * These values come from Windows utility provided by Dell. If any other value - * is used then BIOS silently set timeout to 0 without any error message. - */ -static struct quirk_entry quirk_dell_xps13_9333 = { - .needs_kbd_timeouts = 1, - .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, -}; - static int da_command_address; static int da_command_code; static int da_num_tokens; @@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = { }, .driver_data = &quirk_dell_vostro_v130, }, - { - .callback = dmi_matched, - .ident = "Dell XPS13 9333", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"), - }, - .driver_data = &quirk_dell_xps13_9333, - }, { } }; @@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } -static int find_token_id(int tokenid) +static int find_token_location(int tokenid) { int i; - for (i = 0; i < da_num_tokens; i++) { if (da_tokens[i].tokenID == tokenid) - return i; + return da_tokens[i].location; } return -1; } -static int find_token_location(int tokenid) -{ - int id; - - id = find_token_id(tokenid); - if (id == -1) - return -1; - - return da_tokens[id].location; -} - static struct calling_interface_buffer * dell_send_request(struct calling_interface_buffer *buffer, int class, int select) @@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, return buffer; } -static inline int dell_smi_error(int value) -{ - switch (value) { - case 0: /* Completed successfully */ - return 0; - case -1: /* Completed with error */ - return -EIO; - case -2: /* Function not supported */ - return -ENXIO; - default: /* Unknown error */ - return -EINVAL; - } -} - /* Derived from information in DellWirelessCtl.cpp: Class 17, select 11 is radio control. It returns an array of 32-bit values. 
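For reference, both the remaining backlight code and the keyboard-backlight code removed below drive the SMBIOS calling interface through the same fixed pattern: take the shared buffer, fill buffer->input[], issue dell_send_request() with a class/select pair, read buffer->output[], and release the buffer. The sketch below is a hypothetical illustration of that pattern, not part of the patch; dell_write_token() is an invented name, and it assumes the helpers visible in these hunks (find_token_location(), get_buffer()/release_buffer(), dell_send_request() with class 1/select 0 for a token write) keep the signatures shown here.

/* Hypothetical sketch only -- mirrors the buffer handling used throughout
 * this driver; not part of the patch.
 */
static int dell_write_token(int tokenid, int value)
{
        int location = find_token_location(tokenid);
        int ret;

        if (location == -1)
                return -ENODEV;

        get_buffer();
        buffer->input[0] = location;      /* token location from the DMI table */
        buffer->input[1] = value;         /* caller-supplied value, illustrative */
        dell_send_request(buffer, 1, 0);  /* class 1, select 0: set a token */
        ret = buffer->output[0];
        release_buffer();

        return ret == 0 ? 0 : -EIO;       /* map firmware return code to errno */
}

The removed kbd_set_token_bit() further down in this diff follows exactly this shape, taking the value from the matching da_tokens[] entry.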
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd) else dell_send_request(buffer, 1, 1); - out: +out: release_buffer(); return ret; } @@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd) ret = buffer->output[1]; - out: +out: release_buffer(); return ret; } @@ -849,984 +789,6 @@ static void touchpad_led_exit(void) led_classdev_unregister(&touchpad_led); } -/* - * Derived from information in smbios-keyboard-ctl: - * - * cbClass 4 - * cbSelect 11 - * Keyboard illumination - * cbArg1 determines the function to be performed - * - * cbArg1 0x0 = Get Feature Information - * cbRES1 Standard return codes (0, -1, -2) - * cbRES2, word0 Bitmap of user-selectable modes - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * cbRES2, byte2 Reserved for future use - * cbRES2, byte3 Keyboard illumination type - * 0 Reserved - * 1 Tasklight - * 2 Backlight - * 3-255 Reserved for future use - * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap. - * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbRES3, byte1 Supported timeout unit bitmap - * bit 0 Seconds - * bit 1 Minutes - * bit 2 Hours - * bit 3 Days - * bits 4-7 Reserved for future use - * cbRES3, byte2 Number of keyboard light brightness levels - * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported). - * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported). - * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported). - * cbRES4, byte3 Maximum acceptable days value (0 if days not supported) - * - * cbArg1 0x1 = Get Current State - * cbRES1 Standard return codes (0, -1, -2) - * cbRES2, word0 Bitmap of current mode state - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * Note: Only One bit can be set - * cbRES2, byte2 Currently active auto keyboard illumination triggers. 
- * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbRES2, byte3 Current Timeout - * bits 7:6 Timeout units indicator: - * 00b Seconds - * 01b Minutes - * 10b Hours - * 11b Days - * bits 5:0 Timeout value (0-63) in sec/min/hr/day - * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte - * are set upon return from the [Get feature information] call. - * cbRES3, byte0 Current setting of ALS value that turns the light on or off. - * cbRES3, byte1 Current ALS reading - * cbRES3, byte2 Current keyboard light level. - * - * cbArg1 0x2 = Set New State - * cbRES1 Standard return codes (0, -1, -2) - * cbArg2, word0 Bitmap of current mode state - * bit 0 Always off (All systems) - * bit 1 Always on (Travis ATG, Siberia) - * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) - * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off - * bit 4 Auto: Input-activity-based On; input-activity based Off - * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off - * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off - * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off - * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off - * bits 9-15 Reserved for future use - * Note: Only One bit can be set - * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow - * keyboard to turn off automatically. - * bit 0 Any keystroke - * bit 1 Touchpad activity - * bit 2 Pointing stick - * bit 3 Any mouse - * bits 4-7 Reserved for future use - * cbArg2, byte3 Desired Timeout - * bits 7:6 Timeout units indicator: - * 00b Seconds - * 01b Minutes - * 10b Hours - * 11b Days - * bits 5:0 Timeout value (0-63) in sec/min/hr/day - * cbArg3, byte0 Desired setting of ALS value that turns the light on or off. - * cbArg3, byte2 Desired keyboard light level. 
- */ - - -enum kbd_timeout_unit { - KBD_TIMEOUT_SECONDS = 0, - KBD_TIMEOUT_MINUTES, - KBD_TIMEOUT_HOURS, - KBD_TIMEOUT_DAYS, -}; - -enum kbd_mode_bit { - KBD_MODE_BIT_OFF = 0, - KBD_MODE_BIT_ON, - KBD_MODE_BIT_ALS, - KBD_MODE_BIT_TRIGGER_ALS, - KBD_MODE_BIT_TRIGGER, - KBD_MODE_BIT_TRIGGER_25, - KBD_MODE_BIT_TRIGGER_50, - KBD_MODE_BIT_TRIGGER_75, - KBD_MODE_BIT_TRIGGER_100, -}; - -#define kbd_is_als_mode_bit(bit) \ - ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS) -#define kbd_is_trigger_mode_bit(bit) \ - ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100) -#define kbd_is_level_mode_bit(bit) \ - ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100) - -struct kbd_info { - u16 modes; - u8 type; - u8 triggers; - u8 levels; - u8 seconds; - u8 minutes; - u8 hours; - u8 days; -}; - -struct kbd_state { - u8 mode_bit; - u8 triggers; - u8 timeout_value; - u8 timeout_unit; - u8 als_setting; - u8 als_value; - u8 level; -}; - -static const int kbd_tokens[] = { - KBD_LED_OFF_TOKEN, - KBD_LED_AUTO_25_TOKEN, - KBD_LED_AUTO_50_TOKEN, - KBD_LED_AUTO_75_TOKEN, - KBD_LED_AUTO_100_TOKEN, - KBD_LED_ON_TOKEN, -}; - -static u16 kbd_token_bits; - -static struct kbd_info kbd_info; -static bool kbd_als_supported; -static bool kbd_triggers_supported; - -static u8 kbd_mode_levels[16]; -static int kbd_mode_levels_count; - -static u8 kbd_previous_level; -static u8 kbd_previous_mode_bit; - -static bool kbd_led_present; - -/* - * NOTE: there are three ways to set the keyboard backlight level. - * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value). - * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels). - * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens) - * - * There are laptops which support only one of these methods. If we want to - * support as many machines as possible we need to implement all three methods. - * The first two methods use the kbd_state structure. The third uses SMBIOS - * tokens. If kbd_info.levels == 0, the machine does not support setting the - * keyboard backlight level via kbd_state.level. 
- */ - -static int kbd_get_info(struct kbd_info *info) -{ - u8 units; - int ret; - - get_buffer(); - - buffer->input[0] = 0x0; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smi_error(ret); - goto out; - } - - info->modes = buffer->output[1] & 0xFFFF; - info->type = (buffer->output[1] >> 24) & 0xFF; - info->triggers = buffer->output[2] & 0xFF; - units = (buffer->output[2] >> 8) & 0xFF; - info->levels = (buffer->output[2] >> 16) & 0xFF; - - if (units & BIT(0)) - info->seconds = (buffer->output[3] >> 0) & 0xFF; - if (units & BIT(1)) - info->minutes = (buffer->output[3] >> 8) & 0xFF; - if (units & BIT(2)) - info->hours = (buffer->output[3] >> 16) & 0xFF; - if (units & BIT(3)) - info->days = (buffer->output[3] >> 24) & 0xFF; - - out: - release_buffer(); - return ret; -} - -static unsigned int kbd_get_max_level(void) -{ - if (kbd_info.levels != 0) - return kbd_info.levels; - if (kbd_mode_levels_count > 0) - return kbd_mode_levels_count - 1; - return 0; -} - -static int kbd_get_level(struct kbd_state *state) -{ - int i; - - if (kbd_info.levels != 0) - return state->level; - - if (kbd_mode_levels_count > 0) { - for (i = 0; i < kbd_mode_levels_count; ++i) - if (kbd_mode_levels[i] == state->mode_bit) - return i; - return 0; - } - - return -EINVAL; -} - -static int kbd_set_level(struct kbd_state *state, u8 level) -{ - if (kbd_info.levels != 0) { - if (level != 0) - kbd_previous_level = level; - if (state->level == level) - return 0; - state->level = level; - if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF) - state->mode_bit = kbd_previous_mode_bit; - else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) { - kbd_previous_mode_bit = state->mode_bit; - state->mode_bit = KBD_MODE_BIT_OFF; - } - return 0; - } - - if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) { - if (level != 0) - kbd_previous_level = level; - state->mode_bit = kbd_mode_levels[level]; - return 0; - } - - return -EINVAL; -} - -static int kbd_get_state(struct kbd_state *state) -{ - int ret; - - get_buffer(); - - buffer->input[0] = 0x1; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smi_error(ret); - goto out; - } - - state->mode_bit = ffs(buffer->output[1] & 0xFFFF); - if (state->mode_bit != 0) - state->mode_bit--; - - state->triggers = (buffer->output[1] >> 16) & 0xFF; - state->timeout_value = (buffer->output[1] >> 24) & 0x3F; - state->timeout_unit = (buffer->output[1] >> 30) & 0x3; - state->als_setting = buffer->output[2] & 0xFF; - state->als_value = (buffer->output[2] >> 8) & 0xFF; - state->level = (buffer->output[2] >> 16) & 0xFF; - - out: - release_buffer(); - return ret; -} - -static int kbd_set_state(struct kbd_state *state) -{ - int ret; - - get_buffer(); - buffer->input[0] = 0x2; - buffer->input[1] = BIT(state->mode_bit) & 0xFFFF; - buffer->input[1] |= (state->triggers & 0xFF) << 16; - buffer->input[1] |= (state->timeout_value & 0x3F) << 24; - buffer->input[1] |= (state->timeout_unit & 0x3) << 30; - buffer->input[2] = state->als_setting & 0xFF; - buffer->input[2] |= (state->level & 0xFF) << 16; - dell_send_request(buffer, 4, 11); - ret = buffer->output[0]; - release_buffer(); - - return dell_smi_error(ret); -} - -static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) -{ - int ret; - - ret = kbd_set_state(state); - if (ret == 0) - return 0; - - /* - * When setting the new state fails,try to restore the previous one. 
- * This is needed on some machines where BIOS sets a default state when - * setting a new state fails. This default state could be all off. - */ - - if (kbd_set_state(old)) - pr_err("Setting old previous keyboard state failed\n"); - - return ret; -} - -static int kbd_set_token_bit(u8 bit) -{ - int id; - int ret; - - if (bit >= ARRAY_SIZE(kbd_tokens)) - return -EINVAL; - - id = find_token_id(kbd_tokens[bit]); - if (id == -1) - return -EINVAL; - - get_buffer(); - buffer->input[0] = da_tokens[id].location; - buffer->input[1] = da_tokens[id].value; - dell_send_request(buffer, 1, 0); - ret = buffer->output[0]; - release_buffer(); - - return dell_smi_error(ret); -} - -static int kbd_get_token_bit(u8 bit) -{ - int id; - int ret; - int val; - - if (bit >= ARRAY_SIZE(kbd_tokens)) - return -EINVAL; - - id = find_token_id(kbd_tokens[bit]); - if (id == -1) - return -EINVAL; - - get_buffer(); - buffer->input[0] = da_tokens[id].location; - dell_send_request(buffer, 0, 0); - ret = buffer->output[0]; - val = buffer->output[1]; - release_buffer(); - - if (ret) - return dell_smi_error(ret); - - return (val == da_tokens[id].value); -} - -static int kbd_get_first_active_token_bit(void) -{ - int i; - int ret; - - for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) { - ret = kbd_get_token_bit(i); - if (ret == 1) - return i; - } - - return ret; -} - -static int kbd_get_valid_token_counts(void) -{ - return hweight16(kbd_token_bits); -} - -static inline int kbd_init_info(void) -{ - struct kbd_state state; - int ret; - int i; - - ret = kbd_get_info(&kbd_info); - if (ret) - return ret; - - kbd_get_state(&state); - - /* NOTE: timeout value is stored in 6 bits so max value is 63 */ - if (kbd_info.seconds > 63) - kbd_info.seconds = 63; - if (kbd_info.minutes > 63) - kbd_info.minutes = 63; - if (kbd_info.hours > 63) - kbd_info.hours = 63; - if (kbd_info.days > 63) - kbd_info.days = 63; - - /* NOTE: On tested machines ON mode did not work and caused - * problems (turned backlight off) so do not use it - */ - kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON); - - kbd_previous_level = kbd_get_level(&state); - kbd_previous_mode_bit = state.mode_bit; - - if (kbd_previous_level == 0 && kbd_get_max_level() != 0) - kbd_previous_level = 1; - - if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) { - kbd_previous_mode_bit = - ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF)); - if (kbd_previous_mode_bit != 0) - kbd_previous_mode_bit--; - } - - if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) | - BIT(KBD_MODE_BIT_TRIGGER_ALS))) - kbd_als_supported = true; - - if (kbd_info.modes & ( - BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) | - BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) | - BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100) - )) - kbd_triggers_supported = true; - - /* kbd_mode_levels[0] is reserved, see below */ - for (i = 0; i < 16; ++i) - if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes)) - kbd_mode_levels[1 + kbd_mode_levels_count++] = i; - - /* - * Find the first supported mode and assign to kbd_mode_levels[0]. - * This should be 0 (off), but we cannot depend on the BIOS to - * support 0. 
- */ - if (kbd_mode_levels_count > 0) { - for (i = 0; i < 16; ++i) { - if (BIT(i) & kbd_info.modes) { - kbd_mode_levels[0] = i; - break; - } - } - kbd_mode_levels_count++; - } - - return 0; - -} - -static inline void kbd_init_tokens(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) - if (find_token_id(kbd_tokens[i]) != -1) - kbd_token_bits |= BIT(i); -} - -static void kbd_init(void) -{ - int ret; - - ret = kbd_init_info(); - kbd_init_tokens(); - - if (kbd_token_bits != 0 || ret == 0) - kbd_led_present = true; -} - -static ssize_t kbd_led_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state new_state; - struct kbd_state state; - bool convert; - int value; - int ret; - char ch; - u8 unit; - int i; - - ret = sscanf(buf, "%d %c", &value, &ch); - if (ret < 1) - return -EINVAL; - else if (ret == 1) - ch = 's'; - - if (value < 0) - return -EINVAL; - - convert = false; - - switch (ch) { - case 's': - if (value > kbd_info.seconds) - convert = true; - unit = KBD_TIMEOUT_SECONDS; - break; - case 'm': - if (value > kbd_info.minutes) - convert = true; - unit = KBD_TIMEOUT_MINUTES; - break; - case 'h': - if (value > kbd_info.hours) - convert = true; - unit = KBD_TIMEOUT_HOURS; - break; - case 'd': - if (value > kbd_info.days) - convert = true; - unit = KBD_TIMEOUT_DAYS; - break; - default: - return -EINVAL; - } - - if (quirks && quirks->needs_kbd_timeouts) - convert = true; - - if (convert) { - /* Convert value from current units to seconds */ - switch (unit) { - case KBD_TIMEOUT_DAYS: - value *= 24; - case KBD_TIMEOUT_HOURS: - value *= 60; - case KBD_TIMEOUT_MINUTES: - value *= 60; - unit = KBD_TIMEOUT_SECONDS; - } - - if (quirks && quirks->needs_kbd_timeouts) { - for (i = 0; quirks->kbd_timeouts[i] != -1; i++) { - if (value <= quirks->kbd_timeouts[i]) { - value = quirks->kbd_timeouts[i]; - break; - } - } - } - - if (value <= kbd_info.seconds && kbd_info.seconds) { - unit = KBD_TIMEOUT_SECONDS; - } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) { - value /= 60; - unit = KBD_TIMEOUT_MINUTES; - } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) { - value /= (60 * 60); - unit = KBD_TIMEOUT_HOURS; - } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) { - value /= (60 * 60 * 24); - unit = KBD_TIMEOUT_DAYS; - } else { - return -EINVAL; - } - } - - ret = kbd_get_state(&state); - if (ret) - return ret; - - new_state = state; - new_state.timeout_value = value; - new_state.timeout_unit = unit; - - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - - return count; -} - -static ssize_t kbd_led_timeout_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - int ret; - int len; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - len = sprintf(buf, "%d", state.timeout_value); - - switch (state.timeout_unit) { - case KBD_TIMEOUT_SECONDS: - return len + sprintf(buf+len, "s\n"); - case KBD_TIMEOUT_MINUTES: - return len + sprintf(buf+len, "m\n"); - case KBD_TIMEOUT_HOURS: - return len + sprintf(buf+len, "h\n"); - case KBD_TIMEOUT_DAYS: - return len + sprintf(buf+len, "d\n"); - default: - return -EINVAL; - } - - return len; -} - -static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR, - kbd_led_timeout_show, kbd_led_timeout_store); - -static const char * const kbd_led_triggers[] = { - "keyboard", - "touchpad", - /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */ - "mouse", -}; - -static ssize_t 
kbd_led_triggers_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state new_state; - struct kbd_state state; - bool triggers_enabled = false; - bool als_enabled = false; - bool disable_als = false; - bool enable_als = false; - int trigger_bit = -1; - char trigger[21]; - int i, ret; - - ret = sscanf(buf, "%20s", trigger); - if (ret != 1) - return -EINVAL; - - if (trigger[0] != '+' && trigger[0] != '-') - return -EINVAL; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - if (kbd_als_supported) - als_enabled = kbd_is_als_mode_bit(state.mode_bit); - - if (kbd_triggers_supported) - triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); - - if (kbd_als_supported) { - if (strcmp(trigger, "+als") == 0) { - if (als_enabled) - return count; - enable_als = true; - } else if (strcmp(trigger, "-als") == 0) { - if (!als_enabled) - return count; - disable_als = true; - } - } - - if (enable_als || disable_als) { - new_state = state; - if (enable_als) { - if (triggers_enabled) - new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; - else - new_state.mode_bit = KBD_MODE_BIT_ALS; - } else { - if (triggers_enabled) { - new_state.mode_bit = KBD_MODE_BIT_TRIGGER; - kbd_set_level(&new_state, kbd_previous_level); - } else { - new_state.mode_bit = KBD_MODE_BIT_ON; - } - } - if (!(kbd_info.modes & BIT(new_state.mode_bit))) - return -EINVAL; - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - kbd_previous_mode_bit = new_state.mode_bit; - return count; - } - - if (kbd_triggers_supported) { - for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) { - if (!(kbd_info.triggers & BIT(i))) - continue; - if (!kbd_led_triggers[i]) - continue; - if (strcmp(trigger+1, kbd_led_triggers[i]) != 0) - continue; - if (trigger[0] == '+' && - triggers_enabled && (state.triggers & BIT(i))) - return count; - if (trigger[0] == '-' && - (!triggers_enabled || !(state.triggers & BIT(i)))) - return count; - trigger_bit = i; - break; - } - } - - if (trigger_bit != -1) { - new_state = state; - if (trigger[0] == '+') - new_state.triggers |= BIT(trigger_bit); - else { - new_state.triggers &= ~BIT(trigger_bit); - /* NOTE: trackstick bit (2) must be disabled when - * disabling touchpad bit (1), otherwise touchpad - * bit (1) will not be disabled */ - if (trigger_bit == 1) - new_state.triggers &= ~BIT(2); - } - if ((kbd_info.triggers & new_state.triggers) != - new_state.triggers) - return -EINVAL; - if (new_state.triggers && !triggers_enabled) { - if (als_enabled) - new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; - else { - new_state.mode_bit = KBD_MODE_BIT_TRIGGER; - kbd_set_level(&new_state, kbd_previous_level); - } - } else if (new_state.triggers == 0) { - if (als_enabled) - new_state.mode_bit = KBD_MODE_BIT_ALS; - else - kbd_set_level(&new_state, 0); - } - if (!(kbd_info.modes & BIT(new_state.mode_bit))) - return -EINVAL; - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - if (new_state.mode_bit != KBD_MODE_BIT_OFF) - kbd_previous_mode_bit = new_state.mode_bit; - return count; - } - - return -EINVAL; -} - -static ssize_t kbd_led_triggers_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - bool triggers_enabled; - int level, i, ret; - int len = 0; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - len = 0; - - if (kbd_triggers_supported) { - triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); - level = kbd_get_level(&state); - for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); 
++i) { - if (!(kbd_info.triggers & BIT(i))) - continue; - if (!kbd_led_triggers[i]) - continue; - if ((triggers_enabled || level <= 0) && - (state.triggers & BIT(i))) - buf[len++] = '+'; - else - buf[len++] = '-'; - len += sprintf(buf+len, "%s ", kbd_led_triggers[i]); - } - } - - if (kbd_als_supported) { - if (kbd_is_als_mode_bit(state.mode_bit)) - len += sprintf(buf+len, "+als "); - else - len += sprintf(buf+len, "-als "); - } - - if (len) - buf[len - 1] = '\n'; - - return len; -} - -static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR, - kbd_led_triggers_show, kbd_led_triggers_store); - -static ssize_t kbd_led_als_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct kbd_state state; - struct kbd_state new_state; - u8 setting; - int ret; - - ret = kstrtou8(buf, 10, &setting); - if (ret) - return ret; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - new_state = state; - new_state.als_setting = setting; - - ret = kbd_set_state_safe(&new_state, &state); - if (ret) - return ret; - - return count; -} - -static ssize_t kbd_led_als_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct kbd_state state; - int ret; - - ret = kbd_get_state(&state); - if (ret) - return ret; - - return sprintf(buf, "%d\n", state.als_setting); -} - -static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR, - kbd_led_als_show, kbd_led_als_store); - -static struct attribute *kbd_led_attrs[] = { - &dev_attr_stop_timeout.attr, - &dev_attr_start_triggers.attr, - &dev_attr_als_setting.attr, - NULL, -}; -ATTRIBUTE_GROUPS(kbd_led); - -static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev) -{ - int ret; - u16 num; - struct kbd_state state; - - if (kbd_get_max_level()) { - ret = kbd_get_state(&state); - if (ret) - return 0; - ret = kbd_get_level(&state); - if (ret < 0) - return 0; - return ret; - } - - if (kbd_get_valid_token_counts()) { - ret = kbd_get_first_active_token_bit(); - if (ret < 0) - return 0; - for (num = kbd_token_bits; num != 0 && ret > 0; --ret) - num &= num - 1; /* clear the first bit set */ - if (num == 0) - return 0; - return ffs(num) - 1; - } - - pr_warn("Keyboard brightness level control not supported\n"); - return 0; -} - -static void kbd_led_level_set(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct kbd_state state; - struct kbd_state new_state; - u16 num; - - if (kbd_get_max_level()) { - if (kbd_get_state(&state)) - return; - new_state = state; - if (kbd_set_level(&new_state, value)) - return; - kbd_set_state_safe(&new_state, &state); - return; - } - - if (kbd_get_valid_token_counts()) { - for (num = kbd_token_bits; num != 0 && value > 0; --value) - num &= num - 1; /* clear the first bit set */ - if (num == 0) - return; - kbd_set_token_bit(ffs(num) - 1); - return; - } - - pr_warn("Keyboard brightness level control not supported\n"); -} - -static struct led_classdev kbd_led = { - .name = "dell::kbd_backlight", - .brightness_set = kbd_led_level_set, - .brightness_get = kbd_led_level_get, - .groups = kbd_led_groups, -}; - -static int __init kbd_led_init(struct device *dev) -{ - kbd_init(); - if (!kbd_led_present) - return -ENODEV; - kbd_led.max_brightness = kbd_get_max_level(); - if (!kbd_led.max_brightness) { - kbd_led.max_brightness = kbd_get_valid_token_counts(); - if (kbd_led.max_brightness) - kbd_led.max_brightness--; - } - return led_classdev_register(dev, &kbd_led); -} - -static void brightness_set_exit(struct led_classdev *led_cdev, - enum led_brightness value) -{ - /* Don't 
change backlight level on exit */ -}; - -static void kbd_led_exit(void) -{ - if (!kbd_led_present) - return; - kbd_led.brightness_set = brightness_set_exit; - led_classdev_unregister(&kbd_led); -} - static int __init dell_init(void) { int max_intensity = 0; @@ -1879,8 +841,6 @@ static int __init dell_init(void) if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); - kbd_led_init(&platform_device->dev); - dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); if (dell_laptop_dir != NULL) debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, @@ -1948,7 +908,6 @@ static void __exit dell_exit(void) debugfs_remove_recursive(dell_laptop_dir); if (quirks && quirks->touchpad_led) touchpad_led_exit(); - kbd_led_exit(); i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); @@ -1965,7 +924,5 @@ module_init(dell_init); module_exit(dell_exit); MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); -MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); -MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); MODULE_DESCRIPTION("Dell laptop driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e225711bb8bc..9c48fb32f660 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id) } EXPORT_SYMBOL_GPL(regulator_get_optional); -/* Locks held by regulator_put() */ +/* regulator_list_mutex lock held by regulator_put() */ static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; @@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator) /* remove any sysfs entries */ if (regulator->dev) sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); + mutex_lock(&rdev->mutex); kfree(regulator->supply_name); list_del(®ulator->list); kfree(regulator); rdev->open_count--; rdev->exclusive = 0; + mutex_unlock(&rdev->mutex); module_put(rdev->owner); } diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2809ae0d6bcd..ff828117798f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops; .enable_mask = S2MPS14_ENABLE_MASK \ } +#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \ + .name = "BUCK"#num, \ + .id = S2MPS13_BUCK##num, \ + .ops = &s2mps14_reg_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = min, \ + .uV_step = step, \ + .linear_min_sel = min_sel, \ + .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ + .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ + .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ + .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \ + .enable_mask = S2MPS14_ENABLE_MASK \ +} + +#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \ + .name = "BUCK"#num, \ + .id = S2MPS13_BUCK##num, \ + .ops = &s2mps14_reg_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = min, \ + .uV_step = step, \ + .linear_min_sel = min_sel, \ + .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ + .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ + .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ + .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ + .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \ + .enable_mask = S2MPS14_ENABLE_MASK \ +} + static const struct regulator_desc s2mps13_regulators[] = { 
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), @@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = { regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), - regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10), - regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20), - regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20), - regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10), + regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10), + regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20), + regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20), + regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10), }; static int s2mps14_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b5e7c4670205..89ac1d5083c6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c @@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); static const struct platform_device_id s5m_rtc_id[] = { { "s5m-rtc", S5M8767X }, { "s2mps14-rtc", S2MPS14X }, + { }, }; static struct platform_driver s5m_rtc_driver = { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, QETH_DBF_TEXT(SETUP, 2, "idxanswr"); card = CARD_FROM_CDEV(channel->ccwdev); iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; iob->callback = idx_reply_cb; memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); channel->ccw.count = QETH_BUFSIZE; @@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_TEXT(SETUP, 2, "idxactch"); iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; iob->callback = idx_reply_cb; memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); channel->ccw.count = IDX_ACTIVATE_SIZE; @@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); +/** + * qeth_send_control_data() - send control command to the card + * @card: qeth_card structure pointer + * @len: size of the command buffer + * @iob: qeth_cmd_buffer pointer + * @reply_cb: callback function pointer + * @cb_card: pointer to the qeth_card structure + * @cb_reply: pointer to the qeth_reply structure + * @cb_cmd: pointer to the original iob for non-IPA + * commands, or to the qeth_ipa_cmd structure + * for the IPA commands. + * @reply_param: private pointer passed to the callback + * + * Returns the value of the `return_code' field of the response + * block returned from the hardware, or other error indication. + * Value of zero indicates successful execution of the command. + * + * Callback function gets called one or more times, with cb_cmd + * pointing to the response returned by the hardware. Callback + * function must return non-zero if more reply blocks are expected, + * and zero if the last or only reply block is received. Callback + * function can get the value of the reply_param pointer from the + * field 'param' of the structure qeth_reply. 
+ */ + int qeth_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob, - int (*reply_cb)(struct qeth_card *, struct qeth_reply *, - unsigned long), + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), void *reply_param) { int rc; @@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - iob = qeth_wait_for_buffer(&card->write); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); + iob = qeth_get_buffer(&card->write); + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); + } else { + dev_warn(&card->gdev->dev, + "The qeth driver ran out of channel command buffers\n"); + QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", + dev_name(&card->gdev->dev)); + } return iob; } @@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); +/** + * qeth_send_ipa_cmd() - send an IPA command + * + * See qeth_send_control_data() for explanation of the arguments. + */ + int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), @@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "strtlan"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } @@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setadapterparms.hdr.cmdlength = cmdlen; - cmd->data.setadapterparms.hdr.command_code = command; - cmd->data.setadapterparms.hdr.used_total = 1; - cmd->data.setadapterparms.hdr.seq_no = 1; + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setadapterparms.hdr.cmdlength = cmdlen; + cmd->data.setadapterparms.hdr.command_code = command; + cmd->data.setadapterparms.hdr.used_total = 1; + cmd->data.setadapterparms.hdr.seq_no = 1; + } return iob; } @@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "queryadp"); iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); return rc; } @@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); + if (!iob) + return -ENOMEM; rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); return rc; } @@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card, return -ENOMEDIUM; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; return qeth_send_ipa_cmd(card, iob, qeth_query_switch_attributes_cb, sw_info); } @@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "qdiagass"); iob = qeth_get_ipacmd_buffer(card, 
IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; @@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) QETH_DBF_TEXT(SETUP, 2, "diagtrap"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 80; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; @@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return; cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.mode = mode; qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); @@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; @@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_set_access_ctrl)); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; access_ctrl_req->subcmd_code = isolation; @@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, QETH_SNMP_SETADP_CMDLENGTH + req_len); + if (!iob) { + rc = -ENOMEM; + goto out; + } cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, @@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) rc = -EFAULT; } - +out: kfree(ureq); kfree(qinfo.udata); return rc; @@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, sizeof(struct qeth_ipacmd_setadpparms_hdr) + sizeof(struct qeth_query_oat)); + if (!iob) { + rc = -ENOMEM; + goto out_free; + } cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; @@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card, return -EOPNOTSUPP; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, (void *)carrier_info); } @@ -5060,11 +5133,23 @@ retriable: card->options.adp.supported_funcs = 0; card->options.sbp.supported_funcs = 0; card->info.diagass_support = 0; - qeth_query_ipassists(card, QETH_PROT_IPV4); - if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) - qeth_query_setadapterparms(card); - if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) - 
qeth_query_setdiagass(card); + rc = qeth_query_ipassists(card, QETH_PROT_IPV4); + if (rc == -ENOMEM) + goto out; + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + goto out; + } + } + if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + rc = qeth_query_setdiagass(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + goto out; + } + } return 0; out: dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, - enum qeth_ipa_cmds, - int (*reply_cb) (struct qeth_card *, - struct qeth_reply*, - unsigned long)); + enum qeth_ipa_cmds); static void qeth_l2_set_multicast_list(struct net_device *); static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); @@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) return ndev; } -static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_setdel_makerc(struct qeth_card *card, int retcode) { - struct qeth_ipa_cmd *cmd; - __u8 *mac; + int rc; - QETH_CARD_TEXT(card, 2, "L2Sgmacb"); - cmd = (struct qeth_ipa_cmd *) data; - mac = &cmd->data.setdelmac.mac[0]; - /* MAC already registered, needed in couple/uncouple case */ - if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { - QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", - mac, QETH_CARD_IFNAME(card)); - cmd->hdr.return_code = 0; + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + switch (retcode) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + rc = -ENOSYS; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_DUP_MAC: + case IPA_RC_L2_DUP_LAYER3_MAC: + rc = -EEXIST; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EPERM; + break; + case IPA_RC_L2_MAC_NOT_FOUND: + rc = -ENOENT; + break; + case -ENOMEM: + rc = -ENOMEM; + break; + default: + rc = -EIO; + break; } - if (cmd->hdr.return_code) - QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", - mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - return 0; + return rc; } static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) { - QETH_CARD_TEXT(card, 2, "L2Sgmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, - qeth_l2_send_setgroupmac_cb); -} - -static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - __u8 *mac; + int rc; - QETH_CARD_TEXT(card, 2, "L2Dgmacb"); - cmd = (struct qeth_ipa_cmd *) data; - mac = &cmd->data.setdelmac.mac[0]; - if (cmd->hdr.return_code) - QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", - mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); - return 0; + QETH_CARD_TEXT(card, 2, "L2Sgmac"); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_SETGMAC)); + if (rc == -EEXIST) + QETH_DBF_MESSAGE(2, "Group MAC %pM 
already existing on %s\n", + mac, QETH_CARD_IFNAME(card)); + else if (rc) + QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; } static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) { + int rc; + QETH_CARD_TEXT(card, 2, "L2Dgmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, - qeth_l2_send_delgroupmac_cb); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_DELGMAC)); + if (rc) + QETH_DBF_MESSAGE(2, + "Could not delete group MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; } static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) @@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) mc->is_vmac = vmac; if (vmac) { - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, - NULL); + rc = qeth_setdel_makerc(card, + qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC)); } else { - rc = qeth_l2_send_setgroupmac(card, mac); + rc = qeth_setdel_makerc(card, + qeth_l2_send_setgroupmac(card, mac)); } if (!rc) @@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del) if (del) { if (mc->is_vmac) qeth_l2_send_setdelmac(card, mc->mc_addr, - IPA_CMD_DELVMAC, NULL); + IPA_CMD_DELVMAC); else qeth_l2_send_delgroupmac(card, mc->mc_addr); } @@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelvlan.vlan_id = i; return qeth_send_ipa_cmd(card, iob, @@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, { struct qeth_card *card = dev->ml_priv; struct qeth_vlan_vid *id; + int rc; QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (!vid) @@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); if (id) { id->vid = vid; - qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); + if (rc) { + kfree(id); + return rc; + } spin_lock_bh(&card->vlanlock); list_add_tail(&id->list, &card->vid_list); spin_unlock_bh(&card->vlanlock); @@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, { struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; + int rc = 0; QETH_CARD_TEXT_(card, 4, "kid:%d", vid); if (card->info.type == QETH_CARD_TYPE_OSM) { @@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, } spin_unlock_bh(&card->vlanlock); if (tmpid) { - qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); } qeth_l2_set_multicast_list(card->dev); - return 0; + return rc; } static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) @@ -539,91 +560,62 @@ out: } static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, - enum qeth_ipa_cmds ipacmd, - int (*reply_cb) (struct qeth_card *, - struct qeth_reply*, - unsigned long)) + enum qeth_ipa_cmds ipacmd) { struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; QETH_CARD_TEXT(card, 2, "L2sdmac"); iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; 
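The qeth_setdel_makerc() helper introduced above centralizes the translation of hardware IPA return codes into standard errno values, so callers such as qeth_l2_send_setgroupmac() can simply branch on -EEXIST or -ENOENT instead of decoding raw IPA_RC_* codes in per-command reply callbacks. The standalone sketch below illustrates that translation pattern; the numeric IPA_RC_* values and the caller are illustrative stand-ins, not the kernel definitions.

/*
 * Illustrative only: stand-in IPA_RC_* values, not the real qeth codes.
 * Shows the "translate hardware status to errno once, branch on errno
 * everywhere" pattern used by qeth_setdel_makerc().
 */
#include <errno.h>
#include <stdio.h>

enum {
    IPA_RC_SUCCESS          = 0x0000,   /* stand-in values */
    IPA_RC_L2_DUP_MAC       = 0x2005,
    IPA_RC_L2_ADDR_FULL     = 0x2006,
    IPA_RC_L2_MAC_NOT_FOUND = 0x2010,
};

static int setdel_makerc(int retcode)
{
    switch (retcode) {
    case IPA_RC_SUCCESS:            return 0;
    case IPA_RC_L2_DUP_MAC:         return -EEXIST;
    case IPA_RC_L2_ADDR_FULL:       return -ENOSPC;
    case IPA_RC_L2_MAC_NOT_FOUND:   return -ENOENT;
    case -ENOMEM:                   return -ENOMEM; /* allocation failure passes through */
    default:                        return -EIO;
    }
}

int main(void)
{
    int rc = setdel_makerc(IPA_RC_L2_DUP_MAC);

    /* A caller can now tolerate "already registered" without parsing raw codes. */
    if (rc == -EEXIST)
        printf("group MAC already registered, treating as benign\n");
    else if (rc)
        printf("registration failed: %d\n", rc);
    else
        printf("registered\n");
    return 0;
}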
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); + return qeth_send_ipa_cmd(card, iob, NULL, NULL); } -static int qeth_l2_send_setmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) { - struct qeth_ipa_cmd *cmd; + int rc; - QETH_CARD_TEXT(card, 2, "L2Smaccb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_SETVMAC)); + if (rc == 0) { + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + card->dev->dev_addr, card->dev->name); + } else { card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - switch (cmd->hdr.return_code) { - case IPA_RC_L2_DUP_MAC: - case IPA_RC_L2_DUP_LAYER3_MAC: + switch (rc) { + case -EEXIST: dev_warn(&card->gdev->dev, - "MAC address %pM already exists\n", - cmd->data.setdelmac.mac); + "MAC address %pM already exists\n", mac); break; - case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: - case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + case -EPERM: dev_warn(&card->gdev->dev, - "MAC address %pM is not authorized\n", - cmd->data.setdelmac.mac); - break; - default: + "MAC address %pM is not authorized\n", mac); break; } - } else { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, - OSA_ADDR_LEN); - dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); - } - return 0; -} - -static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) -{ - QETH_CARD_TEXT(card, 2, "L2Setmac"); - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, - qeth_l2_send_setmac_cb); -} - -static int qeth_l2_send_delmac_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) -{ - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 2, "L2Dmaccb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code) { - QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); - return 0; } - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - - return 0; + return rc; } static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) { + int rc; + QETH_CARD_TEXT(card, 2, "L2Delmac"); if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) return 0; - return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, - qeth_l2_send_delmac_cb); + rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, + IPA_CMD_DELVMAC)); + if (rc == 0) + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + return rc; } static int qeth_l2_request_initial_mac(struct qeth_card *card) @@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) if (rc) { QETH_DBF_MESSAGE(2, "couldn't get MAC address on " "device %s: x%x\n", CARD_BUS_ID(card), rc); - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); return rc; } QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); @@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) + if (!rc || (rc == -ENOENT)) rc = qeth_l2_send_setmac(card, addr->sa_data); 
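The reworked qeth_l2_set_mac_address() path above first deletes the previously registered address and treats a -ENOENT result ("nothing was registered yet") as harmless before setting the new one. A minimal userspace sketch of that delete-then-set flow, with hypothetical helpers standing in for qeth_l2_send_delmac() and qeth_l2_send_setmac():

/* Hypothetical stand-ins for the driver helpers; not the real qeth code. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned char registered_mac[6];
static int mac_registered;

static int del_mac(const unsigned char *mac)
{
    if (!mac_registered || memcmp(mac, registered_mac, 6))
        return -ENOENT;     /* nothing (or something else) registered */
    mac_registered = 0;
    return 0;
}

static int set_mac(const unsigned char *mac)
{
    memcpy(registered_mac, mac, 6);
    mac_registered = 1;
    return 0;
}

static int change_mac(const unsigned char *old_mac, const unsigned char *new_mac)
{
    int rc = del_mac(old_mac);

    /* "Not found" just means there was nothing to remove; keep going. */
    if (!rc || rc == -ENOENT)
        rc = set_mac(new_mac);
    return rc ? -EINVAL : 0;
}

int main(void)
{
    unsigned char a[6] = { 0x02, 0, 0, 0, 0, 1 };
    unsigned char b[6] = { 0x02, 0, 0, 0, 0, 2 };

    printf("change_mac: %d\n", change_mac(a, b)); /* old one never registered, still 0 */
    return 0;
}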
return rc ? -EINVAL : 0; } @@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) recover_flag = card->state; rc = qeth_core_hardsetup_card(card); if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } @@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "brqsuppo"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + @@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) return -EOPNOTSUPP; iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr); @@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, if (rc) return rc; rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); - if (rc) - return rc; - return 0; + return rc; } EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); @@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) if (!(card->options.sbp.supported_funcs & setcmd)) return -EOPNOTSUPP; iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.sbp.hdr.cmdlength = cmdlength; cmd->data.sbp.hdr.command_code = setcmd; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setdelmc"); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); if (addr->proto == QETH_PROT_IPV6) @@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, QETH_CARD_TEXT_(card, 4, "flags%02X", flags); iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); if (addr->proto == QETH_PROT_IPV6) { memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, @@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setroutg"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setrtg.type = (type); rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); @@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( QETH_CARD_TEXT(card, 4, "getasscm"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.setassparms.hdr.assist_no = ipa_func; - cmd->data.setassparms.hdr.length = 8 + len; - cmd->data.setassparms.hdr.command_code = cmd_code; - cmd->data.setassparms.hdr.return_code = 0; - cmd->data.setassparms.hdr.seq_no = 0; + if (iob) { + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + 
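Most hunks in this patch add the same defensive step: qeth_get_ipacmd_buffer() and qeth_get_adapter_cmd() can now return NULL (the driver no longer sleeps indefinitely waiting for a free command buffer), so each caller must check the pointer and propagate -ENOMEM instead of dereferencing it. A minimal sketch of that pattern, with an ordinary allocator standing in for the real qeth helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_buffer {
    unsigned char data[256];
};

/* Stand-in for qeth_get_ipacmd_buffer(): may legitimately fail. */
static struct cmd_buffer *get_cmd_buffer(void)
{
    return calloc(1, sizeof(struct cmd_buffer));
}

static int send_query(void)
{
    struct cmd_buffer *iob = get_cmd_buffer();

    if (!iob)
        return -ENOMEM;     /* fail the request rather than crash */

    iob->data[0] = 0x42;    /* fill in the command header */
    /* ... hand iob to the device and wait for the reply ... */
    free(iob);
    return 0;
}

int main(void)
{
    printf("send_query: %d\n", send_query());
    return 0;
}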
cmd->data.setassparms.hdr.assist_no = ipa_func; + cmd->data.setassparms.hdr.length = 8 + len; + cmd->data.setassparms.hdr.command_code = cmd_code; + cmd->data.setassparms.hdr.return_code = 0; + cmd->data.setassparms.hdr.seq_no = 0; + } return iob; } @@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "simassp6"); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, 0, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, 0, 0, qeth_l3_default_setassparms_cb, NULL); return rc; @@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, length = sizeof(__u32); iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, length, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, length, data, qeth_l3_default_setassparms_cb, NULL); return rc; @@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = card->info.unique_id; @@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = card->info.unique_id; @@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) QETH_DBF_TEXT(SETUP, 2, "diagtrac"); iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.diagass.subcmd_len = 16; cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; @@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, IPA_CMD_ASS_ARP_QUERY_INFO, sizeof(struct qeth_arp_query_data) - sizeof(char), prot); + if (!iob) + return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setassparms.data.query_arp.request_bits = 0x000F; cmd->data.setassparms.data.query_arp.reply_bits = 0; @@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, IPA_CMD_ASS_ARP_ADD_ENTRY, sizeof(struct qeth_arp_cache_entry), QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, sizeof(struct qeth_arp_cache_entry), (unsigned long) entry, @@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, IPA_CMD_ASS_ARP_REMOVE_ENTRY, 12, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; rc = qeth_l3_send_setassparms(card, iob, 12, (unsigned long)buf, qeth_l3_default_setassparms_cb, NULL); @@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { static int qeth_l3_setup_netdev(struct qeth_card *card) { + int rc; + if (card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || @@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return -ENODEV; card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; - qeth_l3_iqd_read_initial_mac(card); + rc = qeth_l3_iqd_read_initial_mac(card); + if (rc) + return rc; if (card->options.hsuid[0]) memcpy(card->dev->perm_addr, card->options.hsuid, 9); } else @@ 
-3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) recover_flag = card->state; rc = qeth_core_hardsetup_card(card); if (rc) { - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } @@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) contin: rc = qeth_l3_setadapter_parms(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); if (!card->options.sniffer) { rc = qeth_l3_start_ipassists(card); if (rc) { @@ -3410,10 +3438,10 @@ contin: rc = qeth_l3_setrouting_v4(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); rc = qeth_l3_setrouting_v6(card); if (rc) - QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); } netif_tx_disable(card->dev); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index df4e27cd996a..9219953ee949 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->u.scratch = 0; ipr_cmd->sibling = NULL; + ipr_cmd->eh_comp = NULL; ipr_cmd->fast_done = fast_done; init_timer(&ipr_cmd->timer); } @@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } @@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev) return rc; } +/** + * ipr_match_lun - Match function for specified LUN + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * + * Returns: + * 1 if command matches sdev / 0 if command does not match sdev + **/ +static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) +{ + if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) + return 1; + return 0; +} + +/** + * ipr_wait_for_ops - Wait for matching commands to complete + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * @match: match function to use + * + * Returns: + * SUCCESS / FAILED + **/ +static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, + int (*match)(struct ipr_cmnd *, void *)) +{ + struct ipr_cmnd *ipr_cmd; + int wait; + unsigned long flags; + struct ipr_hrr_queue *hrrq; + signed long timeout = IPR_ABORT_TASK_TIMEOUT; + DECLARE_COMPLETION_ONSTACK(comp); + + ENTER; + do { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = &comp; + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); + + if (!timeout) { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = NULL; + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) + dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); + LEAVE; + return wait ?
FAILED : SUCCESS; + } + } + } while (wait); + + LEAVE; + return SUCCESS; +} + static int ipr_eh_host_reset(struct scsi_cmnd *cmd) { struct ipr_ioa_cfg *ioa_cfg; @@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; + struct ipr_ioa_cfg *ioa_cfg; + + ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irq(cmd->device->host->host_lock); rc = __ipr_eh_dev_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + return rc; } @@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) { unsigned long flags; int rc; + struct ipr_ioa_cfg *ioa_cfg; ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); rc = ipr_cancel_op(scsi_cmd); spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); LEAVE; return rc; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b4f3eec51bc9..ec03b42fa2b9 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1606,6 +1606,7 @@ struct ipr_cmnd { struct scsi_device *sdev; } u; + struct completion *eh_comp; struct ipr_hrr_queue *hrrq; struct ipr_ioa_cfg *ioa_cfg; }; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev) return -ENXIO; if (!get_device(&sdev->sdev_gendev)) return -ENXIO; - /* We can fail this if we're doing SCSI operations + /* We can fail try_module_get if we're doing SCSI operations * from module exit (like cache flush) */ - try_module_get(sdev->host->hostt->module); + __module_get(sdev->host->hostt->module); return 0; } @@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { -#ifdef CONFIG_MODULE_UNLOAD - struct module *module = sdev->host->hostt->module; - - /* The module refcount will be zero if scsi_device_get() - * was called from a module removal routine */ - if (module && module_refcount(module) != 0) - module_put(module); -#endif + module_put(sdev->host->hostt->module); put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b8b51bc29b4..4aca1b0378c2 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) req_opcode = cmd[3]; req_sa = get_unaligned_be16(cmd + 4); alloc_len = get_unaligned_be32(cmd + 6); - if (alloc_len < 4 && alloc_len > 0xffff) { + if (alloc_len < 4 || alloc_len > 0xffff) { mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); return check_condition_result; } @@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) a_len = 8192; else a_len = alloc_len; - arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL); + arr = kzalloc((a_len < 256) ? 
320 : a_len + 64, GFP_ATOMIC); if (NULL == arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d5c0b8cb0bb..17bb541f7cc2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd) struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; int ivecs, count; - BUG_ON(prot_sdb == NULL); + if (prot_sdb == NULL) { + /* + * This can happen if someone (e.g. multipath) + * queues a command to a device on an adapter + * that does not support DIX. + */ + WARN_ON_ONCE(1); + error = BLKPREP_KILL; + goto err_exit; + } + ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 7281316a5ecb..a67d37c7e3c0 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws) iounmap(clk_reg); dws->num_cs = 16; - dws->fifo_len = 40; /* FIFO has 40 words buffer */ #ifdef CONFIG_SPI_DW_MID_DMA dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index d0d5542efc06..8edcd1b84562 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws) if (!dws->fifo_len) { u32 fifo; - for (fifo = 2; fifo <= 257; fifo++) { + for (fifo = 2; fifo <= 256; fifo++) { dw_writew(dws, DW_SPI_TXFLTR, fifo); if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) break; } - dws->fifo_len = (fifo == 257) ? 0 : fifo; + dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; dw_writew(dws, DW_SPI_TXFLTR, 0); } } @@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) if (dws->dma_ops && dws->dma_ops->dma_init) { ret = dws->dma_ops->dma_init(dws); if (ret) { - dev_warn(&master->dev, "DMA init failed\n"); + dev_warn(dev, "DMA init failed\n"); dws->dma_inited = 0; } } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 05c623cfb078..23822e7df6c1 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data) cs_deassert(drv_data); } - spi_finalize_current_message(drv_data->master); drv_data->cur_chip = NULL; + spi_finalize_current_message(drv_data->master); } static void reset_sccr1(struct driver_data *drv_data) diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 96a5fc0878d8..3ab7a21445fc 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -82,7 +82,7 @@ struct sh_msiof_spi_priv { #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */ +#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ #define MDR1_FLD_SHIFT 2 #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ /* TMDR1 */ diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig index 81784c6f7b88..77d8753f6ba4 100644 --- a/drivers/staging/media/tlg2300/Kconfig +++ b/drivers/staging/media/tlg2300/Kconfig @@ -1,6 +1,7 @@ config VIDEO_TLG2300 tristate "Telegent TLG2300 USB video capture support (Deprecated)" depends on VIDEO_DEV && I2C && SND && DVB_CORE + depends on MEDIA_USB_SUPPORT select VIDEO_TUNER select VIDEO_TVEEPROM 
depends on RC_CORE diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 5927c0a98a74..bcfd2a22208f 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = { .shutdown = cdns_wdt_shutdown, .driver = { .name = "cdns-wdt", - .owner = THIS_MODULE, .of_match_table = cdns_wdt_of_match, .pm = &cdns_wdt_pm_ops, }, diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index d6add516a7a7..5142bbabe027 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -52,6 +52,8 @@ #define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ #define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ +#define IMX2_WDT_WMCR 0x08 /* Misc Register */ + #define IMX2_WDT_MAX_TIME 128 #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ @@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) imx2_wdt_ping_if_active(wdog); + /* + * Disable the watchdog power down counter at boot. Otherwise the power + * down counter will pull down the #WDOG interrupt line for one clock + * cycle. + */ + regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0); + ret = watchdog_register_device(wdog); if (ret) { dev_err(&pdev->dev, "cannot register watchdog device\n"); @@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -/* Disable watchdog if it is active during suspend */ +/* Disable watchdog if it is active or non-active but still running */ static int imx2_wdt_suspend(struct device *dev) { struct watchdog_device *wdog = dev_get_drvdata(dev); struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); - imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); - imx2_wdt_ping(wdog); + /* The watchdog IP block is running */ + if (imx2_wdt_is_running(wdev)) { + imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); + imx2_wdt_ping(wdog); - /* Watchdog has been stopped but IP block is still running */ - if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev)) - del_timer_sync(&wdev->timer); + /* The watchdog is not active */ + if (!watchdog_active(wdog)) + del_timer_sync(&wdev->timer); + } clk_disable_unprepare(wdev->clk); @@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev) clk_prepare_enable(wdev->clk); if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { - /* Resumes from deep sleep we need restart - * the watchdog again. + /* + * If the watchdog is still active and resumes + * from deep sleep state, need to restart the + * watchdog again. */ imx2_wdt_setup(wdog); imx2_wdt_set_timeout(wdog, wdog->timeout); imx2_wdt_ping(wdog); } else if (imx2_wdt_is_running(wdev)) { + /* Resuming from non-deep sleep state. */ + imx2_wdt_set_timeout(wdog, wdog->timeout); imx2_wdt_ping(wdog); - mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); + /* + * But the watchdog is not active, then start + * the timer again. + */ + if (!watchdog_active(wdog)) + mod_timer(&wdev->timer, + jiffies + wdog->timeout * HZ / 2); } return 0; diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c index ef6a298e8c45..1f4155ee3404 100644 --- a/drivers/watchdog/meson_wdt.c +++ b/drivers/watchdog/meson_wdt.c @@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = { .remove = meson_wdt_remove, .shutdown = meson_wdt_shutdown, .driver = { - .owner = THIS_MODULE, .name = DRV_NAME, .of_match_table = meson_wdt_dt_ids, }, |
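The spi-dw.c hunk above fixes the FIFO depth auto-detection in spi_hw_init(): the probe writes increasing candidate thresholds into DW_SPI_TXFLTR and stops at the first value that does not read back, and the corrected loop bounds remove the earlier off-by-one. The sketch below simulates that probing idea in plain C; the register model (a threshold sticks only while it is below the FIFO depth) is a simplification for illustration, not the exact DesignWare register behaviour.

#include <stdio.h>

#define SIM_FIFO_DEPTH 40   /* depth the simulated "hardware" really has */

static unsigned int txfltr; /* simulated TX FIFO threshold register */

static void reg_write(unsigned int val)
{
    if (val < SIM_FIFO_DEPTH)   /* out-of-range writes are ignored */
        txfltr = val;
}

static unsigned int reg_read(void)
{
    return txfltr;
}

int main(void)
{
    unsigned int fifo, fifo_len;

    for (fifo = 1; fifo <= 256; fifo++) {
        reg_write(fifo);
        if (reg_read() != fifo)
            break;              /* first value that did not stick */
    }
    /* fifo == 1: even the smallest threshold was rejected, depth unknown */
    fifo_len = (fifo == 1) ? 0 : fifo;
    reg_write(0);               /* restore the threshold */

    printf("detected FIFO depth: %u\n", fifo_len);
    return 0;
}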