Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r-- | drivers/spi/spi.c | 431
1 file changed, 344 insertions(+), 87 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 75ac046cae52..38b4c78df506 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -92,7 +92,7 @@ static ssize_t driver_override_store(struct device *dev,
 	if (len) {
 		spi->driver_override = driver_override;
 	} else {
-		/* Emptry string, disable driver override */
+		/* Empty string, disable driver override */
 		spi->driver_override = NULL;
 		kfree(driver_override);
 	}
@@ -469,7 +469,7 @@ static LIST_HEAD(board_list);
 static LIST_HEAD(spi_controller_list);
 
 /*
- * Used to protect add/del opertion for board_info list and
+ * Used to protect add/del operation for board_info list and
  * spi_controller list, and their matching process
  * also used to protect object of type struct idr
  */
@@ -775,6 +775,15 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 
 static void spi_set_cs(struct spi_device *spi, bool enable)
 {
+	bool enable1 = enable;
+
+	if (!spi->controller->set_cs_timing) {
+		if (enable1)
+			spi_delay_exec(&spi->controller->cs_setup, NULL);
+		else
+			spi_delay_exec(&spi->controller->cs_hold, NULL);
+	}
+
 	if (spi->mode & SPI_CS_HIGH)
 		enable = !enable;
 
@@ -800,6 +809,11 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	} else if (spi->controller->set_cs) {
 		spi->controller->set_cs(spi, !enable);
 	}
+
+	if (!spi->controller->set_cs_timing) {
+		if (!enable1)
+			spi_delay_exec(&spi->controller->cs_inactive, NULL);
+	}
 }
 
 #ifdef CONFIG_HAS_DMA
@@ -1106,42 +1120,79 @@ static void _spi_transfer_delay_ns(u32 ns)
 	}
 }
 
-static void _spi_transfer_cs_change_delay(struct spi_message *msg,
-					  struct spi_transfer *xfer)
+int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
 {
-	u32 delay = xfer->cs_change_delay;
-	u32 unit = xfer->cs_change_delay_unit;
+	u32 delay = _delay->value;
+	u32 unit = _delay->unit;
 	u32 hz;
 
-	/* return early on "fast" mode - for everything but USECS */
-	if (!delay && unit != SPI_DELAY_UNIT_USECS)
-		return;
+	if (!delay)
+		return 0;
 
 	switch (unit) {
 	case SPI_DELAY_UNIT_USECS:
-		/* for compatibility use default of 10us */
-		if (!delay)
-			delay = 10000;
-		else
-			delay *= 1000;
+		delay *= 1000;
 		break;
 	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
 		break;
 	case SPI_DELAY_UNIT_SCK:
+		/* clock cycles need to be obtained from spi_transfer */
+		if (!xfer)
+			return -EINVAL;
 		/* if there is no effective speed know, then approximate
 		 * by underestimating with half the requested hz
 		 */
 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+		if (!hz)
+			return -EINVAL;
 		delay *= DIV_ROUND_UP(1000000000, hz);
 		break;
 	default:
+		return -EINVAL;
+	}
+
+	return delay;
+}
+EXPORT_SYMBOL_GPL(spi_delay_to_ns);
+
+int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
+{
+	int delay;
+
+	if (!_delay)
+		return -EINVAL;
+
+	delay = spi_delay_to_ns(_delay, xfer);
+	if (delay < 0)
+		return delay;
+
+	_spi_transfer_delay_ns(delay);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_delay_exec);
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+					  struct spi_transfer *xfer)
+{
+	u32 delay = xfer->cs_change_delay.value;
+	u32 unit = xfer->cs_change_delay.unit;
+	int ret;
+
+	/* return early on "fast" mode - for everything but USECS */
+	if (!delay) {
+		if (unit == SPI_DELAY_UNIT_USECS)
+			_spi_transfer_delay_ns(10000);
+		return;
+	}
+
+	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
+	if (ret) {
 		dev_err_once(&msg->spi->dev,
 			     "Use of unsupported delay unit %i, using default of 10us\n",
-			     xfer->cs_change_delay_unit);
-		delay = 10000;
+			     unit);
+		_spi_transfer_delay_ns(10000);
 	}
-
-	/* now sleep for the requested amount of time */
-	_spi_transfer_delay_ns(delay);
 }
 
 /*
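[Illustration, not part of the patch] The hunks above make spi_set_cs() honour the controller's cs_setup/cs_hold/cs_inactive delays and introduce spi_delay_to_ns()/spi_delay_exec(), which work on a { value, unit } pair instead of raw microseconds. A minimal sketch of how a peripheral driver could describe a chip-select change delay with the new fields; the function name, buffer handling and the 25 us value are invented for the example:

/* Hypothetical client-driver sketch: describe the delay as value + unit
 * and let the SPI core convert and execute it via spi_delay_exec().
 */
static int example_write(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
		.cs_change = 1,
		/* 25 us pause around the chip-select toggle (arbitrary value) */
		.cs_change_delay = {
			.value = 25,
			.unit = SPI_DELAY_UNIT_USECS,
		},
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);
	return spi_sync(spi, &msg);
}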
@@ -1171,6 +1222,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
 		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
 
+		if (!ctlr->ptp_sts_supported) {
+			xfer->ptp_sts_word_pre = 0;
+			ptp_read_system_prets(xfer->ptp_sts);
+		}
+
 		if (xfer->tx_buf || xfer->rx_buf) {
 			reinit_completion(&ctlr->xfer_completion);
 
@@ -1197,13 +1253,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 					xfer->len);
 		}
 
+		if (!ctlr->ptp_sts_supported) {
+			ptp_read_system_postts(xfer->ptp_sts);
+			xfer->ptp_sts_word_post = xfer->len;
+		}
+
 		trace_spi_transfer_stop(msg, xfer);
 
 		if (msg->status != -EINPROGRESS)
 			goto out;
 
-		if (xfer->delay_usecs)
-			_spi_transfer_delay_ns(xfer->delay_usecs * 1000);
+		spi_transfer_delay_exec(xfer);
 
 		if (xfer->cs_change) {
 			if (list_is_last(&xfer->transfer_list,
@@ -1265,8 +1325,10 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  */
 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
-	unsigned long flags;
+	struct spi_transfer *xfer;
+	struct spi_message *msg;
 	bool was_busy = false;
+	unsigned long flags;
 	int ret;
 
 	/* Lock queue */
@@ -1325,10 +1387,10 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	}
 
 	/* Extract head of queue */
-	ctlr->cur_msg =
-		list_first_entry(&ctlr->queue, struct spi_message, queue);
+	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
+	ctlr->cur_msg = msg;
 
-	list_del_init(&ctlr->cur_msg->queue);
+	list_del_init(&msg->queue);
 	if (ctlr->busy)
 		was_busy = true;
 	else
@@ -1361,7 +1423,7 @@
 			if (ctlr->auto_runtime_pm)
 				pm_runtime_put(ctlr->dev.parent);
 
-			ctlr->cur_msg->status = ret;
+			msg->status = ret;
 			spi_finalize_current_message(ctlr);
 
 			mutex_unlock(&ctlr->io_mutex);
@@ -1369,28 +1431,35 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		}
 	}
 
-	trace_spi_message_start(ctlr->cur_msg);
+	trace_spi_message_start(msg);
 
 	if (ctlr->prepare_message) {
-		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
+		ret = ctlr->prepare_message(ctlr, msg);
 		if (ret) {
 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
 				ret);
-			ctlr->cur_msg->status = ret;
+			msg->status = ret;
 			spi_finalize_current_message(ctlr);
 			goto out;
 		}
 		ctlr->cur_msg_prepared = true;
 	}
 
-	ret = spi_map_msg(ctlr, ctlr->cur_msg);
+	ret = spi_map_msg(ctlr, msg);
 	if (ret) {
-		ctlr->cur_msg->status = ret;
+		msg->status = ret;
 		spi_finalize_current_message(ctlr);
 		goto out;
 	}
 
-	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
+	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			xfer->ptp_sts_word_pre = 0;
+			ptp_read_system_prets(xfer->ptp_sts);
+		}
+	}
+
+	ret = ctlr->transfer_one_message(ctlr, msg);
 	if (ret) {
 		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
@@ -1418,6 +1487,93 @@ static void spi_pump_messages(struct kthread_work *work)
 }
 
 /**
+ * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
+ *			    TX timestamp for the requested byte from the SPI
+ *			    transfer. The frequency with which this function
+ *			    must be called (once per word, once for the whole
+ *			    transfer, once per batch of words etc) is arbitrary
+ *			    as long as the @tx buffer offset is greater than or
+ *			    equal to the requested byte at the time of the
+ *			    call. The timestamp is only taken once, at the
+ *			    first such call. It is assumed that the driver
+ *			    advances its @tx buffer pointer monotonically.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will disable IRQs and preemption for the duration of the
+ *	      transfer, for less jitter in time measurement. Only compatible
+ *	      with PIO drivers. If true, must follow up with
+ *	      spi_take_timestamp_post or otherwise system will crash.
+ *	      WARNING: for fully predictable results, the CPU frequency must
+ *	      also be under control (governor).
+ */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+			    struct spi_transfer *xfer,
+			    size_t progress, bool irqs_off)
+{
+	if (!xfer->ptp_sts)
+		return;
+
+	if (xfer->timestamped_pre)
+		return;
+
+	if (progress < xfer->ptp_sts_word_pre)
+		return;
+
+	/* Capture the resolution of the timestamp */
+	xfer->ptp_sts_word_pre = progress;
+
+	xfer->timestamped_pre = true;
+
+	if (irqs_off) {
+		local_irq_save(ctlr->irq_flags);
+		preempt_disable();
+	}
+
+	ptp_read_system_prets(xfer->ptp_sts);
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
+
+/**
+ * spi_take_timestamp_post - helper for drivers to collect the end of the
+ *			     TX timestamp for the requested byte from the SPI
+ *			     transfer. Can be called with an arbitrary
+ *			     frequency: only the first call where @tx exceeds
+ *			     or is equal to the requested word will be
+ *			     timestamped.
+ * @ctlr: Pointer to the spi_controller structure of the driver
+ * @xfer: Pointer to the transfer being timestamped
+ * @progress: How many words (not bytes) have been transferred so far
+ * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
+ */
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+			     struct spi_transfer *xfer,
+			     size_t progress, bool irqs_off)
+{
+	if (!xfer->ptp_sts)
+		return;
+
+	if (xfer->timestamped_post)
+		return;
+
+	if (progress < xfer->ptp_sts_word_post)
+		return;
+
+	ptp_read_system_postts(xfer->ptp_sts);
+
+	if (irqs_off) {
+		local_irq_restore(ctlr->irq_flags);
+		preempt_enable();
+	}
+
+	/* Capture the resolution of the timestamp */
+	xfer->ptp_sts_word_post = progress;
+
+	xfer->timestamped_post = true;
+}
+EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
+
+/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
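[Illustration, not part of the patch] spi_take_timestamp_pre()/spi_take_timestamp_post() are meant to be called by controller drivers around the word selected through xfer->ptp_sts_word_pre/post. A rough sketch of a PIO transmit loop using them; example_write_fifo() and the per-byte loop are invented, and real drivers may batch words differently:

/* Hypothetical PIO controller sketch: bracket the requested word with the
 * new timestamping helpers. 'true' asks for IRQs and preemption to be
 * disabled between the pre and post calls, for lower jitter.
 */
static void example_pio_tx(struct spi_controller *ctlr,
			   struct spi_transfer *xfer)
{
	const u8 *buf = xfer->tx_buf;
	size_t i;

	for (i = 0; i < xfer->len; i++) {
		/* arms the PTP system timestamp once i reaches ptp_sts_word_pre */
		spi_take_timestamp_pre(ctlr, xfer, i, true);

		example_write_fifo(ctlr, buf[i]);	/* invented hardware access */

		/* closes it once the requested word has gone out */
		spi_take_timestamp_post(ctlr, xfer, i + 1, true);
	}
}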
@@ -1434,7 +1590,7 @@ static void spi_pump_messages(struct kthread_work *work)
  */
 static void spi_set_thread_rt(struct spi_controller *ctlr)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 
 	dev_info(&ctlr->dev,
 		"will run message pump with realtime priority\n");
@@ -1502,6 +1658,7 @@ EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
  */
 void spi_finalize_current_message(struct spi_controller *ctlr)
 {
+	struct spi_transfer *xfer;
 	struct spi_message *mesg;
 	unsigned long flags;
 	int ret;
@@ -1510,6 +1667,20 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	mesg = ctlr->cur_msg;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
+	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+			ptp_read_system_postts(xfer->ptp_sts);
+			xfer->ptp_sts_word_post = xfer->len;
+		}
+	}
+
+	if (unlikely(ctlr->ptp_sts_supported)) {
+		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
+			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
+			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
+		}
+	}
+
 	spi_unmap_msg(ctlr, mesg);
 
 	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
@@ -1710,15 +1881,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 		spi->mode |= SPI_3WIRE;
 	if (of_property_read_bool(nc, "spi-lsb-first"))
 		spi->mode |= SPI_LSB_FIRST;
-
-	/*
-	 * For descriptors associated with the device, polarity inversion is
-	 * handled in the gpiolib, so all chip selects are "active high" in
-	 * the logical sense, the gpiolib will invert the line if need be.
-	 */
-	if (ctlr->use_gpio_descriptors)
-		spi->mode |= SPI_CS_HIGH;
-	else if (of_property_read_bool(nc, "spi-cs-high"))
+	if (of_property_read_bool(nc, "spi-cs-high"))
 		spi->mode |= SPI_CS_HIGH;
 
 	/* Device DUAL/QUAD mode */
@@ -1782,6 +1945,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 	}
 	spi->chip_select = value;
 
+	/*
+	 * For descriptors associated with the device, polarity inversion is
+	 * handled in the gpiolib, so all gpio chip selects are "active high"
+	 * in the logical sense, the gpiolib will invert the line if need be.
+	 */
+	if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
+	    ctlr->cs_gpiods[spi->chip_select])
+		spi->mode |= SPI_CS_HIGH;
+
 	/* Device speed */
 	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
 	if (rc) {
@@ -2105,8 +2277,8 @@ static int match_true(struct device *dev, void *data)
 	return 1;
 }
 
-static ssize_t spi_slave_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
 {
 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
 						   dev);
@@ -2117,9 +2289,8 @@ static ssize_t spi_slave_show(struct device *dev,
 			 child ? to_spi_device(child)->modalias : NULL);
 }
 
-static ssize_t spi_slave_store(struct device *dev,
-			       struct device_attribute *attr, const char *buf,
-			       size_t count)
+static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
 {
 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
 						   dev);
@@ -2157,7 +2328,7 @@ static ssize_t spi_slave_store(struct device *dev,
 	return count;
 }
 
-static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
+static DEVICE_ATTR_RW(slave);
 
 static struct attribute *spi_slave_attrs[] = {
 	&dev_attr_slave.attr,
@@ -2188,8 +2359,10 @@ extern struct class spi_slave_class;	/* dummy */
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
- *	memory is in the driver_data field of the returned device,
- *	accessible with spi_controller_get_devdata().
+ *	memory is in the driver_data field of the returned device, accessible
+ *	with spi_controller_get_devdata(); the memory is cacheline aligned;
+ *	drivers granting DMA access to portions of their private data need to
+ *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
@@ -2211,11 +2384,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
 					      unsigned int size, bool slave)
 {
 	struct spi_controller	*ctlr;
+	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
 
 	if (!dev)
 		return NULL;
 
-	ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
+	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
 	if (!ctlr)
 		return NULL;
 
@@ -2229,14 +2403,14 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
 		ctlr->dev.class = &spi_master_class;
 	ctlr->dev.parent = dev;
 	pm_suspend_ignore_children(&ctlr->dev, true);
-	spi_controller_set_devdata(ctlr, &ctlr[1]);
+	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
 
 	return ctlr;
 }
 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
 
 #ifdef CONFIG_OF
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
 {
 	int nb, i, *cs;
 	struct device_node *np = ctlr->dev.of_node;
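[Illustration, not part of the patch] Because the devdata area now starts on a cacheline boundary, the updated __spi_alloc_controller() kernel-doc asks drivers that let DMA touch part of their private data to round the requested size up themselves. A hypothetical probe sketch; struct example_spi, its dma_buf member and example_probe() are invented:

/* Hypothetical controller-driver sketch: keep a DMA-accessed buffer in the
 * private data and round the allocation up as the kernel-doc suggests.
 */
struct example_spi {
	void __iomem *regs;
	u8 dma_buf[64] ____cacheline_aligned;	/* portion handed to DMA */
};

static int example_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct example_spi *priv;

	ctlr = spi_alloc_master(&pdev->dev,
				ALIGN(sizeof(*priv), dma_get_cache_alignment()));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);
	/* ... initialize priv and ctlr, then devm_spi_register_controller() ... */
	return 0;
}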
@@ -2269,7 +2443,7 @@
 	return 0;
 }
 #else
-static int of_spi_register_master(struct spi_controller *ctlr)
+static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
 {
 	return 0;
 }
@@ -2284,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
 	int nb, i;
 	struct gpio_desc **cs;
 	struct device *dev = &ctlr->dev;
+	unsigned long native_cs_mask = 0;
+	unsigned int num_cs_gpios = 0;
 
 	nb = gpiod_count(dev, "cs");
 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
@@ -2325,7 +2501,22 @@
 			if (!gpioname)
 				return -ENOMEM;
 			gpiod_set_consumer_name(cs[i], gpioname);
+			num_cs_gpios++;
+			continue;
+		}
+
+		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
+			dev_err(dev, "Invalid native chip select %d\n", i);
+			return -EINVAL;
 		}
+		native_cs_mask |= BIT(i);
+	}
+
+	ctlr->unused_native_cs = ffz(native_cs_mask);
+	if (num_cs_gpios && ctlr->max_native_cs &&
+	    ctlr->unused_native_cs >= ctlr->max_native_cs) {
+		dev_err(dev, "No unused native chip select available\n");
+		return -EINVAL;
 	}
 
 	return 0;
@@ -2456,7 +2647,7 @@ int spi_register_controller(struct spi_controller *ctlr)
 			ctlr->mode_bits |= SPI_CS_HIGH;
 	} else {
 		/* Legacy code path for GPIOs from DT */
-		status = of_spi_register_master(ctlr);
+		status = of_spi_get_gpio_numbers(ctlr);
 		if (status)
 			return status;
 	}
@@ -2868,10 +3059,11 @@ struct spi_replaced_transfers *spi_replace_transfers(
 		/* add to list */
 		list_add(&xfer->transfer_list, rxfer->replaced_after);
 
-		/* clear cs_change and delay_usecs for all but the last */
+		/* clear cs_change and delay for all but the last */
 		if (i) {
 			xfer->cs_change = false;
 			xfer->delay_usecs = 0;
+			xfer->delay.value = 0;
 		}
 	}
 
@@ -3088,7 +3280,29 @@ int spi_setup(struct spi_device *spi)
 	if (spi->controller->setup)
 		status = spi->controller->setup(spi);
 
-	spi_set_cs(spi, false);
+	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+		status = pm_runtime_get_sync(spi->controller->dev.parent);
+		if (status < 0) {
+			pm_runtime_put_noidle(spi->controller->dev.parent);
+			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+				status);
+			return status;
+		}
+
+		/*
+		 * We do not want to return positive value from pm_runtime_get,
+		 * there are many instances of devices calling spi_setup() and
+		 * checking for a non-zero return value instead of a negative
+		 * return value.
+		 */
+		status = 0;
+
+		spi_set_cs(spi, false);
+		pm_runtime_mark_last_busy(spi->controller->dev.parent);
+		pm_runtime_put_autosuspend(spi->controller->dev.parent);
+	} else {
+		spi_set_cs(spi, false);
+	}
 
 	if (spi->rt && !spi->controller->rt) {
 		spi->controller->rt = true;
@@ -3111,18 +3325,71 @@ EXPORT_SYMBOL_GPL(spi_setup);
 /**
 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
 * @spi: the device that requires specific CS timing configuration
- * @setup: CS setup time in terms of clock count
- * @hold: CS hold time in terms of clock count
- * @inactive_dly: CS inactive delay between transfers in terms of clock count
+ * @setup: CS setup time specified via @spi_delay
+ * @hold: CS hold time specified via @spi_delay
+ * @inactive: CS inactive delay between transfers specified via @spi_delay
+ *
+ * Return: zero on success, else a negative error code.
 */
-void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
-		       u8 inactive_dly)
+int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
+		      struct spi_delay *hold, struct spi_delay *inactive)
 {
+	size_t len;
+
 	if (spi->controller->set_cs_timing)
-		spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
+		return spi->controller->set_cs_timing(spi, setup, hold,
+						      inactive);
+
+	if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
+	    (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
+	    (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
+		dev_err(&spi->dev,
+			"Clock-cycle delays for CS not supported in SW mode\n");
+		return -ENOTSUPP;
+	}
+
+	len = sizeof(struct spi_delay);
+
+	/* copy delays to controller */
+	if (setup)
+		memcpy(&spi->controller->cs_setup, setup, len);
+	else
+		memset(&spi->controller->cs_setup, 0, len);
+
+	if (hold)
+		memcpy(&spi->controller->cs_hold, hold, len);
+	else
+		memset(&spi->controller->cs_hold, 0, len);
+
+	if (inactive)
+		memcpy(&spi->controller->cs_inactive, inactive, len);
+	else
+		memset(&spi->controller->cs_inactive, 0, len);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
 
+static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
+				       struct spi_device *spi)
+{
+	int delay1, delay2;
+
+	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
+	if (delay1 < 0)
+		return delay1;
+
+	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
+	if (delay2 < 0)
+		return delay2;
+
+	if (delay1 < delay2)
+		memcpy(&xfer->word_delay, &spi->word_delay,
+		       sizeof(xfer->word_delay));
+
+	return 0;
+}
+
 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
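[Illustration, not part of the patch] spi_set_cs_timing() now takes three struct spi_delay pointers and, when the controller has no set_cs_timing hook, stores them in the cs_setup/cs_hold/cs_inactive fields that spi_set_cs() consumes earlier in this diff; SPI_DELAY_UNIT_SCK is rejected in that software fallback. A hedged caller sketch with arbitrary numbers:

/* Hypothetical caller sketch: 5 us setup/hold and 10 us inactive time */
static int example_configure_cs(struct spi_device *spi)
{
	struct spi_delay setup    = { .value = 5,  .unit = SPI_DELAY_UNIT_USECS };
	struct spi_delay hold     = { .value = 5,  .unit = SPI_DELAY_UNIT_USECS };
	struct spi_delay inactive = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };

	/* returns -ENOTSUPP for SPI_DELAY_UNIT_SCK without a set_cs_timing hook */
	return spi_set_cs_timing(spi, &setup, &hold, &inactive);
}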
@@ -3258,8 +3525,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 				return -EINVAL;
 		}
 
-		if (xfer->word_delay_usecs < spi->word_delay_usecs)
-			xfer->word_delay_usecs = spi->word_delay_usecs;
+		if (_spi_xfer_word_delay_update(xfer, spi))
+			return -EINVAL;
 	}
 
 	message->status = -EINPROGRESS;
@@ -3270,6 +3537,7 @@
 static int __spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
+	struct spi_transfer *xfer;
 
 	/*
 	 * Some controllers do not support doing regular SPI transfers. Return
@@ -3285,6 +3553,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 
 	trace_spi_message_submit(message);
 
+	if (!ctlr->ptp_sts_supported) {
+		list_for_each_entry(xfer, &message->transfers, transfer_list) {
+			xfer->ptp_sts_word_pre = 0;
+			ptp_read_system_prets(xfer->ptp_sts);
+		}
+	}
+
 	return ctlr->transfer(spi, message);
 }
 
@@ -3652,37 +3927,25 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_OF)
-static int __spi_of_device_match(struct device *dev, const void *data)
-{
-	return dev->of_node == data;
-}
-
 /* must call put_device() when done with returned spi_device device */
 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
 {
-	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
-					     __spi_of_device_match);
+	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
+
 	return dev ? to_spi_device(dev) : NULL;
 }
 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
 #endif /* IS_ENABLED(CONFIG_OF) */
 
 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
-static int __spi_of_controller_match(struct device *dev, const void *data)
-{
-	return dev->of_node == data;
-}
-
 /* the spi controllers are not using spi_bus, so we find it with another way */
 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
 {
 	struct device *dev;
 
-	dev = class_find_device(&spi_master_class, NULL, node,
-				__spi_of_controller_match);
+	dev = class_find_device_by_of_node(&spi_master_class, node);
 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
-		dev = class_find_device(&spi_slave_class, NULL, node,
-					__spi_of_controller_match);
+		dev = class_find_device_by_of_node(&spi_slave_class, node);
 	if (!dev)
 		return NULL;
 
@@ -3753,11 +4016,6 @@ static int spi_acpi_controller_match(struct device *dev, const void *data)
 	return ACPI_COMPANION(dev->parent) == data;
 }
 
-static int spi_acpi_device_match(struct device *dev, const void *data)
-{
-	return ACPI_COMPANION(dev) == data;
-}
-
 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;
@@ -3777,8 +4035,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;
 
-	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
-
+	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
 	return dev ? to_spi_device(dev) : NULL;
 }
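[Illustration, not part of the patch] The lookup helpers at the end keep their behaviour and only delegate the matching to bus_find_device_by_of_node()/class_find_device_by_of_node(); as the retained comment says, the caller still owns a device reference. A small hypothetical user of of_find_spi_device_by_node():

/* Hypothetical caller sketch: resolve a DT node and drop the reference */
static int example_lookup(struct device_node *node)
{
	struct spi_device *spi = of_find_spi_device_by_node(node);

	if (!spi)
		return -ENODEV;

	dev_info(&spi->dev, "found SPI device for %pOF\n", node);
	put_device(&spi->dev);	/* reference taken by the lookup helper */
	return 0;
}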