path: root/drivers/s390/crypto/ap_bus.c
author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-09-21 14:12:53 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2016-12-14 16:33:38 +0100
commit     9af3e04ee41e6841b2accb9dc96562bcf4e59916 (patch)
tree       96bedbba070cc7d46a1470b1de0248bd3447a9b5  /drivers/s390/crypto/ap_bus.c
parent     0db78559f965a2e652dbe8acf35333f2081bf872 (diff)
s390/zcrypt: get rid of ap_poll_requests
The poll thread of the AP bus is burning CPU while waiting for crypto requests to complete. We can as well burn a few more cycles in the poll thread to check if there are pending requests and remove the atomic operations on the ap_poll_requests counter. This improves the code if the machine has adapter interrupts.

Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
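For readers who want the idea without the diff plumbing, here is a minimal userspace C sketch of the approach, assuming a fixed array of demo devices that each carry a lock and a queue_count (struct demo_device, NR_DEVICES and demo_pending_requests() are illustrative names, not part of the driver): instead of keeping a global atomic counter of in-flight requests in step on every submit and receive, the poll loop scans each device's queue count under its lock whenever it has to decide whether to sleep.

#include <pthread.h>
#include <stdio.h>

#define NR_DEVICES 4

/* Illustrative stand-in for a per-device state block. */
struct demo_device {
	pthread_mutex_t lock;
	int queue_count;	/* requests still queued on this device */
};

static struct demo_device devices[NR_DEVICES];

/*
 * Scan every device and report whether any of them still has work
 * pending.  This is the pattern the patch switches to: a short scan
 * each time the poll loop wakes up, instead of a global counter that
 * has to be kept in sync on the submit and receive paths.
 */
static int demo_pending_requests(void)
{
	int i, pending = 0;

	for (i = 0; i < NR_DEVICES && !pending; i++) {
		pthread_mutex_lock(&devices[i].lock);
		if (devices[i].queue_count > 0)
			pending = 1;
		pthread_mutex_unlock(&devices[i].lock);
	}
	return pending;
}

int main(void)
{
	int i;

	for (i = 0; i < NR_DEVICES; i++)
		pthread_mutex_init(&devices[i].lock, NULL);

	devices[2].queue_count = 3;	/* pretend one device has queued work */
	printf("pending: %d\n", demo_pending_requests());
	return 0;
}

Built with something like gcc -pthread demo.c, this prints "pending: 1" once any device has queued work; the trade-off is a little more work per poll-thread wakeup in exchange for no atomic bookkeeping on every request.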
Diffstat (limited to 'drivers/s390/crypto/ap_bus.c')
-rw-r--r--  drivers/s390/crypto/ap_bus.c  34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f6de22a4f7d9..fe1cfa4b22c9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -91,7 +91,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
 */
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
-static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -419,7 +418,6 @@ static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
			 ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
-		atomic_dec(&ap_poll_requests);
		ap_dev->queue_count--;
		if (ap_dev->queue_count > 0)
			mod_timer(&ap_dev->timeout,
@@ -436,7 +434,6 @@ static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
		if (!status.queue_empty || ap_dev->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
-		atomic_sub(ap_dev->queue_count, &ap_poll_requests);
		ap_dev->queue_count = 0;
		list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
		ap_dev->requestq_count += ap_dev->pendingq_count;
@@ -524,7 +521,6 @@ static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
-		atomic_inc(&ap_poll_requests);
		ap_dev->queue_count++;
		if (ap_dev->queue_count == 1)
			mod_timer(&ap_dev->timeout,
@@ -796,6 +792,27 @@ static void ap_tasklet_fn(unsigned long dummy)
	ap_sm_wait(wait);
}

+static int ap_pending_requests(void)
+{
+	struct ap_device *ap_dev;
+	int id, pending = 0;
+
+	for (id = 0; pending == 0 && id < AP_DEVICES; id++) {
+		spin_lock_bh(&ap_device_list_lock);
+		list_for_each_entry(ap_dev, &ap_device_list, list) {
+			spin_lock_bh(&ap_dev->lock);
+			if (ap_dev->queue_count)
+				pending = 1;
+			spin_unlock_bh(&ap_dev->lock);
+			if (pending)
+				break;
+		}
+		spin_unlock_bh(&ap_device_list_lock);
+	}
+
+	return pending;
+}
+
/**
* ap_poll_thread(): Thread that polls for finished requests.
* @data: Unused pointer
@@ -815,8 +832,7 @@ static int ap_poll_thread(void *data)
	while (!kthread_should_stop()) {
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
-		if (ap_suspend_flag ||
-		    atomic_read(&ap_poll_requests) <= 0) {
+		if (ap_suspend_flag || !ap_pending_requests()) {
			schedule();
			try_to_freeze();
		}
@@ -828,7 +844,8 @@ static int ap_poll_thread(void *data)
			continue;
		}
		ap_tasklet_fn(0);
-	} while (!kthread_should_stop());
+	}
+
	return 0;
}
@@ -1267,9 +1284,6 @@ static int ap_device_remove(struct device *dev)
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
-	spin_lock_bh(&ap_dev->lock);
-	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
-	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
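
One incidental cleanup in the last ap_poll_thread hunk deserves a note: the removed line "} while (!kthread_should_stop());" did not close a do-while. The loop above it is a plain while loop, so the trailing "while (...);" was parsed as a second, empty-bodied loop; it was dead code here because the first loop only exits once kthread_should_stop() reports true. A contrived standalone snippet, with a hypothetical should_stop() predicate standing in for kthread_should_stop(), shows the same parse:

#include <stdbool.h>
#include <stdio.h>

static int calls;

/* Hypothetical stop predicate: stays true once it has fired. */
static bool should_stop(void)
{
	return ++calls > 3;
}

int main(void)
{
	while (!should_stop()) {
		puts("working");
	} while (!should_stop());	/* a separate, empty-bodied loop */

	/*
	 * The trailing "while (...);" above is its own statement.  It
	 * exits immediately because should_stop() keeps returning true
	 * once it has fired, which is also why the removed line in
	 * ap_poll_thread was merely confusing rather than an actual bug.
	 */
	printf("should_stop() was called %d times\n", calls);
	return 0;
}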