author    Lars Ellenberg <lars.ellenberg@linbit.com>    2011-11-14 15:42:37 +0100
committer Philipp Reisner <philipp.reisner@linbit.com>  2012-11-08 16:58:34 +0100
commit    d5b27b01f17ef1f0badc45f9eea521be3457c9cb (patch)
tree      e0760531801c0b5b51ea8b3f05f9c0c5d85ff60e /drivers/block/drbd/drbd_req.c
parent    8c0785a5c9a0f2472aff68dc32247be01728c416 (diff)
drbd: move the drbd_work_queue from drbd_socket to drbd_connection
cherry-picked and adapted from drbd 9 devel branch

In 8.4, we don't distinguish between "resource work" and
"connection work" yet; we have one worker for both, as we still
have only one connection.

We only ever used the "data.work", no need to keep the "meta.work"
around. Move tconn->data.work to tconn->sender_work.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
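For orientation, a minimal sketch of the data-structure move this patch implements. The queue location and the field names data.work and sender_work are taken from the patch itself; the surrounding members are simplified stand-ins, not the verbatim drbd_int.h definitions:

/* Minimal sketch, assuming simplified struct layouts -- not verbatim drbd_int.h. */
struct drbd_work_queue {
	struct list_head q;
	spinlock_t q_lock;
	wait_queue_head_t q_wait;
};

struct drbd_socket {
	struct mutex mutex;
	/* before this patch: struct drbd_work_queue work;
	 * each socket (data, meta) carried a queue, but only
	 * data.work was ever used; meta.work sat idle */
};

struct drbd_tconn {			/* "drbd_connection" in drbd 9 naming */
	struct drbd_socket data;	/* data packets */
	struct drbd_socket meta;	/* control packets */
	struct drbd_work_queue sender_work;	/* after: one queue per connection */
};

With the queue hanging off the connection, the single 8.4 worker drains one well-defined queue instead of reaching through a socket that merely happened to host it.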
Diffstat (limited to 'drivers/block/drbd/drbd_req.c')
-rw-r--r--  drivers/block/drbd/drbd_req.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a131174b6677..e609557a9425 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -170,7 +170,7 @@ static void queue_barrier(struct drbd_conf *mdev)
 	 * dec_ap_pending will be done in got_BarrierAck
 	 * or (on connection loss) in tl_clear. */
 	inc_ap_pending(mdev);
-	drbd_queue_work(&tconn->data.work, &b->w);
+	drbd_queue_work(&tconn->sender_work, &b->w);
 	set_bit(CREATE_BARRIER, &tconn->flags);
 }
 
@@ -483,7 +483,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_read_req;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case QUEUE_FOR_NET_WRITE:
@@ -527,7 +527,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_dblock;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
 		rcu_read_lock();
@@ -542,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 	case QUEUE_FOR_SEND_OOS:
 		req->rq_state |= RQ_NET_QUEUED;
 		req->w.cb = w_send_out_of_sync;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case READ_RETRY_REMOTE_CANCELED:
@@ -682,7 +682,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		get_ldev(mdev);
 		req->w.cb = w_restart_disk_io;
-		drbd_queue_work(&mdev->tconn->data.work, &req->w);
+		drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 		break;
 
 	case RESEND:
@@ -692,7 +692,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		   During connection handshake, we ensure that the peer was not rebooted. */
 		if (!(req->rq_state & RQ_NET_OK)) {
 			if (req->w.cb) {
-				drbd_queue_work(&mdev->tconn->data.work, &req->w);
+				drbd_queue_work(&mdev->tconn->sender_work, &req->w);
 				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
 			}
 			break;
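Every hunk above changes only which queue the work item is posted to; the drbd_queue_work() producer side is untouched. As a rough sketch of what that helper does (simplified from DRBD's drbd_int.h of this era; the exact body may differ):

/* Sketch only: lock the queue, append the work item, wake the worker. */
static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;

	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	wake_up(&q->q_wait);		/* kick the single 8.4 worker thread */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

Because callers never touch the queue internals, the rename from tconn->data.work to tconn->sender_work is a pure mechanical substitution at each call site, exactly as the diff shows.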