Diffstat (limited to 'net/rds')
-rw-r--r--  net/rds/af_rds.c          | 11
-rw-r--r--  net/rds/cong.c            |  2
-rw-r--r--  net/rds/ib_cm.c           |  3
-rw-r--r--  net/rds/ib_rdma.c         |  5
-rw-r--r--  net/rds/ib_recv.c         |  4
-rw-r--r--  net/rds/ib_send.c         | 20
-rw-r--r--  net/rds/iw_cm.c           |  4
-rw-r--r--  net/rds/iw_recv.c         |  4
-rw-r--r--  net/rds/iw_send.c         |  3
-rw-r--r--  net/rds/loop.c            |  7
-rw-r--r--  net/rds/rdma.c            |  4
-rw-r--r--  net/rds/rdma_transport.c  |  5
-rw-r--r--  net/rds/rds.h             |  4
-rw-r--r--  net/rds/recv.c            |  2
-rw-r--r--  net/rds/send.c            | 40
-rw-r--r--  net/rds/tcp_recv.c        |  1
-rw-r--r--  net/rds/tcp_send.c        |  4
-rw-r--r--  net/rds/threads.c         |  2
18 files changed, 71 insertions, 54 deletions
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index f81862baf4d0..aebfecbdb841 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -158,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
 	unsigned int mask = 0;
 	unsigned long flags;
 
-	poll_wait(file, sk->sk_sleep, wait);
+	poll_wait(file, sk_sleep(sk), wait);
 
-	poll_wait(file, &rds_poll_waitq, wait);
+	if (rs->rs_seen_congestion)
+		poll_wait(file, &rds_poll_waitq, wait);
 
 	read_lock_irqsave(&rs->rs_recv_lock, flags);
 	if (!rs->rs_cong_monitor) {
@@ -182,6 +183,10 @@
 		mask |= (POLLOUT | POLLWRNORM);
 	read_unlock_irqrestore(&rs->rs_recv_lock, flags);
 
+	/* clear state any time we wake a seen-congested socket */
+	if (mask)
+		rs->rs_seen_congestion = 0;
+
 	return mask;
 }
 
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 			      struct rds_info_lengths *lens)
 {
 	struct rds_sock *rs;
-	struct sock *sk;
 	struct rds_incoming *inc;
 	unsigned long flags;
 	unsigned int total = 0;
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 	spin_lock_irqsave(&rds_sock_lock, flags);
 
 	list_for_each_entry(rs, &rds_sock_list, rs_item) {
-		sk = rds_rs_to_sk(rs);
 		read_lock(&rs->rs_recv_lock);
 
 		/* XXX too lazy to maintain counts.. */
diff --git a/net/rds/cong.c b/net/rds/cong.c
index f1da27ceb064..0871a29f0780 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 	spin_lock_irqsave(&rds_cong_lock, flags);
 
 	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
-		if (conn->c_loopback)
-			continue;
 		if (!test_and_set_bit(0, &conn->c_map_queued)) {
 			rds_stats_inc(s_cong_update_queued);
 			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 88d0856cb797..10ed0d55f759 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
 		break;
 	default:
-		rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+		rdsdebug("Fatal QP Event %u "
 			"- connection %pI4->%pI4, reconnecting\n",
 			event->event, &conn->c_laddr, &conn->c_faddr);
+		rds_conn_drop(conn);
 		break;
 	}
 }
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 059989fdb7d7..a54cd63f9e35 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
 {
 	flush_workqueue(rds_wq);
 	rds_ib_flush_mr_pool(pool, 1);
-	BUG_ON(atomic_read(&pool->item_count));
-	BUG_ON(atomic_read(&pool->free_pinned));
+	WARN_ON(atomic_read(&pool->item_count));
+	WARN_ON(atomic_read(&pool->free_pinned));
 	kfree(pool);
 }
 
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
 
 			/* FIXME we need a way to tell a r/w MR
 			 * from a r/o MR */
+			BUG_ON(in_interrupt());
 			set_page_dirty(page);
 			put_page(page);
 		}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c7dd11b835f0..c74e9904a6b2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 
 		rds_ib_stats_inc(s_ib_ack_send_failure);
-		/* Need to finesse this later. */
-		BUG();
+
+		rds_ib_conn_error(ic->conn, "sending ack failed\n");
 	} else
 		rds_ib_stats_inc(s_ib_ack_sent);
 }
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index a10fab6886d1..17fa80803ab0 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 				struct rds_message *rm;
 
 				rm = rds_send_get_message(conn, send->s_op);
-				if (rm)
+				if (rm) {
+					if (rm->m_rdma_op)
+						rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
 					rds_ib_send_rdma_complete(rm, wc.status);
+					rds_message_put(rm);
+				}
 			}
 
 			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	BUG_ON(off % RDS_FRAG_SIZE);
 	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
 
+	/* Do not send cong updates to IB loopback */
+	if (conn->c_loopback
+	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+	}
+
 	/* FIXME we may overallocate here */
 	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
 		i = 1;
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
 		adv_credits += posted;
 		BUG_ON(adv_credits > 255);
-	} else if (ic->i_rm != rm)
-		BUG();
+	}
 
 	send = &ic->i_sends[pos];
 	first = send;
@@ -714,8 +724,8 @@ add_header:
 			ic->i_rm = prev->s_rm;
 			prev->s_rm = NULL;
 		}
-		/* Finesse this later */
-		BUG();
+
+		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
 		goto out;
 	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3e9460f935d8..a9d951b4fbae 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
 	case IB_EVENT_QP_REQ_ERR:
 	case IB_EVENT_QP_FATAL:
 	default:
-		rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n",
+		rdsdebug("Fatal QP Event %u "
+			"- connection %pI4->%pI4, reconnecting\n",
 			event->event, &conn->c_laddr, &conn->c_faddr);
+		rds_conn_drop(conn);
 		break;
 	}
 }
 
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index da43ee840ca3..3d479067d54d 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
 		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 
 		rds_iw_stats_inc(s_iw_ack_send_failure);
-		/* Need to finesse this later. */
-		BUG();
+
+		rds_iw_conn_error(ic->conn, "sending ack failed\n");
 	} else
 		rds_iw_stats_inc(s_iw_ack_sent);
 }
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1379e9d66a78..52182ff7519e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 		rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
 		adv_credits += posted;
 		BUG_ON(adv_credits > 255);
-	} else if (ic->i_rm != rm)
-		BUG();
+	}
 
 	send = &ic->i_sends[pos];
 	first = send;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 0d7a159158b8..dd9879379457 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn,
 				  struct rds_cong_map *map,
 				  unsigned long offset)
 {
-	unsigned long i;
-
 	BUG_ON(offset);
 	BUG_ON(map != conn->c_lcong);
 
-	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
-		memcpy((void *)conn->c_fcong->m_page_addrs[i],
-		       (void *)map->m_page_addrs[i], PAGE_SIZE);
-	}
-
 	rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
 
 	return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 5ce9437cad67..75fd13bb631b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro)
 		/* Mark page dirty if it was possibly modified, which
 		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
-		if (!ro->r_write)
+		if (!ro->r_write) {
+			BUG_ON(in_interrupt());
 			set_page_dirty(page);
+		}
 		put_page(page);
 	}
 
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7b155081b4dc..e599ba2f950d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 		break;
 
 	case RDMA_CM_EVENT_DISCONNECTED:
-		printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
+		rdsdebug("DISCONNECT event - dropping connection "
 			"%pI4->%pI4\n", &conn->c_laddr, &conn->c_faddr);
 		rds_conn_drop(conn);
 		break;
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 
 	default:
 		/* things like device disconnect? */
-		printk(KERN_ERR "unknown event %u\n", event->event);
-		BUG();
+		printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
 		break;
 	}
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 85d6f897ecc7..c224b5bb3ba9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -388,6 +388,8 @@ struct rds_sock {
 
 	/* flag indicating we were congested or not */
 	int			rs_congested;
+	/* seen congestion (ENOBUFS) when sending? */
+	int			rs_seen_congestion;
 
 	/* rs_lock protects all these adjacent members before the newline */
 	spinlock_t		rs_lock;
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
 void rds_wake_sk_sleep(struct rds_sock *rs);
 static inline void __rds_wake_sk_sleep(struct sock *sk)
 {
-	wait_queue_head_t *waitq = sk->sk_sleep;
+	wait_queue_head_t *waitq = sk_sleep(sk);
 
 	if (!sock_flag(sk, SOCK_DEAD) && waitq)
 		wake_up(waitq);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index e2a2b9344f7b..795a00b7f2cb 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 				break;
 			}
 
-			timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 					(!list_empty(&rs->rs_notify_queue) ||
 					 rs->rs_cong_notify ||
 					 rds_next_incoming(rs, &inc)), timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index f04b929ded92..9c1c6bcaa6c9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message);
  */
 void rds_send_remove_from_sock(struct list_head *messages, int status)
 {
-	unsigned long flags = 0; /* silence gcc :P */
+	unsigned long flags;
 	struct rds_sock *rs = NULL;
 	struct rds_message *rm;
 
-	local_irq_save(flags);
 	while (!list_empty(messages)) {
+		int was_on_sock = 0;
+
 		rm = list_entry(messages->next, struct rds_message,
 				m_conn_item);
 		list_del_init(&rm->m_conn_item);
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 		 * while we're messing with it. It does not prevent the
 		 * message from being removed from the socket, though.
 		 */
-		spin_lock(&rm->m_rs_lock);
+		spin_lock_irqsave(&rm->m_rs_lock, flags);
 		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 			goto unlock_and_drop;
 
 		if (rs != rm->m_rs) {
 			if (rs) {
-				spin_unlock(&rs->rs_lock);
 				rds_wake_sk_sleep(rs);
 				sock_put(rds_rs_to_sk(rs));
 			}
 			rs = rm->m_rs;
-			spin_lock(&rs->rs_lock);
 			sock_hold(rds_rs_to_sk(rs));
 		}
+		spin_lock(&rs->rs_lock);
 
 		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 			struct rds_rdma_op *ro = rm->m_rdma_op;
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
 					notifier->n_status = status;
 				rm->m_rdma_op->r_notifier = NULL;
 			}
-			rds_message_put(rm);
+			was_on_sock = 1;
 			rm->m_rs = NULL;
 		}
+		spin_unlock(&rs->rs_lock);
 
 unlock_and_drop:
-		spin_unlock(&rm->m_rs_lock);
+		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 		rds_message_put(rm);
+		if (was_on_sock)
+			rds_message_put(rm);
 	}
 
 	if (rs) {
-		spin_unlock(&rs->rs_lock);
 		rds_wake_sk_sleep(rs);
 		sock_put(rds_rs_to_sk(rs));
 	}
-	local_irq_restore(flags);
 }
 
 /*
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 		list_move(&rm->m_sock_item, &list);
 		rds_send_sndbuf_remove(rs, rm);
 		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
-
-		/* If this is a RDMA operation, notify the app. */
-		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
 	}
 
 	/* order flag updates with the rs lock */
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 
 	spin_unlock_irqrestore(&rs->rs_lock, flags);
 
-	if (wake)
-		rds_wake_sk_sleep(rs);
-
 	conn = NULL;
 
 	/* now remove the messages from the conn list as needed */
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 		/* We do this here rather than in the loop above, so that
 		 * we don't have to nest m_rs_lock under rs->rs_lock */
 		spin_lock_irqsave(&rm->m_rs_lock, flags2);
+		/* If this is a RDMA operation, notify the app. */
+		spin_lock(&rs->rs_lock);
+		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
+		spin_unlock(&rs->rs_lock);
 		rm->m_rs = NULL;
 		spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
 
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 	if (conn)
 		spin_unlock_irqrestore(&conn->c_lock, flags);
 
+	if (wake)
+		rds_wake_sk_sleep(rs);
+
 	while (!list_empty(&list)) {
 		rm = list_entry(list.next, struct rds_message, m_sock_item);
 		list_del_init(&rm->m_sock_item);
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 	int ret = 0;
 	int queued = 0, allocated_mr = 0;
 	int nonblock = msg->msg_flags & MSG_DONTWAIT;
-	long timeo = sock_rcvtimeo(sk, nonblock);
+	long timeo = sock_sndtimeo(sk, nonblock);
 
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
 
 	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
-	if (ret)
+	if (ret) {
+		rs->rs_seen_congestion = 1;
 		goto out;
+	}
 
 	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
 				  dport, &queued)) {
@@ -911,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 			goto out;
 		}
 
-		timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
 					rds_send_queue_rm(rs, conn, rm,
 							  rs->rs_bound_port,
 							  dport,
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e08ec912d8b0..1aba6878fa5d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
 				goto out;
 			}
 
+			rds_stats_add(s_copy_to_user, to_copy);
 			size -= to_copy;
 			ret += to_copy;
 			skb_off += to_copy;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 34fdcc059e54..a28b895ff0d1 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk)
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
 out:
 	read_unlock(&sk->sk_callback_lock);
 }
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 00fa10e59af8..786c20eaaf5e 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -259,7 +259,7 @@ void rds_threads_exit(void)
 
 int __init rds_threads_init(void)
 {
-	rds_wq = create_singlethread_workqueue("krdsd");
+	rds_wq = create_workqueue("krdsd");
 	if (rds_wq == NULL)
 		return -ENOMEM;
 
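The af_rds.c, rds.h, and send.c hunks above work together: rds_sendmsg() sets rs_seen_congestion when rds_cong_wait() reports congestion, rds_poll() then sleeps on the global rds_poll_waitq only for such sockets, and the flag is cleared once poll reports readiness. The following is a minimal userspace C sketch of that pattern only; it is not kernel code, and the mock_* names and simplified mask bits are illustrative stand-ins rather than the RDS APIs.

/*
 * Userspace model of the congestion-aware poll logic added to rds_poll():
 * wait on the shared congestion waitqueue only if this socket previously
 * saw congestion, and clear that state whenever poll reports readiness.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_rds_sock {
	bool seen_congestion;	/* set when a send hit ENOBUFS */
	bool readable;
	bool writable;
};

static unsigned int mock_rds_poll(struct mock_rds_sock *rs)
{
	unsigned int mask = 0;

	if (rs->seen_congestion)
		printf("would also poll_wait() on the global rds_poll_waitq\n");

	if (rs->readable)
		mask |= 0x1;	/* stands in for POLLIN | POLLRDNORM */
	if (rs->writable)
		mask |= 0x4;	/* stands in for POLLOUT | POLLWRNORM */

	/* clear state any time we wake a seen-congested socket */
	if (mask)
		rs->seen_congestion = false;

	return mask;
}

int main(void)
{
	struct mock_rds_sock rs = { .seen_congestion = true, .writable = true };

	printf("mask=0x%x, seen_congestion now %d\n",
	       mock_rds_poll(&rs), rs.seen_congestion);
	return 0;
}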