author		Jon Paul Maloy <jon.maloy@ericsson.com>	2015-07-30 18:24:24 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-30 17:25:14 -0700
commit		23d8335d786472021b5c733f228c7074208dcfa0 (patch)
tree		c7bcdc08b5567835c5a38f0d93cc46c7e8a55fea /net/tipc
parent		598411d70f85dcf5b5c6c2369cc48637c251b656 (diff)
tipc: remove implicit message delivery in node_unlock()
After the most recent changes, every access call to a link that may add messages to the link's input queue is followed by an explicit call to tipc_sk_rcv(), using a reference to the correct queue. This means that the potentially hazardous implicit delivery, using tipc_node_unlock() in combination with a binary flag and a cached queue pointer, has now become redundant.

This commit removes that implicit delivery mechanism, both for regular data messages and for binding table update messages.

Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
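For context, a minimal sketch of the explicit-delivery pattern the message describes, mirroring the new code added at the end of the receive path in node.c. The wrapper function and the helper tipc_node_rcv_pkt() are illustrative assumptions, not part of the patch; tipc_node_lock()/tipc_node_unlock(), tipc_sk_rcv() and tipc_named_rcv() are the functions named in the diff below.

/* Sketch only: simplified receive path after this patch (not kernel code). */
static void example_rcv(struct net *net, struct tipc_node *n,
			struct tipc_link_entry *le, struct sk_buff *skb)
{
	tipc_node_lock(n);
	/* Link-level processing may append buffers to le->inputq or to
	 * n->bclink.namedq while the node lock is held.
	 */
	tipc_node_rcv_pkt(n, le, skb);	/* hypothetical helper */
	tipc_node_unlock(n);		/* no implicit delivery here any more */

	/* Delivery is now explicit, using a reference to the correct queue */
	if (!skb_queue_empty(&n->bclink.namedq))
		tipc_named_rcv(net, &n->bclink.namedq);
	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);
}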
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/link.c	| 21
-rw-r--r--	net/tipc/msg.h	| 22
-rw-r--r--	net/tipc/node.c	| 26
-rw-r--r--	net/tipc/node.h	|  4
4 files changed, 10 insertions(+), 63 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 3a92924711a1..2aa19de715f6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -559,8 +559,6 @@ void link_prepare_wakeup(struct tipc_link *l)
break;
skb_unlink(skb, &l->wakeupq);
skb_queue_tail(l->inputq, skb);
- l->owner->inputq = l->inputq;
- l->owner->action_flags |= TIPC_MSG_EVT;
}
}
@@ -598,8 +596,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
void tipc_link_reset(struct tipc_link *l)
{
- struct tipc_node *owner = l->owner;
-
tipc_link_fsm_evt(l, LINK_RESET_EVT);
/* Link is down, accept any session */
@@ -611,14 +607,10 @@ void tipc_link_reset(struct tipc_link *l)
/* Prepare for renewed mtu size negotiation */
l->mtu = l->advertised_mtu;
- /* Clean up all queues, except inputq: */
+ /* Clean up all queues: */
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
- if (!owner->inputq)
- owner->inputq = l->inputq;
- skb_queue_splice_init(&l->wakeupq, owner->inputq);
- if (!skb_queue_empty(owner->inputq))
- owner->action_flags |= TIPC_MSG_EVT;
+ skb_queue_splice_init(&l->wakeupq, l->inputq);
tipc_link_purge_backlog(l);
kfree_skb(l->reasm_buf);
@@ -972,7 +964,6 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
struct tipc_node *node = link->owner;
struct tipc_msg *msg = buf_msg(skb);
- u32 dport = msg_destport(msg);
switch (msg_user(msg)) {
case TIPC_LOW_IMPORTANCE:
@@ -980,17 +971,11 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
case CONN_MANAGER:
- if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
- node->inputq = link->inputq;
- node->action_flags |= TIPC_MSG_EVT;
- }
+ skb_queue_tail(link->inputq, skb);
return true;
case NAME_DISTRIBUTOR:
node->bclink.recv_permitted = true;
- node->namedq = link->namedq;
skb_queue_tail(link->namedq, skb);
- if (skb_queue_len(link->namedq) == 1)
- node->action_flags |= TIPC_NAMED_MSG_EVT;
return true;
case MSG_BUNDLER:
case TUNNEL_PROTOCOL:
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 53d98ef78650..a82c5848d4bc 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -862,28 +862,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
return skb;
}
-/* tipc_skb_queue_tail(): add buffer to tail of list;
- * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
- */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
- struct sk_buff *skb, u32 dport)
-{
- struct sk_buff *_skb = NULL;
- bool rv = false;
-
- spin_lock_bh(&list->lock);
- _skb = skb_peek_tail(list);
- if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
- (skb_queue_len(list) > 32))
- rv = true;
- __skb_queue_tail(list, skb);
- spin_unlock_bh(&list->lock);
- return rv;
-}
-
/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
* @list: list to be appended to
* @skb: buffer to add
diff --git a/net/tipc/node.c b/net/tipc/node.c
index cdca57be85bf..9e9b0938bd17 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -873,10 +873,8 @@ static void node_lost_contact(struct tipc_node *n_ptr,
SHORT_H_SIZE, 0, tn->own_addr,
conn->peer_node, conn->port,
conn->peer_port, TIPC_ERR_NO_NODE);
- if (likely(skb)) {
+ if (likely(skb))
skb_queue_tail(inputq, skb);
- n_ptr->action_flags |= TIPC_MSG_EVT;
- }
list_del(&conn->list);
kfree(conn);
}
@@ -923,27 +921,20 @@ void tipc_node_unlock(struct tipc_node *node)
u32 flags = node->action_flags;
u32 link_id = 0;
struct list_head *publ_list;
- struct sk_buff_head *inputq = node->inputq;
- struct sk_buff_head *namedq;
- if (likely(!flags || (flags == TIPC_MSG_EVT))) {
- node->action_flags = 0;
+ if (likely(!flags)) {
spin_unlock_bh(&node->lock);
- if (flags == TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
return;
}
addr = node->addr;
link_id = node->link_id;
- namedq = node->namedq;
publ_list = &node->publ_list;
- node->action_flags &= ~(TIPC_MSG_EVT |
- TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+ node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
- TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+ TIPC_BCAST_RESET);
spin_unlock_bh(&node->lock);
@@ -964,12 +955,6 @@ void tipc_node_unlock(struct tipc_node *node)
tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
link_id, addr);
- if (flags & TIPC_MSG_EVT)
- tipc_sk_rcv(net, inputq);
-
- if (flags & TIPC_NAMED_MSG_EVT)
- tipc_named_rcv(net, namedq);
-
if (flags & TIPC_BCAST_MSG_EVT)
tipc_bclink_input(net);
@@ -1270,6 +1255,9 @@ unlock:
if (unlikely(rc & TIPC_LINK_DOWN_EVT))
tipc_node_link_down(n, bearer_id, false);
+ if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+ tipc_named_rcv(net, &n->bclink.namedq);
+
if (!skb_queue_empty(&le->inputq))
tipc_sk_rcv(net, &le->inputq);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 9a977467fc46..344b3e7594fd 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -53,13 +53,11 @@
* TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
*/
enum {
- TIPC_MSG_EVT = 1,
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
TIPC_NOTIFY_NODE_UP = (1 << 4),
TIPC_WAKEUP_BCAST_USERS = (1 << 5),
TIPC_NOTIFY_LINK_UP = (1 << 6),
TIPC_NOTIFY_LINK_DOWN = (1 << 7),
- TIPC_NAMED_MSG_EVT = (1 << 8),
TIPC_BCAST_MSG_EVT = (1 << 9),
TIPC_BCAST_RESET = (1 << 10)
};
@@ -124,8 +122,6 @@ struct tipc_node {
spinlock_t lock;
struct net *net;
struct hlist_node hash;
- struct sk_buff_head *inputq;
- struct sk_buff_head *namedq;
int active_links[2];
struct tipc_link_entry links[MAX_BEARERS];
int action_flags;