Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/bcast.c           |    6
-rw-r--r--  net/tipc/bearer.c          |   14
-rw-r--r--  net/tipc/diag.c            |    2
-rw-r--r--  net/tipc/group.c           |   42
-rw-r--r--  net/tipc/group.h           |    1
-rw-r--r--  net/tipc/link.c            |  201
-rw-r--r--  net/tipc/link.h            |    5
-rw-r--r--  net/tipc/monitor.c         |    3
-rw-r--r--  net/tipc/msg.c             |  109
-rw-r--r--  net/tipc/msg.h             |   11
-rw-r--r--  net/tipc/name_distr.c      |   14
-rw-r--r--  net/tipc/name_table.c      |   13
-rw-r--r--  net/tipc/name_table.h      |   10
-rw-r--r--  net/tipc/netlink.c         |    2
-rw-r--r--  net/tipc/netlink_compat.c  |    5
-rw-r--r--  net/tipc/node.c            |  104
-rw-r--r--  net/tipc/node.h            |   16
-rw-r--r--  net/tipc/socket.c          |  321
-rw-r--r--  net/tipc/socket.h          |    3
-rw-r--r--  net/tipc/topsrv.c          |   16
-rw-r--r--  net/tipc/udp_media.c       |   18
21 files changed, 629 insertions(+), 287 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index f3711176be45..d8026543bf4c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
* struct tipc_bc_base - base structure for keeping broadcast send state
* @link: broadcast send link structure
* @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
* @primary_bearer: a bearer having links to all broadcast destinations, if any
* @bcast_support: indicates if primary bearer, if any, supports broadcast
* @rcast_support: indicates if all peer nodes support replicast
* @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
*/
struct tipc_bc_base {
struct tipc_link *link;
@@ -512,7 +512,7 @@ int tipc_bcast_init(struct net *net)
struct tipc_bc_base *bb = NULL;
struct tipc_link *l = NULL;
- bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb)
goto enomem;
tn->bcbase = bb;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 2dfb492a7c94..e65c3a8551e4 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -395,6 +395,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
tipc_net_init(net, node_id, 0);
}
if (!tipc_own_id(net)) {
+ dev_put(dev);
pr_warn("Failed to obtain node identity\n");
return -EINVAL;
}
@@ -576,7 +577,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_dereference_rtnl(orig_dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_MULTICAST))) {
- skb->next = NULL;
+ skb_mark_not_on_list(skb);
tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
@@ -608,15 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
switch (evt) {
case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
+ if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+ test_and_set_bit_lock(0, &b->up);
break;
- case NETDEV_UP:
- test_and_set_bit_lock(0, &b->up);
- break;
+ }
+ /* fall through */
case NETDEV_GOING_DOWN:
clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
break;
+ case NETDEV_UP:
+ test_and_set_bit_lock(0, &b->up);
+ break;
case NETDEV_CHANGEMTU:
if (tipc_mtu_bad(dev, 0)) {
bearer_disable(net, b);
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index aaabb0b776dd..73137f4aeb68 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
if (h->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = tipc_dump_start,
.dump = tipc_diag_dump,
+ .done = tipc_dump_done,
};
netlink_dump_start(net->diag_nlsk, skb, h, &c);
return 0;
diff --git a/net/tipc/group.c b/net/tipc/group.c
index d7a7befeddd4..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -159,11 +159,6 @@ u32 tipc_group_exclude(struct tipc_group *grp)
return 0;
}
-int tipc_group_size(struct tipc_group *grp)
-{
- return grp->member_cnt;
-}
-
struct tipc_group *tipc_group_create(struct net *net, u32 portid,
struct tipc_group_req *mreq,
bool *group_is_open)
@@ -232,8 +227,8 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
kfree(grp);
}
-struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
- u32 node, u32 port)
+static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
+ u32 node, u32 port)
{
struct rb_node *n = grp->members.rb_node;
u64 nkey, key = (u64)node << 32 | port;
@@ -671,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
struct sk_buff *skb;
struct tipc_msg *hdr;
+ memset(&evt, 0, sizeof(evt));
evt.event = event;
evt.found_lower = m->instance;
evt.found_upper = m->instance;
@@ -918,3 +914,35 @@ void tipc_group_member_evt(struct tipc_group *grp,
}
*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
}
+
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
+{
+ struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
+ grp->type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
+ grp->instance) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
+ grp->bc_snd_nxt))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_NODE_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_CLUSTER_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
+ goto group_msg_cancel;
+
+ if (*grp->open)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
+ goto group_msg_cancel;
+
+ nla_nest_end(skb, group);
+ return 0;
+
+group_msg_cancel:
+ nla_nest_cancel(skb, group);
+ return -1;
+}
diff --git a/net/tipc/group.h b/net/tipc/group.h
index 5996af6e9f1d..76b4e5a7b39d 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -72,4 +72,5 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
u32 port, struct sk_buff_head *xmitq);
u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
void tipc_group_update_member(struct tipc_member *m, int len);
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 695acb783969..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,7 +106,8 @@ struct tipc_stats {
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
* @last_retransmitted: sequence number of most recently retransmitted message
- * @stale_count: # of identical retransmit requests made by peer
+ * @stale_cnt: counter for number of identical retransmit attempts
+ * @stale_limit: time when repeated identical retransmits must force link reset
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
@@ -127,14 +128,17 @@ struct tipc_link {
struct net *net;
/* Management and link supervision data */
- u32 peer_session;
- u32 session;
+ u16 peer_session;
+ u16 session;
+ u16 snd_nxt_state;
+ u16 rcv_nxt_state;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
u32 abort_limit;
u32 state;
u16 peer_caps;
+ bool in_session;
bool active;
u32 silent_intv_cnt;
char if_name[TIPC_MAX_IF_NAME];
@@ -161,7 +165,8 @@ struct tipc_link {
u16 snd_nxt;
u16 last_retransm;
u16 window;
- u32 stale_count;
+ u16 stale_cnt;
+ unsigned long stale_limit;
/* Reception */
u16 rcv_nxt;
@@ -212,11 +217,6 @@ enum {
*/
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-/* Wildcard value for link session numbers. When it is known that
- * peer endpoint is down, any session number must be accepted.
- */
-#define ANY_SESSION 0x10000
-
/* Link FSM states:
*/
enum {
@@ -297,11 +297,6 @@ static bool link_is_bc_rcvlink(struct tipc_link *l)
return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
-int tipc_link_is_active(struct tipc_link *l)
-{
- return l->active;
-}
-
void tipc_link_set_active(struct tipc_link *l, bool active)
{
l->active = active;
@@ -337,6 +332,11 @@ char tipc_link_plane(struct tipc_link *l)
return l->net_plane;
}
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+{
+ l->peer_caps = capabilities;
+}
+
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
struct tipc_link *uc_l,
struct sk_buff_head *xmitq)
@@ -373,7 +373,7 @@ int tipc_link_bc_peers(struct tipc_link *l)
return l->ackers;
}
-u16 link_bc_rcv_gap(struct tipc_link *l)
+static u16 link_bc_rcv_gap(struct tipc_link *l)
{
struct sk_buff *skb = skb_peek(&l->deferdq);
u16 gap = 0;
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
return l->name;
}
+u32 tipc_link_state(struct tipc_link *l)
+{
+ return l->state;
+}
+
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
@@ -469,9 +474,11 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
l->addr = peer;
l->peer_caps = peer_caps;
l->net = net;
- l->peer_session = ANY_SESSION;
+ l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
@@ -820,7 +827,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
* Wake up a number of waiting users, as permitted by available space
* in the send queue
*/
-void link_prepare_wakeup(struct tipc_link *l)
+static void link_prepare_wakeup(struct tipc_link *l)
{
struct sk_buff *skb, *tmp;
int imp, i = 0;
@@ -838,12 +845,24 @@ void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
- l->peer_session = ANY_SESSION;
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+
+ l->in_session = false;
l->session++;
l->mtu = l->advertised_mtu;
+
+ spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
+ spin_lock_bh(&l->inputq->lock);
+ skb_queue_splice_init(&list, l->inputq);
+ spin_unlock_bh(&l->inputq->lock);
+
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
- skb_queue_splice_init(&l->wakeupq, l->inputq);
__skb_queue_purge(&l->backlogq);
l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -857,10 +876,12 @@ void tipc_link_reset(struct tipc_link *l)
l->rcv_unacked = 0;
l->snd_nxt = 1;
l->rcv_nxt = 1;
+ l->snd_nxt_state = 1;
+ l->rcv_nxt_state = 1;
l->acked = 0;
l->silent_intv_cnt = 0;
l->rst_cnt = 0;
- l->stale_count = 0;
+ l->stale_cnt = 0;
l->bc_peer_is_up = false;
memset(&l->mon_state, 0, sizeof(l->mon_state));
tipc_link_reset_stats(l);
@@ -954,7 +975,8 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
return rc;
}
-void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
+static void tipc_link_advance_backlog(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
{
struct sk_buff *skb, *_skb;
struct tipc_msg *hdr;
@@ -997,39 +1019,42 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
-int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
- u16 from, u16 to, struct sk_buff_head *xmitq)
+/* tipc_link_retrans() - retransmit one or more packets
+ * @l: the link to transmit on
+ * @r: the receiving link ordering the retransmit. Same as l if unicast
+ * @from: retransmit from (inclusive) this sequence number
+ * @to: retransmit to (inclusive) this sequence number
+ * @xmitq: queue for accumulating the retransmitted packets
+ */
+static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
+ u16 from, u16 to, struct sk_buff_head *xmitq)
{
struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
- struct tipc_msg *hdr;
- u16 ack = l->rcv_nxt - 1;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 ack = l->rcv_nxt - 1;
+ struct tipc_msg *hdr;
if (!skb)
return 0;
/* Detect repeated retransmit failures on same packet */
- if (nacker->last_retransm != buf_seqno(skb)) {
- nacker->last_retransm = buf_seqno(skb);
- nacker->stale_count = 1;
- } else if (++nacker->stale_count > 100) {
+ if (r->last_retransm != buf_seqno(skb)) {
+ r->last_retransm = buf_seqno(skb);
+ r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+ r->stale_cnt = 0;
+ } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
- nacker->stale_count = 0;
if (link_is_bc_sndlink(l))
return TIPC_LINK_DOWN_EVT;
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
- /* Move forward to where retransmission should start */
skb_queue_walk(&l->transmq, skb) {
- if (!less(buf_seqno(skb), from))
- break;
- }
-
- skb_queue_walk_from(&l->transmq, skb) {
- if (more(buf_seqno(skb), to))
- break;
hdr = buf_msg(skb);
+ if (less(msg_seqno(hdr), from))
+ continue;
+ if (more(msg_seqno(hdr), to))
+ break;
_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
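
The failure test above is now two-dimensional: a link is declared failed only
when the same packet has seen more than 99 retransmit attempts and the peer's
tolerance interval has also expired. A condensed sketch of the predicate, for
illustration only (field names follow this patch; not part of the change):

	#include <linux/jiffies.h>

	/* Illustration: reset the link only when both conditions hold -
	 * more than 99 identical retransmits, and at least r->tolerance ms
	 * elapsed since stale_limit was armed for this sequence number.
	 */
	static bool link_retrans_is_stale(struct tipc_link *r)
	{
		return r->stale_cnt > 99 && time_after(jiffies, r->stale_limit);
	}
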
@@ -1063,6 +1088,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
skb_queue_tail(mc_inputq, skb);
return true;
}
+ /* else: fall through */
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
@@ -1271,6 +1297,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Forward queues and wake up waiting users */
if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+ l->stale_cnt = 0;
tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
@@ -1347,6 +1374,8 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
+ if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
+ msg_set_seqno(hdr, l->snd_nxt_state++);
msg_set_seq_gap(hdr, rcvgap);
msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
msg_set_probe(hdr, probe);
@@ -1371,6 +1400,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
__skb_queue_tail(xmitq, skb);
}
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 onode = tipc_own_addr(l->net);
+ struct tipc_msg *hdr, *ihdr;
+ struct sk_buff_head tnlq;
+ struct sk_buff *skb;
+ u32 dnode = l->addr;
+
+ skb_queue_head_init(&tnlq);
+ skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+ INT_H_SIZE, BASIC_H_SIZE,
+ dnode, onode, 0, 0, 0);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, 1);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+ ihdr = (struct tipc_msg *)msg_data(hdr);
+ tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, dnode);
+ msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, xmitq);
+}
+
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
* with contents of the link's transmit and backlog queues.
*/
@@ -1438,6 +1497,47 @@ tnl:
}
}
+/* tipc_link_validate_msg(): validate message against current link state
+ * Returns true if message should be accepted, otherwise false
+ */
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ u16 curr_session = l->peer_session;
+ u16 session = msg_session(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (msg_user(hdr) != LINK_PROTOCOL)
+ return true;
+
+ switch (mtyp) {
+ case RESET_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only RESET with new session number */
+ return more(session, curr_session);
+ case ACTIVATE_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only ACTIVATE with new or current session number */
+ return !less(session, curr_session);
+ case STATE_MSG:
+ /* Accept only STATE with current session number */
+ if (!l->in_session)
+ return false;
+ if (session != curr_session)
+ return false;
+ /* Extra sanity check */
+ if (!link_is_up(l) && msg_ack(hdr))
+ return false;
+ if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
+ return true;
+ /* Accept only STATE with new sequence number */
+ return !less(msg_seqno(hdr), l->rcv_nxt_state);
+ default:
+ return false;
+ }
+}
+
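
The session and STATE sequence checks above depend on wrap-safe mod-2^16
ordering (the less()/more() helpers from msg.h). A minimal sketch of that
ordering, included only to make the wrap-around semantics explicit; the
in-tree helpers may be written differently:

	/* "left comes before right" iff the forward distance from left to
	 * right is non-zero and less than half the 16-bit sequence space;
	 * this keeps checks such as more(session, curr_session) correct
	 * across u16 wrap-around.
	 */
	static inline int u16_before(u16 left, u16 right)
	{
		return left != right && (u16)(right - left) < 32768U;
	}
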
/* tipc_link_proto_rcv(): receive link level protocol message :
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest numerical id determines
@@ -1471,17 +1571,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
hdr = buf_msg(skb);
data = msg_data(hdr);
+ if (!tipc_link_validate_msg(l, hdr))
+ goto exit;
+
switch (mtyp) {
case RESET_MSG:
-
- /* Ignore duplicate RESET with old session number */
- if ((less_eq(msg_session(hdr), l->peer_session)) &&
- (l->peer_session != ANY_SESSION))
- break;
- /* fall thru' */
-
case ACTIVATE_MSG:
-
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -1491,9 +1586,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own priority if peer's priority is higher */
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
@@ -1509,17 +1605,20 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
+ l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max_pkt(hdr))
l->mtu = msg_max_pkt(hdr);
break;
case STATE_MSG:
+ l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2136,6 +2235,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
if (link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ec59348a81e8..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
struct tipc_link **link);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
bool tipc_link_is_up(struct tipc_link *l);
@@ -107,9 +109,12 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
int tipc_link_window(struct tipc_link *l);
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities);
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr);
unsigned long tipc_link_tolerance(struct tipc_link *l);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 5453e564da82..67f69389ec17 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -684,7 +684,8 @@ int tipc_nl_monitor_get_threshold(struct net *net)
return tn->mon_threshold;
}
-int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg)
+static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
+ struct tipc_nl_msg *msg)
{
struct tipc_mon_domain *dom = peer->domain;
struct nlattr *attrs;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b6c45dccba3d..f48e5857210f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -416,26 +416,31 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
- struct tipc_msg *msg;
- int imsz, offset;
+ struct tipc_msg *hdr, *ihdr;
+ int imsz;
*iskb = NULL;
if (unlikely(skb_linearize(skb)))
goto none;
- msg = buf_msg(skb);
- offset = msg_hdr_sz(msg) + *pos;
- if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
+ hdr = buf_msg(skb);
+ if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
goto none;
- *iskb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!*iskb))
+ ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
+ imsz = msg_size(ihdr);
+
+ if ((*pos + imsz) > msg_data_sz(hdr))
goto none;
- skb_pull(*iskb, offset);
- imsz = msg_size(buf_msg(*iskb));
- skb_trim(*iskb, imsz);
+
+ *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
+ if (!*iskb)
+ goto none;
+
+ skb_copy_to_linear_data(*iskb, ihdr, imsz);
if (unlikely(!tipc_msg_validate(iskb)))
goto none;
+
*pos += align(imsz);
return true;
none:
@@ -494,60 +499,56 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
- * @skb: buffer containing message to be reversed; may be replaced.
+ * @skb: buffer containing message to be reversed; will be consumed
* @err: error code to be set in message, if any
- * Consumes buffer at failure
+ * Replaces consumed buffer with new one when successful
* Returns true if success, otherwise false
*/
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
struct sk_buff *_skb = *skb;
- struct tipc_msg *hdr;
- struct tipc_msg ohdr;
- int dlen;
+ struct tipc_msg *_hdr, *hdr;
+ int hlen, dlen;
if (skb_linearize(_skb))
goto exit;
- hdr = buf_msg(_skb);
- dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
- if (msg_dest_droppable(hdr))
+ _hdr = buf_msg(_skb);
+ dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
+ hlen = msg_hdr_sz(_hdr);
+
+ if (msg_dest_droppable(_hdr))
goto exit;
- if (msg_errcode(hdr))
+ if (msg_errcode(_hdr))
goto exit;
- /* Take a copy of original header before altering message */
- memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
-
- /* Never return SHORT header; expand by replacing buffer if necessary */
- if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
- if (!*skb)
- goto exit;
- memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
- kfree_skb(_skb);
- _skb = *skb;
- hdr = buf_msg(_skb);
- memcpy(hdr, &ohdr, BASIC_H_SIZE);
- msg_set_hdr_sz(hdr, BASIC_H_SIZE);
- }
+ /* Never return SHORT header */
+ if (hlen == SHORT_H_SIZE)
+ hlen = BASIC_H_SIZE;
- if (skb_cloned(_skb) &&
- pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+ /* Don't return data along with SYN+, - sender has a clone */
+ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
+ dlen = 0;
+
+ /* Allocate new buffer to return */
+ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
+ if (!*skb)
goto exit;
+ memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
+ memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
- /* reassign after skb header modifications */
- hdr = buf_msg(_skb);
- /* Now reverse the concerned fields */
+ /* Build reverse header in new buffer */
+ hdr = buf_msg(*skb);
+ msg_set_hdr_sz(hdr, hlen);
msg_set_errcode(hdr, err);
msg_set_non_seq(hdr, 0);
- msg_set_origport(hdr, msg_destport(&ohdr));
- msg_set_destport(hdr, msg_origport(&ohdr));
- msg_set_destnode(hdr, msg_prevnode(&ohdr));
+ msg_set_origport(hdr, msg_destport(_hdr));
+ msg_set_destport(hdr, msg_origport(_hdr));
+ msg_set_destnode(hdr, msg_prevnode(_hdr));
msg_set_prevnode(hdr, own_node);
msg_set_orignode(hdr, own_node);
- msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
- skb_trim(_skb, msg_size(hdr));
+ msg_set_size(hdr, hlen + dlen);
skb_orphan(_skb);
+ kfree_skb(_skb);
return true;
exit:
kfree_skb(_skb);
@@ -555,6 +556,22 @@ exit:
return false;
}
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ pr_err_ratelimited("Failed to clone buffer chain\n");
+ return false;
+ }
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
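
tipc_msg_skb_clone() exists so the socket layer can keep a retransmittable
copy of an outgoing buffer chain; on allocation failure it purges whatever it
already queued on @cpy. A minimal usage sketch (queue_syn_copy is a
hypothetical wrapper; the real caller is the __tipc_sendmsg() change later in
this series):

	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Hypothetical helper: keep a clone of the SYN in the socket write
	 * queue so a failed connection setup can be retried from the timer.
	 */
	static int queue_syn_copy(struct sock *sk, struct sk_buff_head *pkts)
	{
		if (!tipc_msg_skb_clone(pkts, &sk->sk_write_queue))
			return -ENOMEM;
		return 0;
	}
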
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
* @skb: the buffer containing the message.
@@ -595,10 +612,6 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
if (!skb_cloned(skb))
return true;
- /* Unclone buffer in case it was bundled */
- if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
- return false;
-
return true;
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a4e944d59394..a2879e6ec5b6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -216,6 +216,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
msg_set_bits(m, 0, 20, 1, n);
}
+static inline int msg_is_syn(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 17, 1);
+}
+
+static inline void msg_set_syn(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 17, 1, d);
+}
+
static inline int msg_dest_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 19, 1);
@@ -970,6 +980,7 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
struct sk_buff_head *cpy);
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb);
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
static inline u16 buf_seqno(struct sk_buff *skb)
{
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..61219f0b9677 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -94,8 +94,9 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
return NULL;
}
- list_add_tail_rcu(&publ->binding_node, &nt->cluster_scope);
-
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_add_tail(&publ->binding_node, &nt->cluster_scope);
+ write_unlock_bh(&nt->cluster_scope_lock);
skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
if (!skb) {
pr_warn("Publication distribution failure\n");
@@ -112,11 +113,13 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
*/
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
+ struct name_table *nt = tipc_name_table(net);
struct sk_buff *buf;
struct distr_item *item;
+ write_lock_bh(&nt->cluster_scope_lock);
list_del(&publ->binding_node);
-
+ write_unlock_bh(&nt->cluster_scope_lock);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -189,11 +192,10 @@ void tipc_named_node_up(struct net *net, u32 dnode)
__skb_queue_head_init(&head);
- rcu_read_lock();
+ read_lock_bh(&nt->cluster_scope_lock);
named_distribute(net, &head, dnode, &nt->cluster_scope);
- rcu_read_unlock();
-
tipc_node_xmit(net, &head, dnode, 0);
+ read_unlock_bh(&nt->cluster_scope_lock);
}
/**
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bebe88cae07b..bff241f03525 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -735,7 +735,7 @@ int tipc_nametbl_init(struct net *net)
struct name_table *nt;
int i;
- nt = kzalloc(sizeof(*nt), GFP_ATOMIC);
+ nt = kzalloc(sizeof(*nt), GFP_KERNEL);
if (!nt)
return -ENOMEM;
@@ -744,6 +744,7 @@ int tipc_nametbl_init(struct net *net)
INIT_LIST_HEAD(&nt->node_scope);
INIT_LIST_HEAD(&nt->cluster_scope);
+ rwlock_init(&nt->cluster_scope_lock);
tn->nametbl = nt;
spin_lock_init(&tn->nametbl_lock);
return 0;
@@ -980,20 +981,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
- u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
list_for_each_entry(dst, l, list) {
- if (dst->value != value)
- continue;
- return dst;
+ if (dst->node == node && dst->port == port)
+ return dst;
}
return NULL;
}
bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
- u64 value = (u64)node << 32 | port;
struct tipc_dest *dst;
if (tipc_dest_find(l, node, port))
@@ -1002,7 +1000,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
if (unlikely(!dst))
return false;
- dst->value = value;
+ dst->node = node;
+ dst->port = port;
list_add(&dst->list, l);
return true;
}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 0febba41da86..f79066334cc8 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,6 +100,7 @@ struct name_table {
struct hlist_head services[TIPC_NAMETBL_SIZE];
struct list_head node_scope;
struct list_head cluster_scope;
+ rwlock_t cluster_scope_lock;
u32 local_publ_count;
};
@@ -133,13 +134,8 @@ void tipc_nametbl_stop(struct net *net);
struct tipc_dest {
struct list_head list;
- union {
- struct {
- u32 port;
- u32 node;
- };
- u64 value;
- };
+ u32 port;
+ u32 node;
};
struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6ff2254088f6..99ee419210ba 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
},
{
.cmd = TIPC_NL_SOCK_GET,
+ .start = tipc_dump_start,
.dumpit = tipc_nl_sk_dump,
+ .done = tipc_dump_done,
.policy = tipc_nl_policy,
},
{
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index a2f76743c73a..6376467e78f8 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
buf->sk = msg->dst_sk;
+ if (__tipc_dump_start(&cb, msg->net)) {
+ kfree_skb(buf);
+ return -ENOMEM;
+ }
do {
int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
err = 0;
err_out:
+ tipc_dump_done(&cb);
kfree_skb(buf);
if (err == -EMSGSIZE) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0453bd451ce8..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -45,6 +45,7 @@
#include "netlink.h"
#define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000
/* Flags used to take different actions according to flag type
* TIPC_NOTIFY_NODE_DOWN: notify node is down
@@ -96,6 +97,7 @@ struct tipc_bclink_entry {
* @link_id: local and remote bearer ids of changing link, if any
* @publ_list: list of publications
* @rcu: rcu struct for tipc_node
+ * @delete_at: indicates the time for deleting a down node
*/
struct tipc_node {
u32 addr;
@@ -109,6 +111,7 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool failover_sent;
u16 sync_point;
int link_cnt;
u16 working_links;
@@ -121,6 +124,7 @@ struct tipc_node {
unsigned long keepalive_intv;
struct timer_list timer;
struct rcu_head rcu;
+ unsigned long delete_at;
};
/* Node FSM states and events:
@@ -160,6 +164,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
struct tipc_sock_conn {
u32 port;
@@ -359,13 +364,24 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
+ struct tipc_link *l;
+ int bearer_id;
int i;
spin_lock_bh(&tn->node_list_lock);
n = tipc_node_find(net, addr);
if (n) {
+ if (n->capabilities == capabilities)
+ goto exit;
/* Same node may come back with new capabilities */
+ write_lock_bh(&n->lock);
n->capabilities = capabilities;
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ l = n->links[bearer_id].link;
+ if (l)
+ tipc_link_update_caps(l, capabilities);
+ }
+ write_unlock_bh(&n->lock);
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -390,6 +406,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
for (i = 0; i < MAX_BEARERS; i++)
spin_lock_init(&n->links[i].lock);
n->state = SELF_DOWN_PEER_LEAVING;
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
n->active_links[1] = INVALID_BEARER_ID;
@@ -433,11 +450,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+ tipc_node_delete_from_list(node);
del_timer_sync(&node->timer);
tipc_node_put(node);
@@ -544,6 +566,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
tipc_node_put(node);
}
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct tipc_link_entry *le = &node->links[i];
+
+ if (le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ node->link_cnt--;
+ }
+ }
+}
+
+/* tipc_node_cleanup - delete nodes that have not had any active
+ * links for NODE_CLEANUP_AFTER time
+ */
+static int tipc_node_cleanup(struct tipc_node *peer)
+{
+ struct tipc_net *tn = tipc_net(peer->net);
+ bool deleted = false;
+
+ spin_lock_bh(&tn->node_list_lock);
+ tipc_node_write_lock(peer);
+
+ if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+ tipc_node_clear_links(peer);
+ tipc_node_delete_from_list(peer);
+ deleted = true;
+ }
+ tipc_node_write_unlock(peer);
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+}
+
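
Down-node garbage collection is driven from the node timer: delete_at is
armed at node creation and re-armed on lost contact, and the node is unlinked
only once it has stayed down past that deadline. The test reduces to the
following sketch (hypothetical helper, illustration only; NODE_CLEANUP_AFTER
is 300000 ms, i.e. five minutes):

	#include <linux/jiffies.h>

	/* A node qualifies for cleanup only when it is down and has
	 * remained down beyond its delete_at deadline.
	 */
	static bool node_cleanup_due(struct tipc_node *n)
	{
		return !node_is_up(n) && time_after(jiffies, n->delete_at);
	}
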
/* tipc_node_timeout - handle expiration of node timer
*/
static void tipc_node_timeout(struct timer_list *t)
@@ -551,21 +609,29 @@ static void tipc_node_timeout(struct timer_list *t)
struct tipc_node *n = from_timer(n, t, timer);
struct tipc_link_entry *le;
struct sk_buff_head xmitq;
+ int remains = n->link_cnt;
int bearer_id;
int rc = 0;
+ if (!node_is_up(n) && tipc_node_cleanup(n)) {
+ /* Remove the reference held by the node timer */
+ tipc_node_put(n);
+ return;
+ }
+
__skb_queue_head_init(&xmitq);
- for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
tipc_node_read_lock(n);
le = &n->links[bearer_id];
- spin_lock_bh(&le->lock);
if (le->link) {
+ spin_lock_bh(&le->lock);
/* Link tolerance may change asynchronously: */
tipc_node_calculate_timer(n, le->link);
rc = tipc_link_timeout(le->link, &xmitq);
+ spin_unlock_bh(&le->lock);
+ remains--;
}
- spin_unlock_bh(&le->lock);
tipc_node_read_unlock(n);
tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
if (rc & TIPC_LINK_DOWN_EVT)
@@ -615,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+ n->failover_sent = false;
n->action_flags |= TIPC_NOTIFY_NODE_UP;
tipc_link_set_active(nl, true);
tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -846,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool reset = true;
char *if_name;
unsigned long intv;
+ u16 session;
*dupl_addr = false;
*respond = false;
@@ -932,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
goto exit;
if_name = strchr(b->name, ':') + 1;
+ get_random_bytes(&session, sizeof(u16));
if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
b->net_plane, b->mtu, b->priority,
- b->window, mod(tipc_net(net)->random),
+ b->window, session,
tipc_own_addr(net), addr, peer_id,
n->capabilities,
tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1174,6 +1243,7 @@ static void node_lost_contact(struct tipc_node *n,
uint i;
pr_debug("Lost contact with %x\n", n->addr);
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1481,7 +1551,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
* tipc_node_check_state - check and if necessary update node state
* @skb: TIPC packet
* @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * Returns true if state and msg are ok, otherwise false
*/
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int bearer_id, struct sk_buff_head *xmitq)
@@ -1515,6 +1585,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
}
}
+ if (!tipc_link_validate_msg(l, hdr))
+ return false;
+
/* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
if (!tipc_link_is_up(l))
@@ -1546,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
+ /* If parallel link was already down, and this happened before
+ * the tunnel link came up, FAILOVER was never sent. Ensure that
+ * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+ */
+ if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+ tipc_link_create_dummy_tnl_msg(l, xmitq);
+ n->failover_sent = true;
+ }
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
@@ -1743,7 +1824,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
struct tipc_node *peer;
u32 addr;
int err;
- int i;
/* We identify the peer by its net */
if (!info->attrs[TIPC_NLA_NET])
@@ -1778,15 +1858,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link_entry *le = &peer->links[i];
-
- if (le->link) {
- kfree(le->link);
- le->link = NULL;
- peer->link_cnt--;
- }
- }
+ tipc_node_clear_links(peer);
tipc_node_write_unlock(peer);
tipc_node_delete(peer);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 846c8f240872..03f5efb62cfb 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,18 +45,22 @@
/* Optional capabilities supported by this code version
*/
enum {
+ TIPC_SYN_BIT = (1),
TIPC_BCAST_SYNCH = (1 << 1),
TIPC_BCAST_STATE_NACK = (1 << 2),
TIPC_BLOCK_FLOWCTL = (1 << 3),
TIPC_BCAST_RCAST = (1 << 4),
- TIPC_NODE_ID128 = (1 << 5)
+ TIPC_NODE_ID128 = (1 << 5),
+ TIPC_LINK_PROTO_SEQNO = (1 << 6)
};
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
- TIPC_BCAST_STATE_NACK | \
- TIPC_BCAST_RCAST | \
- TIPC_BLOCK_FLOWCTL | \
- TIPC_NODE_ID128)
+#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
+ TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
+ TIPC_BLOCK_FLOWCTL | \
+ TIPC_NODE_ID128 | \
+ TIPC_LINK_PROTO_SEQNO)
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 930852c54d7a..636e6131769d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -47,7 +47,7 @@
#include "netlink.h"
#include "group.h"
-#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_FWD_MSG 1
#define TIPC_MAX_PORT 0xffffffff
@@ -80,7 +80,6 @@ struct sockaddr_pair {
* @publications: list of publications for port
* @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
* @conn_timeout: the time we can wait for an unresponded setup request
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
@@ -102,8 +101,8 @@ struct tipc_sock {
struct list_head cong_links;
struct list_head publications;
u32 pub_count;
- uint conn_timeout;
atomic_t dupl_rcvcnt;
+ u16 conn_timeout;
bool probe_unacked;
u16 cong_link_cnt;
u16 snt_unacked;
@@ -411,7 +410,6 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
- struct tipc_net *tn;
const struct proto_ops *ops;
struct sock *sk;
struct tipc_sock *tsk;
@@ -446,7 +444,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
INIT_LIST_HEAD(&tsk->publications);
INIT_LIST_HEAD(&tsk->cong_links);
msg = &tsk->phdr;
- tn = net_generic(sock_net(sk), tipc_net_id);
/* Finish initializing socket data structures */
sock->ops = ops;
@@ -509,6 +506,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
+ /* Remove any pending SYN message */
+ __skb_queue_purge(&sk->sk_write_queue);
+
/* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer).
*/
@@ -578,6 +578,7 @@ static int tipc_release(struct socket *sock)
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
+ sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
@@ -716,7 +717,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, sock, wait);
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -1117,7 +1118,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
u32 self = tipc_own_addr(net);
u32 type, lower, upper, scope;
struct sk_buff *skb, *_skb;
- u32 portid, oport, onode;
+ u32 portid, onode;
struct sk_buff_head tmpq;
struct list_head dports;
struct tipc_msg *hdr;
@@ -1133,7 +1134,6 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
user = msg_user(hdr);
mtyp = msg_type(hdr);
hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
- oport = msg_origport(hdr);
onode = msg_orignode(hdr);
type = msg_nametype(hdr);
@@ -1198,6 +1198,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @skb: pointer to message buffer.
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
@@ -1215,7 +1216,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
- goto exit;
+
+ /* State change is ignored if socket already awake,
+ * - convert msg to abort msg and add to inqueue
+ */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
}
tsk->probe_unacked = false;
@@ -1321,6 +1331,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
tsk->conn_type = dest->addr.name.name.type;
tsk->conn_instance = dest->addr.name.name.instance;
}
+ msg_set_syn(hdr, 1);
}
seq = &dest->addr.nameseq;
@@ -1363,6 +1374,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
+ if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
+ return -ENOMEM;
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
@@ -1421,8 +1434,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
/* Handle implicit connection setup */
if (unlikely(dest)) {
rc = __tipc_sendmsg(sock, m, dlen);
- if (dlen && (dlen == rc))
+ if (dlen && dlen == rc) {
+ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+ }
return rc;
}
@@ -1480,6 +1495,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
struct net *net = sock_net(sk);
struct tipc_msg *msg = &tsk->phdr;
+ msg_set_syn(msg, 0);
msg_set_destnode(msg, peer_node);
msg_set_destport(msg, peer_port);
msg_set_type(msg, TIPC_CONN_MSG);
@@ -1491,6 +1507,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ __skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
return;
@@ -1936,7 +1953,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
switch (msg_user(hdr)) {
case CONN_MANAGER:
- tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
@@ -1961,91 +1978,90 @@ static void tipc_sk_proto_rcv(struct sock *sk,
}
/**
- * tipc_filter_connect - Handle incoming message for a connection-based socket
+ * tipc_sk_filter_connect - check incoming message for a connection-based socket
* @tsk: TIPC socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed
- *
- * Returns true if everything ok, false otherwise
+ * @skb: pointer to message buffer.
+ * Returns true if message should be added to receive queue, false otherwise
*/
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_msg *hdr = buf_msg(skb);
- u32 pport = msg_origport(hdr);
- u32 pnode = msg_orignode(hdr);
+ bool con_msg = msg_connected(hdr);
+ u32 pport = tsk_peer_port(tsk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 oport = msg_origport(hdr);
+ u32 onode = msg_orignode(hdr);
+ int err = msg_errcode(hdr);
+ unsigned long delay;
if (unlikely(msg_mcast(hdr)))
return false;
switch (sk->sk_state) {
case TIPC_CONNECTING:
- /* Accept only ACK or NACK message */
- if (unlikely(!msg_connected(hdr))) {
- if (pport != tsk_peer_port(tsk) ||
- pnode != tsk_peer_node(tsk))
- return false;
-
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(!msg_isdata(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = EINVAL;
- sk->sk_state_change(sk);
- return true;
+ /* Setup ACK */
+ if (likely(con_msg)) {
+ if (err)
+ break;
+ tipc_sk_finish_conn(tsk, oport, onode);
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
+ /* ACK+ message with data is added to receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+ /* Empty ACK-, - wake up sleeping connect() and drop */
+ sk->sk_data_ready(sk);
+ msg_set_dest_droppable(hdr, 1);
+ return false;
}
+ /* Ignore connectionless message if not from listening socket */
+ if (oport != pport || onode != pnode)
+ return false;
- tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
- msg_set_importance(&tsk->phdr, msg_importance(hdr));
-
- /* If 'ACK+' message, add to socket receive queue */
- if (msg_data_sz(hdr))
- return true;
-
- /* If empty 'ACK-' message, wake up sleeping connect() */
- sk->sk_data_ready(sk);
+ /* Rejected SYN */
+ if (err != TIPC_ERR_OVERLOAD)
+ break;
- /* 'ACK-' message is neither accepted nor rejected: */
- msg_set_dest_droppable(hdr, 1);
+ /* Prepare for new setup attempt if we have a SYN clone */
+ if (skb_queue_empty(&sk->sk_write_queue))
+ break;
+ get_random_bytes(&delay, 2);
+ delay %= (tsk->conn_timeout / 4);
+ delay = msecs_to_jiffies(delay + 100);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
return false;
-
case TIPC_OPEN:
case TIPC_DISCONNECTING:
- break;
+ return false;
case TIPC_LISTEN:
/* Accept only SYN message */
- if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+ if (!msg_is_syn(hdr) &&
+ tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
+ return false;
+ if (!con_msg && !err)
return true;
- break;
+ return false;
case TIPC_ESTABLISHED:
/* Accept only connection-based messages sent by peer */
- if (unlikely(!tsk_peer_msg(tsk, hdr)))
+ if (likely(con_msg && !err && pport == oport && pnode == onode))
+ return true;
+ if (!tsk_peer_msg(tsk, hdr))
return false;
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- /* Let timer expire on it's own */
- tipc_node_remove_conn(net, tsk_peer_node(tsk),
- tsk->portid);
- sk->sk_state_change(sk);
- }
+ if (!err)
+ return true;
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, pnode, tsk->portid);
+ sk->sk_state_change(sk);
return true;
default:
pr_err("Unknown sk_state %u\n", sk->sk_state);
}
-
- return false;
+ /* Abort connection setup attempt */
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNREFUSED;
+ sk->sk_state_change(sk);
+ return true;
}
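
The connect retry back-off above draws a random delay and offsets it by
100 ms; with the default 8 s connect timeout the retry fires 100-2099 ms
after the rejected SYN. A worked restatement of the arithmetic (illustration
only; the values assume CONN_TIMEOUT_DEFAULT):

	/* With tsk->conn_timeout = 8000 ms (the default):
	 *   delay %= 8000 / 4   -> delay in [0, 2000)
	 *   delay += 100        -> timer fires 100..2099 ms from now
	 */
	get_random_bytes(&delay, 2);
	delay %= (tsk->conn_timeout / 4);
	delay = msecs_to_jiffies(delay + 100);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
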
/**
@@ -2547,43 +2563,78 @@ static int tipc_shutdown(struct socket *sock, int how)
return res;
}
+static void tipc_sk_check_probing_state(struct sock *sk,
+ struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 pport = tsk_peer_port(tsk);
+ u32 self = tsk_own_node(tsk);
+ u32 oport = tsk->portid;
+ struct sk_buff *skb;
+
+ if (tsk->probe_unacked) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNABORTED;
+ tipc_node_remove_conn(sock_net(sk), pnode, pport);
+ sk->sk_state_change(sk);
+ return;
+ }
+ /* Prepare new probe */
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
+ pnode, self, pport, oport, TIPC_OK);
+ if (skb)
+ __skb_queue_tail(list, skb);
+ tsk->probe_unacked = true;
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+}
+
+static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+
+ /* Try again later if dest link is congested */
+ if (tsk->cong_link_cnt) {
+ sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
+ return;
+ }
+ /* Prepare SYN for retransmit */
+ tipc_msg_skb_clone(&sk->sk_write_queue, list);
+}
+
static void tipc_sk_timeout(struct timer_list *t)
{
struct sock *sk = from_timer(sk, t, sk_timer);
struct tipc_sock *tsk = tipc_sk(sk);
- u32 peer_port = tsk_peer_port(tsk);
- u32 peer_node = tsk_peer_node(tsk);
- u32 own_node = tsk_own_node(tsk);
- u32 own_port = tsk->portid;
- struct net *net = sock_net(sk);
- struct sk_buff *skb = NULL;
+ u32 pnode = tsk_peer_node(tsk);
+ struct sk_buff_head list;
+ int rc = 0;
+ skb_queue_head_init(&list);
bh_lock_sock(sk);
- if (!tipc_sk_connected(sk))
- goto exit;
/* Try again later if socket is busy */
if (sock_owned_by_user(sk)) {
sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
- goto exit;
+ bh_unlock_sock(sk);
+ return;
}
- if (tsk->probe_unacked) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, peer_node, peer_port);
- sk->sk_state_change(sk);
- goto exit;
- }
- /* Send new probe */
- skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
- peer_node, own_node, peer_port, own_port,
- TIPC_OK);
- tsk->probe_unacked = true;
- sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
-exit:
+ if (sk->sk_state == TIPC_ESTABLISHED)
+ tipc_sk_check_probing_state(sk, &list);
+ else if (sk->sk_state == TIPC_CONNECTING)
+ tipc_sk_retry_connect(sk, &list);
+
bh_unlock_sock(sk);
- if (skb)
- tipc_node_xmit_skb(net, skb, peer_node, own_port);
+
+ if (!skb_queue_empty(&list))
+ rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
+
+ /* SYN messages may cause link congestion */
+ if (rc == -ELINKCONG) {
+ tipc_dest_push(&tsk->cong_links, pnode, 0);
+ tsk->cong_link_cnt = 1;
+ }
sock_put(sk);
}
@@ -2675,6 +2726,8 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3230,45 +3283,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
- struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = tipc_net(net);
- const struct bucket_table *tbl;
- u32 prev_portid = cb->args[1];
- u32 tbl_id = cb->args[0];
- struct rhash_head *pos;
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
- rcu_read_lock();
- tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
- for (; tbl_id < tbl->size; tbl_id++) {
- rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
- spin_lock_bh(&tsk->sk.sk_lock.slock);
- if (prev_portid && prev_portid != tsk->portid) {
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ rhashtable_walk_start(iter);
+ while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ if (err == -EAGAIN) {
+ err = 0;
continue;
}
+ break;
+ }
- err = skb_handler(skb, cb, tsk);
- if (err) {
- prev_portid = tsk->portid;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
- goto out;
- }
-
- prev_portid = 0;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(iter);
+ lock_sock(&tsk->sk);
+ err = skb_handler(skb, cb, tsk);
+ if (err) {
+ release_sock(&tsk->sk);
+ sock_put(&tsk->sk);
+ goto out;
}
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(iter);
+ sock_put(&tsk->sk);
}
+ rhashtable_walk_stop(iter);
out:
- rcu_read_unlock();
- cb->args[0] = tbl_id;
- cb->args[1] = prev_portid;
-
return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);
+int tipc_dump_start(struct netlink_callback *cb)
+{
+ return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+ /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+ struct rhashtable_iter *iter = (void *)cb->args[4];
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!iter) {
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ cb->args[4] = (long)iter;
+ }
+
+ rhashtable_walk_enter(&tn->sk_rht, iter);
+ return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *hti = (void *)cb->args[4];
+
+ rhashtable_walk_exit(hti);
+ kfree(hti);
+ return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
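
The dump path now keeps a resumable rhashtable iterator in cb->args[4],
allocated in .start and released in .done. A condensed view of how the
triplet is wired together (sock_dump_ops is a hypothetical name; the real
registrations are the netlink.c and diag.c hunks above):

	static const struct genl_ops sock_dump_ops[] = {
		{
			.cmd    = TIPC_NL_SOCK_GET,
			.start  = tipc_dump_start,  /* kmalloc iter + rhashtable_walk_enter() */
			.dumpit = tipc_nl_sk_dump,  /* resumes the walk via cb->args[4] */
			.done   = tipc_dump_done,   /* rhashtable_walk_exit() + kfree() */
			.policy = tipc_nl_policy,
		},
	};
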
int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk, u32 sk_filter_state,
u64 (*tipc_diag_gen_cookie)(struct sock *sk))
@@ -3320,6 +3402,11 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
goto stat_msg_cancel;
nla_nest_end(skb, stat);
+
+ if (tsk->group)
+ if (tipc_group_fill_sock_diag(tsk->group, skb))
+ goto stat_msg_cancel;
+
nla_nest_end(skb, attrs);
return 0;
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index aff9b2ae5a1f..5e575f205afe 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int (*skb_handler)(struct sk_buff *skb,
struct netlink_callback *cb,
struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
#endif
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index c8e34ef22c30..4bdea0057171 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -57,16 +57,12 @@
* @idr_lock: protect the connection identifier set
* @idr_in_use: amount of allocated identifier entry
* @net: network namespace instance
- * @rcvbuf_cache: memory cache of server receive buffer
+ * @awork: accept work item
* @rcv_wq: receive workqueue
* @send_wq: send workqueue
* @max_rcvbuf_size: maximum permitted receive message length
- * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_release: callback will be called before releasing the connection
- * @tipc_conn_recvmsg: callback will be called when message arrives
+ * @listener: topsrv listener socket
* @name: server name
- * @imp: message importance
- * @type: socket type
*/
struct tipc_topsrv {
struct idr conn_idr;
@@ -90,9 +86,7 @@ struct tipc_topsrv {
* @server: pointer to connected server
* @sub_list: list of all pertaining subscriptions
* @sub_lock: lock protecting the subscription list
- * @outqueue_lock: control access to the outqueue
* @rwork: receive work item
- * @rx_action: what to do when connection socket is active
* @outqueue: pointer to first outbound message in queue
* @outqueue_lock: control access to the outqueue
* @swork: send work item
@@ -313,8 +307,8 @@ static void tipc_conn_send_work(struct work_struct *work)
conn_put(con);
}
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
*/
void tipc_topsrv_queue_evt(struct net *net, int conid,
u32 event, struct tipc_event *evt)
@@ -657,7 +651,7 @@ int tipc_topsrv_start(struct net *net)
srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
INIT_WORK(&srv->awork, tipc_topsrv_accept);
- strncpy(srv->name, name, strlen(name) + 1);
+ strscpy(srv->name, name, sizeof(srv->name));
tn->topsrv = srv;
atomic_set(&tn->subscription_count, 0);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 9783101bc4a9..10dc59ce9c82 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -650,6 +650,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
u8 node_id[NODE_ID_LEN] = {0,};
+ int rmcast = 0;
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
if (!ub)
@@ -680,6 +681,9 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto err;
+ /* Check whether the remote address is a multicast group */
+ rmcast = tipc_udp_is_mcast_addr(&remote);
+
/* Autoconfigure own node identity if needed */
if (!tipc_own_id(net)) {
memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
@@ -705,7 +709,12 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
goto err;
}
udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ /* Bind to ANY so packets sent to the group are received */
+ if (rmcast)
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ else
+ udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
@@ -719,7 +728,10 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums = true;
udp_conf.use_udp6_rx_checksums = true;
- udp_conf.local_ip6 = in6addr_any;
+ if (rmcast)
+ udp_conf.local_ip6 = in6addr_any;
+ else
+ udp_conf.local_ip6 = local.ipv6;
b->mtu = 1280;
#endif
} else {
@@ -741,7 +753,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
* is used if it's a multicast address.
*/
memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
- if (tipc_udp_is_mcast_addr(&remote))
+ if (rmcast)
err = enable_mcast(ub, &remote);
else
err = tipc_udp_rcast_add(b, &remote);
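
The net effect of the rmcast flag: a multicast remote requires binding to ANY
so group traffic is received, while a replicast (unicast) remote can bind to
the bearer's configured local address. Condensed into a hypothetical helper
(illustration only, IPv4 path):

	#include <linux/in.h>

	/* Bind to ANY only when the remote is a multicast group;
	 * otherwise bind to the configured local address.
	 */
	static __be32 tipc_udp_bind_addr(bool rmcast, __be32 local)
	{
		return rmcast ? htonl(INADDR_ANY) : local;
	}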