Diffstat (limited to 'net/sctp')
-rw-r--r-- | net/sctp/debug.c | 4
-rw-r--r-- | net/sctp/input.c | 14
-rw-r--r-- | net/sctp/ipv6.c | 36
-rw-r--r-- | net/sctp/output.c | 16
-rw-r--r-- | net/sctp/outqueue.c | 6
-rw-r--r-- | net/sctp/protocol.c | 31
-rw-r--r-- | net/sctp/sm_make_chunk.c | 37
-rw-r--r-- | net/sctp/sm_sideeffect.c | 43
-rw-r--r-- | net/sctp/sm_statefuns.c | 6
-rw-r--r-- | net/sctp/socket.c | 216
-rw-r--r-- | net/sctp/transport.c | 9
11 files changed, 188 insertions, 230 deletions
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 67715f4eb849..7ff548a30cfb 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -86,6 +86,9 @@ const char *sctp_cname(const sctp_subtype_t cid)
 	case SCTP_CID_FWD_TSN:
 		return "FWD_TSN";

+	case SCTP_CID_AUTH:
+		return "AUTH";
+
 	default:
 		break;
 	}
@@ -135,6 +138,7 @@ static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
 	"PRIMITIVE_ABORT",
 	"PRIMITIVE_SEND",
 	"PRIMITIVE_REQUESTHEARTBEAT",
+	"PRIMITIVE_ASCONF",
 };

 /* Lookup primitive debug name. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2e4a8646dbc3..d2e98803ffe3 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -83,14 +83,15 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 {
 	struct sk_buff *list = skb_shinfo(skb)->frag_list;
 	struct sctphdr *sh = sctp_hdr(skb);
-	__be32 cmp = sh->checksum;
-	__be32 val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
+	__le32 cmp = sh->checksum;
+	__le32 val;
+	__u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));

 	for (; list; list = list->next)
-		val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
-					val);
+		tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
+					tmp);

-	val = sctp_end_cksum(val);
+	val = sctp_end_cksum(tmp);

 	if (val != cmp) {
 		/* CRC failure, dump it. */
@@ -142,7 +143,8 @@ int sctp_rcv(struct sk_buff *skb)
 	__skb_pull(skb, skb_transport_offset(skb));
 	if (skb->len < sizeof(struct sctphdr))
 		goto discard_it;
-	if (!skb_csum_unnecessary(skb) && sctp_rcv_checksum(skb) < 0)
+	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
+	    sctp_rcv_checksum(skb) < 0)
 		goto discard_it;

 	skb_pull(skb, sizeof(struct sctphdr));
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ceaa4aa066ea..a63de3f7f185 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,8 +97,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 	if (addr) {
 		addr->a.v6.sin6_family = AF_INET6;
 		addr->a.v6.sin6_port = 0;
-		memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
-			sizeof(struct in6_addr));
+		ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
 		addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 		addr->valid = 1;
 		spin_lock_bh(&sctp_local_addr_lock);
@@ -628,9 +627,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 					     struct sctp_association *asoc)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct sock *newsk;
-	struct inet_sock *newinet;
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct sctp6_sock *newsctp6sk;
@@ -640,17 +637,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,

 	sock_init_data(NULL, newsk);

-	newsk->sk_type = SOCK_STREAM;
-
-	newsk->sk_prot = sk->sk_prot;
-	newsk->sk_no_check = sk->sk_no_check;
-	newsk->sk_reuse = sk->sk_reuse;
-
-	newsk->sk_destruct = inet_sock_destruct;
-	newsk->sk_family = PF_INET6;
-	newsk->sk_protocol = IPPROTO_SCTP;
-	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
-	newsk->sk_shutdown = sk->sk_shutdown;
+	sctp_copy_sock(newsk, sk, asoc);

 	sock_reset_flag(sk, SOCK_ZAPPED);

 	newsctp6sk = (struct sctp6_sock *)newsk;
@@ -658,7 +645,6 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,

 	sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;

-	newinet = inet_sk(newsk);
 	newnp = inet6_sk(newsk);

 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
@@ -666,26 +652,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
 	 * and getpeername().
 	 */
-	newinet->sport = inet->sport;
-	newnp->saddr = np->saddr;
-	newnp->rcv_saddr = np->rcv_saddr;
-	newinet->dport = htons(asoc->peer.port);
 	sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);

-	/* Init the ipv4 part of the socket since we can have sockets
-	 * using v6 API for ipv4.
-	 */
-	newinet->uc_ttl = -1;
-	newinet->mc_loop = 1;
-	newinet->mc_ttl = 1;
-	newinet->mc_index = 0;
-	newinet->mc_list = NULL;
-
-	if (ipv4_config.no_pmtu_disc)
-		newinet->pmtudisc = IP_PMTUDISC_DONT;
-	else
-		newinet->pmtudisc = IP_PMTUDISC_WANT;
-
 	sk_refcnt_debug_inc(newsk);

 	if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 73639355157e..07d58903a746 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -367,7 +367,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	struct sctp_transport *tp = packet->transport;
 	struct sctp_association *asoc = tp->asoc;
 	struct sctphdr *sh;
-	__be32 crc32 = __constant_cpu_to_be32(0);
 	struct sk_buff *nskb;
 	struct sctp_chunk *chunk, *tmp;
 	struct sock *sk;
@@ -531,17 +530,16 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * Note: Adler-32 is no longer applicable, as has been replaced
 	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
 	 */
-	if (!(dst->dev->features & NETIF_F_NO_CSUM)) {
-		crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
-		crc32 = sctp_end_cksum(crc32);
+	if (!sctp_checksum_disable && !(dst->dev->features & NETIF_F_NO_CSUM)) {
+		__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+
+		/* 3) Put the resultant value into the checksum field in the
+		 * common header, and leave the rest of the bits unchanged.
+		 */
+		sh->checksum = sctp_end_cksum(crc32);
 	} else
 		nskb->ip_summed = CHECKSUM_UNNECESSARY;

-	/* 3) Put the resultant value into the checksum field in the
-	 * common header, and leave the rest of the bits unchanged.
-	 */
-	sh->checksum = crc32;
-
 	/* IP layer ECN support
 	 * From RFC 2481
 	 * "The ECN-Capable Transport (ECT) bit would be set by the
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index bc411c896216..d765fc53e74d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -428,7 +428,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 * retransmitting due to T3 timeout.
 		 */
 		if (reason == SCTP_RTXR_T3_RTX &&
-		    (jiffies - chunk->sent_at) < transport->last_rto)
+		    time_before(jiffies, chunk->sent_at +
+					 transport->last_rto))
 			continue;

 		/* RFC 2960 6.2.1 Processing a Received SACK
@@ -1757,6 +1758,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 	struct sctp_chunk *chunk;
 	struct list_head *lchunk, *temp;

+	if (!asoc->peer.prsctp_capable)
+		return;
+
 	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
 	 * received SACK.
 	 *
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index c4986d0f7419..cb198af8887c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -589,46 +589,21 @@ static int sctp_v4_is_ce(const struct sk_buff *skb)
 static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 					     struct sctp_association *asoc)
 {
-	struct inet_sock *inet = inet_sk(sk);
-	struct inet_sock *newinet;
 	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
 			sk->sk_prot);
+	struct inet_sock *newinet;

 	if (!newsk)
 		goto out;

 	sock_init_data(NULL, newsk);

-	newsk->sk_type = SOCK_STREAM;
-
-	newsk->sk_no_check = sk->sk_no_check;
-	newsk->sk_reuse = sk->sk_reuse;
-	newsk->sk_shutdown = sk->sk_shutdown;
-
-	newsk->sk_destruct = inet_sock_destruct;
-	newsk->sk_family = PF_INET;
-	newsk->sk_protocol = IPPROTO_SCTP;
-	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+	sctp_copy_sock(newsk, sk, asoc);

 	sock_reset_flag(newsk, SOCK_ZAPPED);

 	newinet = inet_sk(newsk);

-	/* Initialize sk's sport, dport, rcv_saddr and daddr for
-	 * getsockname() and getpeername()
-	 */
-	newinet->sport = inet->sport;
-	newinet->saddr = inet->saddr;
-	newinet->rcv_saddr = inet->rcv_saddr;
-	newinet->dport = htons(asoc->peer.port);
 	newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
-	newinet->pmtudisc = inet->pmtudisc;
-	newinet->id = asoc->next_tsn ^ jiffies;
-
-	newinet->uc_ttl = -1;
-	newinet->mc_loop = 1;
-	newinet->mc_ttl = 1;
-	newinet->mc_index = 0;
-	newinet->mc_list = NULL;

 	sk_refcnt_debug_inc(newsk);
@@ -1413,4 +1388,6 @@ MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
 MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
+module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
+MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
 MODULE_LICENSE("GPL");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index fd8acb48c3f2..6851ee94e974 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -100,11 +100,11 @@ int sctp_chunk_iif(const struct sctp_chunk *chunk)
  */
 static const struct sctp_paramhdr ecap_param = {
 	SCTP_PARAM_ECN_CAPABLE,
-	__constant_htons(sizeof(struct sctp_paramhdr)),
+	cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };
 static const struct sctp_paramhdr prsctp_param = {
 	SCTP_PARAM_FWD_TSN_SUPPORT,
-	__constant_htons(sizeof(struct sctp_paramhdr)),
+	cpu_to_be16(sizeof(struct sctp_paramhdr)),
 };

 /* A helper to initialize to initialize an op error inside a
@@ -224,7 +224,9 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 		num_ext += 2;
 	}

-	chunksize += sizeof(aiparam);
+	if (sp->adaptation_ind)
+		chunksize += sizeof(aiparam);
+
 	chunksize += vparam_len;

 	/* Account for AUTH related parameters */
@@ -304,10 +306,12 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	if (sctp_prsctp_enable)
 		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

-	aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
-	aiparam.param_hdr.length = htons(sizeof(aiparam));
-	aiparam.adaptation_ind = htonl(sp->adaptation_ind);
-	sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+	if (sp->adaptation_ind) {
+		aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
+		aiparam.param_hdr.length = htons(sizeof(aiparam));
+		aiparam.adaptation_ind = htonl(sp->adaptation_ind);
+		sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+	}

 	/* Add SCTP-AUTH chunks to the parameter list */
 	if (sctp_auth_enable) {
@@ -332,6 +336,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	sctp_inithdr_t initack;
 	struct sctp_chunk *retval;
 	union sctp_params addrs;
+	struct sctp_sock *sp;
 	int addrs_len;
 	sctp_cookie_param_t *cookie;
 	int cookie_len;
@@ -366,22 +371,24 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	/* Calculate the total size of allocation, include the reserved
 	 * space for reporting unknown parameters if it is specified.
 	 */
+	sp = sctp_sk(asoc->base.sk);
 	chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;

 	/* Tell peer that we'll do ECN only if peer advertised such cap. */
 	if (asoc->peer.ecn_capable)
 		chunksize += sizeof(ecap_param);

-	if (sctp_prsctp_enable)
+	if (asoc->peer.prsctp_capable)
 		chunksize += sizeof(prsctp_param);

-	if (sctp_addip_enable) {
+	if (asoc->peer.asconf_capable) {
 		extensions[num_ext] = SCTP_CID_ASCONF;
 		extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
 		num_ext += 2;
 	}

-	chunksize += sizeof(aiparam);
+	if (sp->adaptation_ind)
+		chunksize += sizeof(aiparam);

 	if (asoc->peer.auth_capable) {
 		auth_random = (sctp_paramhdr_t *)asoc->c.auth_random;
@@ -432,10 +439,12 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	if (asoc->peer.prsctp_capable)
 		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

-	aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
-	aiparam.param_hdr.length = htons(sizeof(aiparam));
-	aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind);
-	sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+	if (sp->adaptation_ind) {
+		aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
+		aiparam.param_hdr.length = htons(sizeof(aiparam));
+		aiparam.adaptation_ind = htonl(sp->adaptation_ind);
+		sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+	}

 	if (asoc->peer.auth_capable) {
 		sctp_addto_chunk(retval, ntohs(auth_random->length),
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b5495aecab60..e2020eb2c8ca 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -434,7 +434,8 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
  *
  */
 static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
-					 struct sctp_transport *transport)
+					 struct sctp_transport *transport,
+					 int is_hb)
 {
 	/* The check for association's overall error counter exceeding the
 	 * threshold is done in the state function.
@@ -461,9 +462,15 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 	 * expires, set RTO <- RTO * 2 ("back off the timer"). The
 	 * maximum value discussed in rule C7 above (RTO.max) may be
 	 * used to provide an upper bound to this doubling operation.
+	 *
+	 * Special Case: the first HB doesn't trigger exponential backoff.
+	 * The first unacknowleged HB triggers it. We do this with a flag
+	 * that indicates that we have an outstanding HB.
 	 */
-	transport->last_rto = transport->rto;
-	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
+	if (!is_hb || transport->hb_sent) {
+		transport->last_rto = transport->rto;
+		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
+	}
 }

 /* Worker routine to handle INIT command failure. */
@@ -621,6 +628,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 	t->error_count = 0;
 	t->asoc->overall_error_count = 0;

+	/* Clear the hb_sent flag to signal that we had a good
+	 * acknowledgement.
+	 */
+	t->hb_sent = 0;
+
 	/* Mark the destination transport address as active if it is not so
 	 * marked.
 	 */
@@ -646,18 +658,6 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
 		sctp_transport_hold(t);
 }

-/* Helper function to do a transport reset at the expiry of the hearbeat
- * timer.
- */
-static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
-				     struct sctp_association *asoc,
-				     struct sctp_transport *t)
-{
-	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
-
-	/* Mark one strike against a transport. */
-	sctp_do_8_2_transport_strike(asoc, t);
-}

 /* Helper function to process the process SACK command. */
 static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
@@ -1458,12 +1458,19 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,

 		case SCTP_CMD_STRIKE:
 			/* Mark one strike against a transport. */
-			sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
+			sctp_do_8_2_transport_strike(asoc, cmd->obj.transport,
+						    0);
+			break;
+
+		case SCTP_CMD_TRANSPORT_IDLE:
+			t = cmd->obj.transport;
+			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
 			break;

-		case SCTP_CMD_TRANSPORT_RESET:
+		case SCTP_CMD_TRANSPORT_HB_SENT:
 			t = cmd->obj.transport;
-			sctp_cmd_transport_reset(commands, asoc, t);
+			sctp_do_8_2_transport_strike(asoc, t, 1);
+			t->hb_sent = 1;
 			break;

 		case SCTP_CMD_TRANSPORT_ON:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f88dfded0e3a..55a61aa69662 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -988,7 +988,9 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
 		/* Set transport error counter and association error counter
 		 * when sending heartbeat.
 		 */
-		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
+		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+				SCTP_TRANSPORT(transport));
+		sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
 				SCTP_TRANSPORT(transport));
 	}
 	sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
@@ -4955,7 +4957,7 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
 	 * to that address and not acknowledged within one RTO.
 	 *
 	 */
-	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET,
+	sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
 			SCTP_TRANSPORT(arg));
 	return SCTP_DISPOSITION_CONSUME;
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ff0a8f88de04..5fb3a8c9792e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3069,9 +3069,6 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
 	int val;
 	int assoc_id = 0;

-	if (optlen < sizeof(int))
-		return -EINVAL;
-
 	if (optlen == sizeof(int)) {
 		printk(KERN_WARNING
 		   "SCTP: Use of int in max_burst socket option deprecated\n");
@@ -3939,7 +3936,6 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
 {
 	struct sock *sk = asoc->base.sk;
 	struct socket *sock;
-	struct inet_sock *inetsk;
 	struct sctp_af *af;
 	int err = 0;
@@ -3954,18 +3950,18 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
 	if (err < 0)
 		return err;

-	/* Populate the fields of the newsk from the oldsk and migrate the
-	 * asoc to the newsk.
-	 */
-	sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+	sctp_copy_sock(sock->sk, sk, asoc);

 	/* Make peeled-off sockets more like 1-1 accepted sockets.
 	 * Set the daddr and initialize id to something more random
 	 */
 	af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
 	af->to_sk_daddr(&asoc->peer.primary_addr, sk);
-	inetsk = inet_sk(sock->sk);
-	inetsk->id = asoc->next_tsn ^ jiffies;
+
+	/* Populate the fields of the newsk from the oldsk and migrate the
+	 * asoc to the newsk.
+	 */
+	sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);

 	*sockp = sock;
@@ -5284,16 +5280,14 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 	struct sctp_sock *sp;
 	struct sctp_association *asoc;

-	if (len < sizeof(int))
-		return -EINVAL;
-
 	if (len == sizeof(int)) {
 		printk(KERN_WARNING
 		   "SCTP: Use of int in max_burst socket option deprecated\n");
 		printk(KERN_WARNING
 		   "SCTP: Use struct sctp_assoc_value instead\n");
 		params.assoc_id = 0;
-	} else if (len == sizeof (struct sctp_assoc_value)) {
+	} else if (len >= sizeof(struct sctp_assoc_value)) {
+		len = sizeof(struct sctp_assoc_value);
 		if (copy_from_user(&params, optval, len))
 			return -EFAULT;
 	} else
@@ -5849,37 +5843,28 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
 }

 /*
- * 3.1.3 listen() - UDP Style Syntax
- *
- * By default, new associations are not accepted for UDP style sockets.
- * An application uses listen() to mark a socket as being able to
- * accept new associations.
+ * Move a socket to LISTENING state.
  */
-SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
+SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
 {
 	struct sctp_sock *sp = sctp_sk(sk);
 	struct sctp_endpoint *ep = sp->ep;
+	struct crypto_hash *tfm = NULL;

-	/* Only UDP style sockets that are not peeled off are allowed to
-	 * listen().
-	 */
-	if (!sctp_style(sk, UDP))
-		return -EINVAL;
-
-	/* If backlog is zero, disable listening. */
-	if (!backlog) {
-		if (sctp_sstate(sk, CLOSED))
-			return 0;
-
-		sctp_unhash_endpoint(ep);
-		sk->sk_state = SCTP_SS_CLOSED;
-		return 0;
+	/* Allocate HMAC for generating cookie. */
+	if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
+		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm)) {
+			if (net_ratelimit()) {
+				printk(KERN_INFO
+				       "SCTP: failed to load transform for %s: %ld\n",
+					sctp_hmac_alg, PTR_ERR(tfm));
+			}
+			return -ENOSYS;
+		}
+		sctp_sk(sk)->hmac = tfm;
 	}

-	/* Return if we are already listening. */
-	if (sctp_sstate(sk, LISTENING))
-		return 0;
-
 	/*
 	 * If a bind() or sctp_bindx() is not called prior to a listen()
 	 * call that allows new associations to be accepted, the system
@@ -5890,7 +5875,6 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 	 * extensions draft, but follows the practice as seen in TCP
 	 * sockets.
 	 *
-	 * Additionally, turn off fastreuse flag since we are not listening
 	 */
 	sk->sk_state = SCTP_SS_LISTENING;
 	if (!ep->base.bind_addr.port) {
@@ -5901,113 +5885,71 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 			sk->sk_state = SCTP_SS_CLOSED;
 			return -EADDRINUSE;
 		}
-		sctp_sk(sk)->bind_hash->fastreuse = 0;
 	}
-	sctp_hash_endpoint(ep);
-	return 0;
-}
-
-/*
- * 4.1.3 listen() - TCP Style Syntax
- *
- * Applications uses listen() to ready the SCTP endpoint for accepting
- * inbound associations.
- */
-SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
-{
-	struct sctp_sock *sp = sctp_sk(sk);
-	struct sctp_endpoint *ep = sp->ep;
-
-	/* If backlog is zero, disable listening. */
-	if (!backlog) {
-		if (sctp_sstate(sk, CLOSED))
-			return 0;
-
-		sctp_unhash_endpoint(ep);
-		sk->sk_state = SCTP_SS_CLOSED;
-		return 0;
-	}
-
-	if (sctp_sstate(sk, LISTENING))
-		return 0;
-
-	/*
-	 * If a bind() or sctp_bindx() is not called prior to a listen()
-	 * call that allows new associations to be accepted, the system
-	 * picks an ephemeral port and will choose an address set equivalent
-	 * to binding with a wildcard address.
-	 *
-	 * This is not currently spelled out in the SCTP sockets
-	 * extensions draft, but follows the practice as seen in TCP
-	 * sockets.
-	 */
-	sk->sk_state = SCTP_SS_LISTENING;
-	if (!ep->base.bind_addr.port) {
-		if (sctp_autobind(sk))
-			return -EAGAIN;
-	} else
-		sctp_sk(sk)->bind_hash->fastreuse = 0;
-
 	sk->sk_max_ack_backlog = backlog;
 	sctp_hash_endpoint(ep);
 	return 0;
 }

 /*
+ * 4.1.3 / 5.1.3 listen()
+ *
+ * By default, new associations are not accepted for UDP style sockets.
+ * An application uses listen() to mark a socket as being able to
+ * accept new associations.
+ *
+ * On TCP style sockets, applications use listen() to ready the SCTP
+ * endpoint for accepting inbound associations.
+ *
+ * On both types of endpoints a backlog of '0' disables listening.
+ *
  * Move a socket to LISTENING state.
  */
 int sctp_inet_listen(struct socket *sock, int backlog)
 {
 	struct sock *sk = sock->sk;
-	struct crypto_hash *tfm = NULL;
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	int err = -EINVAL;

 	if (unlikely(backlog < 0))
-		goto out;
+		return err;

 	sctp_lock_sock(sk);

+	/* Peeled-off sockets are not allowed to listen(). */
+	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
+		goto out;
+
 	if (sock->state != SS_UNCONNECTED)
 		goto out;

-	/* Allocate HMAC for generating cookie. */
-	if (!sctp_sk(sk)->hmac && sctp_hmac_alg) {
-		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
-		if (IS_ERR(tfm)) {
-			if (net_ratelimit()) {
-				printk(KERN_INFO
-				       "SCTP: failed to load transform for %s: %ld\n",
-					sctp_hmac_alg, PTR_ERR(tfm));
-			}
-			err = -ENOSYS;
+	/* If backlog is zero, disable listening. */
+	if (!backlog) {
+		if (sctp_sstate(sk, CLOSED))
 			goto out;
-		}
-	}
-	switch (sock->type) {
-	case SOCK_SEQPACKET:
-		err = sctp_seqpacket_listen(sk, backlog);
-		break;
-	case SOCK_STREAM:
-		err = sctp_stream_listen(sk, backlog);
-		break;
-	default:
-		break;
+		err = 0;
+		sctp_unhash_endpoint(ep);
+		sk->sk_state = SCTP_SS_CLOSED;
+		if (sk->sk_reuse)
+			sctp_sk(sk)->bind_hash->fastreuse = 1;
+		goto out;
 	}

-	if (err)
-		goto cleanup;
+	/* If we are already listening, just update the backlog */
+	if (sctp_sstate(sk, LISTENING))
+		sk->sk_max_ack_backlog = backlog;
+	else {
+		err = sctp_listen_start(sk, backlog);
+		if (err)
+			goto out;
+	}
-	/* Store away the transform reference. */
-	if (!sctp_sk(sk)->hmac)
-		sctp_sk(sk)->hmac = tfm;
+	err = 0;
 out:
 	sctp_release_sock(sk);
 	return err;
-cleanup:
-	crypto_free_hash(tfm);
-	goto out;
 }

 /*
@@ -6700,6 +6642,48 @@ done:
 	sctp_skb_set_owner_r(skb, sk);
 }

+void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+		    struct sctp_association *asoc)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	struct inet_sock *newinet = inet_sk(newsk);
+
+	newsk->sk_type = sk->sk_type;
+	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+	newsk->sk_flags = sk->sk_flags;
+	newsk->sk_no_check = sk->sk_no_check;
+	newsk->sk_reuse = sk->sk_reuse;
+
+	newsk->sk_shutdown = sk->sk_shutdown;
+	newsk->sk_destruct = inet_sock_destruct;
+	newsk->sk_family = sk->sk_family;
+	newsk->sk_protocol = IPPROTO_SCTP;
+	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+	newsk->sk_sndbuf = sk->sk_sndbuf;
+	newsk->sk_rcvbuf = sk->sk_rcvbuf;
+	newsk->sk_lingertime = sk->sk_lingertime;
+	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
+	newsk->sk_sndtimeo = sk->sk_sndtimeo;
+
+	newinet = inet_sk(newsk);
+
+	/* Initialize sk's sport, dport, rcv_saddr and daddr for
+	 * getsockname() and getpeername()
+	 */
+	newinet->sport = inet->sport;
+	newinet->saddr = inet->saddr;
+	newinet->rcv_saddr = inet->rcv_saddr;
+	newinet->dport = htons(asoc->peer.port);
+	newinet->pmtudisc = inet->pmtudisc;
+	newinet->id = asoc->next_tsn ^ jiffies;
+
+	newinet->uc_ttl = inet->uc_ttl;
+	newinet->mc_loop = 1;
+	newinet->mc_ttl = 1;
+	newinet->mc_index = 0;
+	newinet->mc_list = NULL;
+}
+
 /* Populate the fields of the newsk from the oldsk and migrate the assoc
  * and its messages to the newsk.
  */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e745c118f239..e5dde45c79d3 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -79,6 +79,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 	peer->rttvar = 0;
 	peer->srtt = 0;
 	peer->rto_pending = 0;
+	peer->hb_sent = 0;
 	peer->fast_recovery = 0;

 	peer->last_time_heard = jiffies;
@@ -542,8 +543,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 		 * congestion indications more than once every window of
 		 * data (or more loosely more than once every round-trip time).
 		 */
-		if ((jiffies - transport->last_time_ecne_reduced) >
-		    transport->rtt) {
+		if (time_after(jiffies, transport->last_time_ecne_reduced +
+					transport->rtt)) {
 			transport->ssthresh = max(transport->cwnd/2,
 						  4*transport->asoc->pathmtu);
 			transport->cwnd = transport->ssthresh;
@@ -560,7 +561,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
 		 * to be done every RTO interval, we do it every hearbeat
 		 * interval.
 		 */
-		if ((jiffies - transport->last_time_used) > transport->rto)
+		if (time_after(jiffies, transport->last_time_used +
+					transport->rto))
 			transport->cwnd = max(transport->cwnd/2,
 						 4*transport->asoc->pathmtu);
 		break;
@@ -608,6 +610,7 @@ void sctp_transport_reset(struct sctp_transport *t)
 	t->flight_size = 0;
 	t->error_count = 0;
 	t->rto_pending = 0;
+	t->hb_sent = 0;
 	t->fast_recovery = 0;

 	/* Initialize the state information for SFR-CACC */
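
A note on the time_before()/time_after() conversions in outqueue.c and transport.c above: the kernel macros compare two jiffies values through a signed difference, so a deadline that has wrapped past ULONG_MAX is still ordered correctly, which a plain '<' on the raw counter values is not. The sketch below is a minimal userspace illustration of that idea, assuming simplified copies of the macros and made-up tick values; it is not kernel code.

/* Standalone illustration (not kernel code) of why the patches prefer
 * time_before()/time_after() over raw comparisons of jiffies values.
 * The macros mirror the ones in <linux/jiffies.h>: unsigned subtraction
 * wraps modulo ULONG_MAX+1, and the cast to long (two's complement
 * assumed, as the kernel does) recovers the ordering as long as the two
 * times are less than half the counter range apart.
 */
#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)
#define time_before(a, b)	time_after(b, a)

int main(void)
{
	/* Hypothetical values: a chunk sent 4 ticks ago, just before the
	 * tick counter wraps, with an RTO of 50 ticks.
	 */
	unsigned long sent_at  = (unsigned long)-10;	/* near ULONG_MAX */
	unsigned long last_rto = 50;
	unsigned long now      = (unsigned long)-6;	/* 4 ticks later  */
	unsigned long deadline = sent_at + last_rto;	/* wraps to 40    */

	/* Comparing the raw values is fooled by the wrap... */
	printf("raw compare: %s\n",
	       now < deadline ? "within RTO" : "RTO expired");

	/* ...the signed-difference form used by the patched code is not. */
	printf("time_before: %s\n",
	       time_before(now, deadline) ? "within RTO" : "RTO expired");

	return 0;
}

Compiled with any C compiler, the raw comparison reports the RTO as already expired after only 4 ticks, while time_before() correctly reports it as still pending.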
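The sm_sideeffect.c and sm_statefuns.c hunks split the old SCTP_CMD_TRANSPORT_RESET into an idle-cwnd adjustment and an hb_sent strike, so that a heartbeat timeout on an idle link doubles the RTO only once a previously sent heartbeat has gone unacknowledged. The following condensed restatement of that rule uses simplified stand-in types (not the kernel's struct sctp_transport) purely to make the control flow explicit.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the transport fields the backoff rule touches. */
struct transport {
	unsigned long rto;	/* current retransmission timeout (ms) */
	unsigned long last_rto;
	unsigned long rto_max;
	bool hb_sent;		/* a heartbeat is outstanding/unacked  */
};

/* Mirrors the patched sctp_do_8_2_transport_strike(): a data timeout
 * (is_hb == false) always backs the timer off; a heartbeat timeout backs
 * it off only when an earlier heartbeat is still unacknowledged.
 */
static void transport_strike(struct transport *t, bool is_hb)
{
	if (!is_hb || t->hb_sent) {
		t->last_rto = t->rto;
		t->rto = (t->rto * 2 < t->rto_max) ? t->rto * 2 : t->rto_max;
	}
}

int main(void)
{
	struct transport t = { .rto = 3000, .last_rto = 3000,
			       .rto_max = 60000, .hb_sent = false };

	transport_strike(&t, true);		/* first HB on an idle link */
	printf("after first HB:   rto=%lu\n", t.rto);	/* stays 3000 */

	t.hb_sent = true;			/* that HB was never acked  */
	transport_strike(&t, true);
	printf("after unacked HB: rto=%lu\n", t.rto);	/* doubles to 6000 */

	return 0;
}

A good heartbeat acknowledgement clears hb_sent again (the SCTP_CMD_TRANSPORT_ON hunk sets t->hb_sent = 0), so a healthy but idle path keeps its RTO instead of creeping toward rto_max.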