path: root/net/ipv4
author     Linus Torvalds <torvalds@linux-foundation.org>   2008-05-03 10:18:21 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-05-03 10:18:21 -0700
commit     4f9faaace217b854ae1e1746ee4ac44688ddadbd (patch)
tree       34122d9e84349a394988d6bfc480115e4809776a /net/ipv4
parent     bf640be423d60d954b9673527e106a461a129eb8 (diff)
parent     f37f2c62a28e848e06399ea2f9be1e098212625c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (47 commits)
  rose: Wrong list_lock argument in rose_node seqops
  netns: Fix reassembly timer to use the right namespace
  netns: Fix device renaming for sysfs
  bnx2: Update version to 1.7.5.
  bnx2: Update RV2P firmware for 5709.
  bnx2: Zero out context memory for 5709.
  bnx2: Fix register test on 5709.
  bnx2: Fix remote PHY initial link state.
  bnx2: Refine remote PHY locking.
  bridge: forwarding table information for >256 devices
  tg3: Update version to 3.92
  tg3: Add link state reporting to UMP firmware
  tg3: Fix ethtool loopback test for 5761 BX devices
  tg3: Fix 5761 NVRAM sizes
  tg3: Use constant 500KHz MI clock on adapters with a CPMU
  hci_usb.h: fix hard-to-trigger race
  dccp: ccid2.c, ccid3.c use clamp(), clamp_t()
  net: remove NR_CPUS arrays in net/core/dev.c
  net: use get/put_unaligned_* helpers
  bluetooth: use get/put_unaligned_* helpers
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/cipso_ipv4.c                18
-rw-r--r--   net/ipv4/netfilter/ipt_CLUSTERIP.c    6
-rw-r--r--   net/ipv4/tcp_hybla.c                  6
-rw-r--r--   net/ipv4/tcp_input.c                 18
-rw-r--r--   net/ipv4/tcp_ipv4.c                  10
-rw-r--r--   net/ipv4/tcp_vegas.c                  6
-rw-r--r--   net/ipv4/tcp_veno.c                   6
-rw-r--r--   net/ipv4/udp.c                        7
8 files changed, 38 insertions, 39 deletions
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 4637ded3dba8..05afb576d935 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -983,7 +983,7 @@ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
return -EFAULT;
for (iter = 0; iter < enumcat_len; iter += 2) {
- cat = ntohs(get_unaligned((__be16 *)&enumcat[iter]));
+ cat = get_unaligned_be16(&enumcat[iter]);
if (cat <= cat_prev)
return -EFAULT;
cat_prev = cat;
@@ -1052,7 +1052,7 @@ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
for (iter = 0; iter < net_cat_len; iter += 2) {
ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
- ntohs(get_unaligned((__be16 *)&net_cat[iter])),
+ get_unaligned_be16(&net_cat[iter]),
GFP_ATOMIC);
if (ret_val != 0)
return ret_val;
@@ -1086,10 +1086,9 @@ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
return -EFAULT;
for (iter = 0; iter < rngcat_len; iter += 4) {
- cat_high = ntohs(get_unaligned((__be16 *)&rngcat[iter]));
+ cat_high = get_unaligned_be16(&rngcat[iter]);
if ((iter + 4) <= rngcat_len)
- cat_low = ntohs(
- get_unaligned((__be16 *)&rngcat[iter + 2]));
+ cat_low = get_unaligned_be16(&rngcat[iter + 2]);
else
cat_low = 0;
@@ -1188,10 +1187,9 @@ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
u16 cat_high;
for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
- cat_high = ntohs(get_unaligned((__be16 *)&net_cat[net_iter]));
+ cat_high = get_unaligned_be16(&net_cat[net_iter]);
if ((net_iter + 4) <= net_cat_len)
- cat_low = ntohs(
- get_unaligned((__be16 *)&net_cat[net_iter + 2]));
+ cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
else
cat_low = 0;
@@ -1562,7 +1560,7 @@ int cipso_v4_validate(unsigned char **option)
}
rcu_read_lock();
- doi_def = cipso_v4_doi_search(ntohl(get_unaligned((__be32 *)&opt[2])));
+ doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
if (doi_def == NULL) {
err_offset = 2;
goto validate_return_locked;
@@ -1843,7 +1841,7 @@ static int cipso_v4_getattr(const unsigned char *cipso,
if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
return 0;
- doi = ntohl(get_unaligned((__be32 *)&cipso[2]));
+ doi = get_unaligned_be32(&cipso[2]);
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
if (doi_def == NULL)
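
The cipso_ipv4.c hunks above all make the same substitution: the open-coded ntohs(get_unaligned((__be16 *)p)) and ntohl(get_unaligned((__be32 *)p)) patterns become get_unaligned_be16()/get_unaligned_be32(), which read a possibly misaligned big-endian field and return it in host byte order in one call. The sketch below is only a userspace illustration of the helper semantics; the demo_* names and the option byte layout are made up for the example and are not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's get_unaligned_be16(): fetch two
 * bytes from a possibly misaligned address and combine them
 * big-endian-first into a host-order value. */
static uint16_t demo_get_unaligned_be16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)((b[0] << 8) | b[1]);
}

/* Same idea for a 32-bit big-endian field. */
static uint32_t demo_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	/* Illustrative CIPSO-style option bytes: a 32-bit DOI starts at
	 * offset 2, so a direct 32-bit load would be misaligned. */
	const uint8_t opt[] = { 0x86, 0x08, 0x00, 0x00, 0x00, 0x07, 0x00, 0x2a };

	printf("doi = %u\n", (unsigned)demo_get_unaligned_be32(&opt[2]));	/* 7 */
	printf("cat = %u\n", (unsigned)demo_get_unaligned_be16(&opt[6]));	/* 42 */
	return 0;
}
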
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 22d8e7cd9197..1819ad7ab910 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -169,14 +169,14 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
/* create proc dir entry */
sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
- c->pde = proc_create(buffer, S_IWUSR|S_IRUSR,
- clusterip_procdir, &clusterip_proc_fops);
+ c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
+ clusterip_procdir,
+ &clusterip_proc_fops, c);
if (!c->pde) {
kfree(c);
return NULL;
}
}
- c->pde->data = c;
#endif
write_lock_bh(&clusterip_lock);
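
The CLUSTERIP hunk replaces proc_create() plus a separate c->pde->data assignment with proc_create_data(), so the private pointer is attached before the /proc entry becomes visible and the file can never be opened while ->data is still unset. Below is a minimal module sketch of that call pattern, assuming the 2.6.26-era file_operations-based procfs interface used in this diff (the demo_* names and the entry name are hypothetical; later kernels changed this interface).

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical per-entry state that every open of the /proc file
 * should see; in ipt_CLUSTERIP.c this role is played by the
 * clusterip_config that 'c' points to. */
struct demo_cfg {
	unsigned int node_count;
};

static struct demo_cfg demo_cfg = { .node_count = 3 };

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_cfg *cfg = m->private;	/* filled in by single_open() */

	seq_printf(m, "nodes: %u\n", cfg->node_count);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is exactly the pointer handed to
	 * proc_create_data() below. */
	return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	/* One call creates the entry and attaches the private data, so
	 * there is no window where the file exists but ->data is NULL,
	 * which is the race the CLUSTERIP hunk closes. */
	if (!proc_create_data("clusterip_demo", S_IRUGO, NULL,
			      &demo_fops, &demo_cfg))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("clusterip_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
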
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 44618b675916..bfcbd148a89d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -101,8 +101,10 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (!ca->hybla_en)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!ca->hybla_en) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
if (ca->rho == 0)
hybla_recalc_param(sk);
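
The tcp_hybla hunk (and the identical tcp_vegas and tcp_veno hunks further down) fixes style rather than behavior: hybla_cong_avoid() returns void, and writing return tcp_reno_cong_avoid(...); returns the value of another void call, which ISO C only permits as a compiler extension. A small standalone sketch of the corrected idiom, with made-up names standing in for the congestion-avoidance hooks:

#include <stdio.h>

/* Stand-in for tcp_reno_cong_avoid(): the generic fallback path that
 * the algorithm-specific hook defers to when it is not active. */
static void reno_fallback(int ack)
{
	printf("reno fallback, ack=%d\n", ack);
}

/* A void hook hands off to another void function with an explicit call
 * followed by a bare return; "return reno_fallback(ack);" would return
 * a void expression, which ISO C does not allow in a void function. */
static void demo_cong_avoid(int algo_enabled, int ack)
{
	if (!algo_enabled) {
		reno_fallback(ack);
		return;
	}
	printf("algorithm-specific path, ack=%d\n", ack);
}

int main(void)
{
	demo_cong_avoid(0, 100);
	demo_cong_avoid(1, 101);
	return 0;
}
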
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0298f80681f2..eda4f4a233f3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1172,8 +1172,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
{
- u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
- u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+ u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
+ u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
int dup_sack = 0;
if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
@@ -1181,8 +1181,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
tcp_dsack_seen(tp);
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1) {
- u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
- u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+ u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
+ u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
if (!after(end_seq_0, end_seq_1) &&
!before(start_seq_0, start_seq_1)) {
@@ -1453,8 +1453,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
for (i = 0; i < num_sacks; i++) {
int dup_sack = !i && found_dup_sack;
- sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
- sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+ sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
+ sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
if (!tcp_is_sackblock_valid(tp, dup_sack,
sp[used_sacks].start_seq,
@@ -3340,7 +3340,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
switch (opcode) {
case TCPOPT_MSS:
if (opsize == TCPOLEN_MSS && th->syn && !estab) {
- u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
+ u16 in_mss = get_unaligned_be16(ptr);
if (in_mss) {
if (opt_rx->user_mss &&
opt_rx->user_mss < in_mss)
@@ -3369,8 +3369,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
((estab && opt_rx->tstamp_ok) ||
(!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
- opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
- opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
+ opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+ opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
}
break;
case TCPOPT_SACK_PERM:
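
The tcp_input.c hunks read SACK blocks and TCP option fields straight off the wire with get_unaligned_be32()/get_unaligned_be16() and then compare the resulting sequence numbers with before()/after(), whose signed subtraction stays correct across 32-bit wraparound. The sketch below re-implements that comparison in userspace for illustration; seq_before()/seq_after() are local stand-ins for the kernel helpers.

#include <stdint.h>
#include <stdio.h>

/* Local re-implementations of the kernel's before()/after() sequence
 * helpers: the signed subtraction keeps the ordering correct even when
 * the 32-bit TCP sequence space wraps around. */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static int seq_after(uint32_t seq1, uint32_t seq2)
{
	return seq_before(seq2, seq1);
}

int main(void)
{
	uint32_t near_wrap = 0xfffffff0u;	/* just below the wrap point */
	uint32_t wrapped = 0x00000010u;		/* 0x20 bytes later, after wrapping */

	/* A naive "<" misorders the two; the modular comparison does not. */
	printf("naive <      : %d\n", near_wrap < wrapped);		/* 0 */
	printf("seq_before() : %d\n", seq_before(near_wrap, wrapped));	/* 1 */
	printf("seq_after()  : %d\n", seq_after(wrapped, near_wrap));	/* 1 */
	return 0;
}
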
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0e9bc120707d..cd601a866c2f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2214,9 +2214,6 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
struct tcp_iter_state *s;
int err;
- if (unlikely(afinfo == NULL))
- return -EINVAL;
-
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct tcp_iter_state));
if (err < 0)
@@ -2241,10 +2238,9 @@ int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
afinfo->seq_ops.next = tcp_seq_next;
afinfo->seq_ops.stop = tcp_seq_stop;
- p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
- if (p)
- p->data = afinfo;
- else
+ p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ &afinfo->seq_fops, afinfo);
+ if (!p)
rc = -ENOMEM;
return rc;
}
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 0e1a8c91f78e..14504dada116 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -167,8 +167,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk);
- if (!vegas->doing_vegas_now)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!vegas->doing_vegas_now) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
*
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 2bf618a3b00b..d08b2e855c22 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -119,8 +119,10 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
- if (!veno->doing_veno_now)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!veno->doing_veno_now) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
/* limited by applications */
if (!tcp_is_cwnd_limited(sk, in_flight))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f535e315188..db1cb7c96d63 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1605,10 +1605,9 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
- p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
- if (p)
- p->data = afinfo;
- else
+ p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ &afinfo->seq_fops, afinfo);
+ if (!p)
rc = -ENOMEM;
return rc;
}