| field | value | date |
|---|---|---|
| author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-12-24 06:56:49 +0000 |
| committer | David S. Miller <davem@davemloft.net> | 2011-12-24 16:13:27 -0500 |
| commit | 60b778ce519625102d3f72a2071ea72a05e990ce (patch) | |
| tree | ecae42cc8809809287e10007643370eaed7e3055 /net/core | |
| parent | 035c4c16bea2814890c64c657d177e91cec1f473 (diff) | |
rfs: better sizing of dev_flow_table
Aim of this patch is to provide the full range of rps_flow_cnt on 64-bit arches.
Theoretical limit on the number of flows is 2^32.
Fix some buggy RPS/RFS macros as well.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Tom Herbert <therbert@google.com>
CC: Xi Wang <xi.wang@gmail.com>
CC: Laurent Chavey <chavey@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
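
The core of the change is the mask computation in store_rps_dev_flow_table_cnt(): it derives roundup_pow_of_two(count) - 1 with a bit-smearing loop instead of calling roundup_pow_of_two() on the raw count, so the intermediate value never exceeds the final mask even for very large requests. Below is a minimal userspace sketch of that loop; the flow_mask() helper name and the sample counts are illustrative only, not part of the patch.

```c
/*
 * Userspace sketch (not kernel code) of the mask computation the patch
 * uses: roundup_pow_of_two(count) - 1, computed by smearing the top set
 * bit downwards so the intermediate value never exceeds the final mask.
 */
#include <stdio.h>

static unsigned long flow_mask(unsigned long count)	/* assumes count >= 1 */
{
	unsigned long mask = count - 1;

	/* Fill in every bit below the highest set bit. */
	while ((mask | (mask >> 1)) != mask)
		mask |= (mask >> 1);

	return mask;		/* table holds mask + 1 flow entries */
}

int main(void)
{
	unsigned long samples[] = { 1, 5, 8, 9, 4096, 100000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("count=%lu -> mask=%lu (table size %lu)\n",
		       samples[i], flow_mask(samples[i]),
		       flow_mask(samples[i]) + 1);
	return 0;
}
```

For example, writing 9 to rps_flow_cnt rounds up to a 16-entry table (mask 15), which is exactly what the new store path allocates.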
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/net-sysfs.c | 44 |
1 file changed, 27 insertions, 17 deletions
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4b4d0b0a3543..abf4393a77b3 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -622,15 +622,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 					    char *buf)
 {
 	struct rps_dev_flow_table *flow_table;
-	unsigned int val = 0;
+	unsigned long val = 0;
 
 	rcu_read_lock();
 	flow_table = rcu_dereference(queue->rps_flow_table);
 	if (flow_table)
-		val = flow_table->mask + 1;
+		val = (unsigned long)flow_table->mask + 1;
 	rcu_read_unlock();
 
-	return sprintf(buf, "%u\n", val);
+	return sprintf(buf, "%lu\n", val);
 }
 
 static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -654,36 +654,46 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 					     struct rx_queue_attribute *attr,
 					     const char *buf, size_t len)
 {
-	unsigned int count;
-	char *endp;
+	unsigned long mask, count;
 	struct rps_dev_flow_table *table, *old_table;
 	static DEFINE_SPINLOCK(rps_dev_flow_lock);
+	int rc;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
-	count = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EINVAL;
+	rc = kstrtoul(buf, 0, &count);
+	if (rc < 0)
+		return rc;
 
 	if (count) {
-		int i;
-
-		if (count > INT_MAX)
+		mask = count - 1;
+		/* mask = roundup_pow_of_two(count) - 1;
+		 * without overflows...
+		 */
+		while ((mask | (mask >> 1)) != mask)
+			mask |= (mask >> 1);
+		/* On 64 bit arches, must check mask fits in table->mask (u32),
+		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+		 * doesnt overflow.
+		 */
+#if BITS_PER_LONG > 32
+		if (mask > (unsigned long)(u32)mask)
 			return -EINVAL;
-		count = roundup_pow_of_two(count);
-		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+#else
+		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
 				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
 		if (!table)
 			return -ENOMEM;
 
-		table->mask = count - 1;
-		for (i = 0; i < count; i++)
-			table->flows[i].cpu = RPS_NO_CPU;
+		table->mask = mask;
+		for (count = 0; count <= mask; count++)
+			table->flows[count].cpu = RPS_NO_CPU;
 	} else
 		table = NULL;
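
The in-diff comment on the 32-bit bound is terse, so a sketch of the arithmetic may help: since RPS_DEV_FLOW_TABLE_SIZE(mask + 1) equals RPS_DEV_FLOW_TABLE_SIZE(1) + mask * sizeof(struct rps_dev_flow), requiring mask <= (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1)) / sizeof(struct rps_dev_flow) guarantees the size passed to vmalloc() cannot wrap. The userspace program below demonstrates the bound with simplified stand-in structs; the field layouts are assumptions for illustration, not the kernel definitions.

```c
/*
 * Userspace sketch (not kernel code) of the 32-bit overflow bound in the
 * patch. The structs are simplified stand-ins for the kernel's
 * rps_dev_flow / rps_dev_flow_table; only the size arithmetic matters.
 */
#include <limits.h>
#include <stdio.h>

struct rps_dev_flow {			/* stand-in */
	unsigned short cpu;
	unsigned short filter;
	unsigned int last_qtail;
};

struct rps_dev_flow_table {		/* stand-in: header + flexible array */
	unsigned int mask;
	struct rps_dev_flow flows[];
};

#define RPS_DEV_FLOW_TABLE_SIZE(num) \
	(sizeof(struct rps_dev_flow_table) + (num) * sizeof(struct rps_dev_flow))

int main(void)
{
	/*
	 * Largest mask for which RPS_DEV_FLOW_TABLE_SIZE(mask + 1) =
	 * RPS_DEV_FLOW_TABLE_SIZE(1) + mask * sizeof(struct rps_dev_flow)
	 * still fits in an unsigned long; this is the bound the patch
	 * enforces when BITS_PER_LONG is 32.
	 */
	unsigned long max_mask = (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				 / sizeof(struct rps_dev_flow);

	printf("flow entry size : %zu bytes\n", sizeof(struct rps_dev_flow));
	printf("largest mask    : %lu\n", max_mask);
	printf("largest table   : %lu bytes (no wrap)\n",
	       (unsigned long)RPS_DEV_FLOW_TABLE_SIZE(max_mask + 1));
	return 0;
}
```

On 64-bit kernels this bound is not needed: the u32 table->mask check already caps the table at 2^32 entries, whose size comfortably fits in an unsigned long.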