| field | value | date |
|---|---|---|
| author | Eric W. Biederman <ebiederm@xmission.com> | 2014-03-13 21:26:42 -0700 |
| committer | David S. Miller <davem@davemloft.net> | 2014-03-14 22:41:36 -0400 |
| commit | 57a7744e09867ebcfa0ccf1d6d529caa7728d552 (patch) | |
| tree | 6407fee7138787a24bf9251abfeeae69a239028a /include/linux/u64_stats_sync.h | |
| parent | 85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (diff) | |
| download | talos-obmc-linux-57a7744e09867ebcfa0ccf1d6d529caa7728d552.tar.gz, talos-obmc-linux-57a7744e09867ebcfa0ccf1d6d529caa7728d552.zip | |
net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq
Replace the bh safe variant with the hard irq safe variant.
We need a hard irq safe variant to deal with netpoll transmitting
packets from hard irq context, and we need it in most if not all of
the places using the bh safe variant.
Except on 32-bit uni-processor systems the code is exactly the same,
so don't bother with a bh variant; just have a hard irq safe variant
that everyone can use.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
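For readers unfamiliar with the API, the reader-side pattern these helpers support looks roughly like the sketch below. This is a minimal illustration, not code from this patch: the `pcpu_tx_stats` struct and its field names are hypothetical, and only `struct u64_stats_sync` and the fetch helpers come from include/linux/u64_stats_sync.h.

```c
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counter block; field names are illustrative only. */
struct pcpu_tx_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

static void read_tx_stats(const struct pcpu_tx_stats *stats,
			  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* Irq safe begin: disables interrupts on 32-bit UP. */
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		*packets = stats->tx_packets;
		*bytes   = stats->tx_bytes;
		/* Retry if a writer (possibly in hard irq) raced with the read. */
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}
```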
Diffstat (limited to 'include/linux/u64_stats_sync.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/u64_stats_sync.h | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 7bfabd20204c..4b4439e75f45 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -27,8 +27,8 @@
  * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  * read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- *    u64_stats_fetch_retry_bh() helpers
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ *    u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
  *
@@ -114,31 +114,31 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-	local_bh_disable();
+	local_irq_disable();
 #endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-	local_bh_enable();
+	local_irq_enable();
 #endif
 	return false;
 #endif
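For completeness, the writer side these readers synchronize against is the existing `u64_stats_update_begin()`/`u64_stats_update_end()` pair from the same header. With netpoll it can run from hard irq context, which is why a reader that only disabled bottom halves on 32-bit UP could be interrupted mid-read. A minimal sketch, reusing the hypothetical `pcpu_tx_stats` struct from the reader example above:

```c
static void account_tx(struct pcpu_tx_stats *stats, unsigned int len)
{
	/* Writer side: opens the seqcount on 32-bit SMP, a no-op on 64-bit. */
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
```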