author		David S. Miller <davem@davemloft.net>	2005-06-21 16:20:28 -0700
committer	David S. Miller <davem@davemloft.net>	2005-06-21 16:20:28 -0700
commit		7049e6800f40046c384c522a990669024d5f5836 (patch)
tree		5e790230b26721a89864fa610bc9d8e53114a881
parent		8005aba69a6440a535a4cc2aed99ffca580847e0 (diff)
[SPARC64]: Add prefetch support.
The implementation is optimal for UltraSPARC-III and later. It will work, however suboptimally, on UltraSPARC-II and be treated as a NOP on UltraSPARC-I.

It is not worth code patching this thing as the highest cost is the code space, and code patching cannot eliminate that.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/asm-sparc64/processor.h	34
1 file changed, 34 insertions, 0 deletions
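For illustration only (not part of this patch): a minimal sketch of how the read-side prefetch() helper added below is typically used, here in a hypothetical linked-list walk. struct item and sum_items() are invented names, and the benefit assumes the next node is likely to miss the cache.

#include <linux/prefetch.h>

/* Hypothetical example: issue the prefetch for the next node while
 * the current node is being processed, overlapping the memory
 * latency with useful work.
 */
struct item {
	struct item *next;
	int payload;
};

static int sum_items(struct item *head)
{
	struct item *p;
	int sum = 0;

	for (p = head; p; p = p->next) {
		if (p->next)
			prefetch(p->next);	/* read-intent prefetch */
		sum += p->payload;
	}
	return sum;
}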
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index bc1445b904ef..d0bee2413560 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -192,6 +192,40 @@ extern unsigned long get_wchan(struct task_struct *task);
#define cpu_relax() barrier()
+/* Prefetch support. This is tuned for UltraSPARC-III and later.
+ * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+ * a shallower prefetch queue than later chips.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+ /* We do not use the read prefetch mnemonic because that
+ * prefetches into the prefetch-cache, which is accessible
+ * only via floating point operations on UltraSPARC-III and
+ * later. By contrast, "#one_write" prefetches into the L2
+ * cache in shared state.
+ */
+ __asm__ __volatile__("prefetch [%0], #one_write"
+ : /* no outputs */
+ : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+ /* For writes, "#n_writes" is the optimal prefetch variant:
+ * it brings the cacheline into the L2 cache in "owned"
+ * state.
+ */
+ __asm__ __volatile__("prefetch [%0], #n_writes"
+ : /* no outputs */
+ : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
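Likewise, a hedged sketch for the write side (not from the patch; bump_counters() and its prefetch stride are invented for the example): prefetchw() requests the line with write intent, so the subsequent read-modify-write does not need a second coherence transaction to gain ownership.

/* Hypothetical example: write-intent prefetch roughly one 64-byte
 * cacheline (16 ints) ahead of a read-modify-write loop.
 */
static void bump_counters(int *counters, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i + 16 < n)
			prefetchw(&counters[i + 16]);
		counters[i]++;		/* read-modify-write */
	}
}

The same reasoning explains why spin_lock_prefetch(x) maps to prefetchw(x): acquiring a spinlock is itself a store to the lock word, so the line is wanted in owned state.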