Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/atomic.h        7
-rw-r--r--  arch/s390/include/asm/perf_counter.h  2
-rw-r--r--  arch/s390/include/asm/thread_info.h   2
-rw-r--r--  arch/s390/include/asm/tlb.h           9
4 files changed, 15 insertions, 5 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fca9dffcc669..c7d0abfb0f00 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -268,7 +268,12 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#undef __CSG_LOOP
-#endif
+
+#else /* __s390x__ */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __s390x__ */
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
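On 31-bit builds (!__s390x__) the header now pulls in the generic 64-bit atomics instead of simply closing the 64-bit block, so the same atomic64 API is available on both configurations. A minimal usage sketch follows; it is not part of the diff, and sample_bytes/sample_account are hypothetical names.

/*
 * Minimal sketch (not from the patch): with the generic fallback
 * included above, the same atomic64 API works on both 31-bit and
 * 64-bit s390 builds.
 */
#include <asm/atomic.h>

static atomic64_t sample_bytes = ATOMIC64_INIT(0);

static void sample_account(unsigned int len)
{
	/* CSG-based on 64-bit, spinlock-backed generic code on 31-bit */
	atomic64_add(len, &sample_bytes);
}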
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
index a7205a3828cb..7015188c2cc2 100644
--- a/arch/s390/include/asm/perf_counter.h
+++ b/arch/s390/include/asm/perf_counter.h
@@ -6,3 +6,5 @@
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
+
+#define PERF_COUNTER_INDEX_OFFSET 0
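The new constant is the architecture's bias applied to a hardware counter index when the core perf_counter code publishes it to userspace; s390 defines it as 0. The sketch below is illustrative only, the helper name is hypothetical, and it is not the core kernel code.

/*
 * Illustrative sketch only (hypothetical helper, not from the kernel):
 * an arch-defined offset of this kind lets generic code turn a hardware
 * counter index into the index value exported to userspace.  With the
 * offset defined as 0, s390 indexes pass through unchanged.
 */
static inline int sample_user_counter_index(int hw_idx)
{
	return hw_idx + PERF_COUNTER_INDEX_OFFSET;
}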
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 925bcc649035..ba1cab9fc1f9 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -61,7 +61,7 @@ struct thread_info {
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
- .preempt_count = 1, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
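The initializer stops hard-coding the preempt count and uses INIT_PREEMPT_COUNT from generic code, so the initial value is defined in one place. For context (not part of this diff, quoted from memory of that era's include/linux/sched.h), the generic definition looked roughly like this:

/*
 * Context only, not part of this diff: the generic definition of that
 * era made new tasks start with preemption disabled (and PREEMPT_ACTIVE
 * set) until the scheduler finished setting them up.
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)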
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d39d9d..81150b053689 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long address)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
* as the pgd. pmd_free_tlb checks the asce_limit against 2GB
* to avoid the double free of the pmd in this case.
*/
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
* as the pgd. pud_free_tlb checks the asce_limit against 4TB
* to avoid the double free of the pud in this case.
*/
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
{
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 42))
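All three page-table freeing helpers gain an unsigned long address argument to match the new generic prototypes; the virtual address of the range being torn down is passed to the architecture, although s390 itself does not use it. A rough sketch of the generic caller side, based on mm/memory.c of the same era and shown only for context:

/*
 * Rough sketch of a generic caller (not part of this diff): the
 * address of the range being freed is now handed down to the arch
 * helper as a third argument.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);		/* new third argument */
	tlb->mm->nr_ptes--;
}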