Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/Kbuild            |   1
-rw-r--r--  arch/arc/include/asm/arcregs.h         |  13
-rw-r--r--  arch/arc/include/asm/atomic.h          | 263
-rw-r--r--  arch/arc/include/asm/cache.h           |   4
-rw-r--r--  arch/arc/include/asm/dwarf.h           |  38
-rw-r--r--  arch/arc/include/asm/elf.h             |   5
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h  |   2
-rw-r--r--  arch/arc/include/asm/linkage.h         |  12
-rw-r--r--  arch/arc/include/asm/mcip.h            |  16
-rw-r--r--  arch/arc/include/asm/module.h          |   1
-rw-r--r--  arch/arc/include/asm/perf_event.h      |   3
-rw-r--r--  arch/arc/include/asm/setup.h           |   6
-rw-r--r--  arch/arc/include/asm/syscalls.h        |   1
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h     |   9
14 files changed, 347 insertions, 27 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 0b10ef2a4372..c332604606dd 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
+generic-y += msi.h
generic-y += param.h
generic-y += parport.h
generic-y += pci.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fbaea00a336..7f3f9f63708c 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -95,7 +95,7 @@
/* Auxiliary registers */
#define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25
-#define AUX_NON_VOL 0x5e
+#define AUX_VOL 0x5e
/*
* Floating Pt Registers
@@ -240,14 +240,6 @@ struct bcr_extn_xymem {
#endif
};
-struct bcr_perip {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:8, pad2:8, sz:8, ver:8;
-#else
- unsigned int ver:8, sz:8, pad2:8, start:8;
-#endif
-};
-
struct bcr_iccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
@@ -357,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
+ const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+ unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 4e3c1b6b0806..b65930a49589 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -20,6 +20,7 @@
#ifndef CONFIG_ARC_PLAT_EZNPS
#define atomic_read(v) READ_ONCE((v)->counter)
+#define ATOMIC_INIT(i) { (i) }
#ifdef CONFIG_ARC_HAS_LLSC
@@ -284,6 +285,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -292,6 +294,7 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
@@ -343,10 +346,266 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define ATOMIC_INIT(i) { (i) }
+
+#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
-#endif
+#else /* Kconfig ensures this is only enabled with needed h/w assist */
+
+/*
+ * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
+ * - The address HAS to be 64-bit aligned
+ * - There are 2 semantics involved here:
+ * = exclusive implies no interim update between load/store to same addr
+ * = both words are observed/updated together: this is guaranteed even
+ * for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
+ * is NOT required to use LLOCKD+SCONDD, STD suffices
+ */
+
+typedef struct {
+ aligned_u64 counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(a) { (a) }
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ unsigned long long val;
+
+ __asm__ __volatile__(
+ " ldd %0, [%1] \n"
+ : "=r"(val)
+ : "r"(&v->counter));
+
+ return val;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long a)
+{
+ /*
+ * This could have been a simple assignment in "C" but would need
+ * explicit volatile. Otherwise gcc optimizers could elide the store
+ * which borked atomic64 self-test
+ * In the inline asm version, memory clobber needed for exact same
+ * reason, to tell gcc about the store.
+ *
+ * This however is not needed for sibling atomic64_add() etc since both
+ * load/store are explicitly done in inline asm. As long as API is used
+ * for each access, gcc has no way to optimize away any load/store
+ */
+ __asm__ __volatile__(
+ " std %0, [%1] \n"
+ :
+ : "r"(a), "r"(&v->counter)
+ : "memory");
+}
+
+#define ATOMIC64_OP(op, op1, op2) \
+static inline void atomic64_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long long val; \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc"); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, op1, op2) \
+static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
+{ \
+ unsigned long long val; \
+ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%1] \n" \
+ " " #op1 " %L0, %L0, %L2 \n" \
+ " " #op2 " %H0, %H0, %H2 \n" \
+ " scondd %0, [%1] \n" \
+ " bnz 1b \n" \
+ : [val] "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc"); /* memory clobber comes from smp_mb() */ \
+ \
+ smp_mb(); \
+ \
+ return val; \
+}
+
+#define ATOMIC64_FETCH_OP(op, op1, op2) \
+static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long long val, orig; \
+ \
+ smp_mb(); \
+ \
+ __asm__ __volatile__( \
+ "1: \n" \
+ " llockd %0, [%2] \n" \
+ " " #op1 " %L1, %L0, %L3 \n" \
+ " " #op2 " %H1, %H0, %H3 \n" \
+ " scondd %1, [%2] \n" \
+ " bnz 1b \n" \
+ : "=&r"(orig), "=&r"(val) \
+ : "r"(&v->counter), "ir"(a) \
+ : "cc"); /* memory clobber comes from smp_mb() */ \
+ \
+ smp_mb(); \
+ \
+ return orig; \
+}
+
+#define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2) \
+ ATOMIC64_FETCH_OP(op, op1, op2)
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OPS(add, add.f, adc)
+ATOMIC64_OPS(sub, sub.f, sbc)
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, or, or)
+ATOMIC64_OPS(xor, xor, xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline long long
+atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+{
+ long long prev;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " brne %L0, %L2, 2f \n"
+ " brne %H0, %H2, 2f \n"
+ " scondd %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected), "r"(new)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return prev;
+}
+
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+{
+ long long prev;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " scondd %2, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "r"(new)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return prev;
+}
+
+/**
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long val;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%1] \n"
+ " sub.f %L0, %L0, 1 # w0 - 1, set C on borrow\n"
+ " sub.c %H0, %H0, 1 # if C set, w1 - 1\n"
+ " brlt %H0, 0, 2f \n"
+ " scondd %0, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(val)
+ : "r"(&v->counter)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return val;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * if (v != u) { v += a; ret = 1} else {ret = 0}
+ * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ long long val;
+ int op_done;
+
+ smp_mb();
+
+ __asm__ __volatile__(
+ "1: llockd %0, [%2] \n"
+ " mov %1, 1 \n"
+ " brne %L0, %L4, 2f # continue to add since v != u \n"
+ " breq.d %H0, %H4, 3f # return since v == u \n"
+ " mov %1, 0 \n"
+ "2: \n"
+ " add.f %L0, %L0, %L3 \n"
+ " adc %H0, %H0, %H3 \n"
+ " scondd %0, [%2] \n"
+ " bnz 1b \n"
+ "3: \n"
+ : "=&r"(val), "=&r" (op_done)
+ : "r"(&v->counter), "r"(a), "r"(u)
+ : "cc"); /* memory clobber comes from smp_mb() */
+
+ smp_mb();
+
+ return op_done;
+}
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
+#endif /* !CONFIG_GENERIC_ATOMIC64 */
+
+#endif /* !__ASSEMBLY__ */
#endif
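
Note: for reference, the ATOMIC64_OP(add, add.f, adc) instance generated by ATOMIC64_OPS(add, add.f, adc) above expands to roughly the following LLOCKD/SCONDD retry loop (a sketch with the macro arguments substituted and whitespace simplified, not additional patch content):

static inline void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"1:				\n"
	"	llockd  %0, [%1]	\n"	/* 64-bit exclusive load        */
	"	add.f   %L0, %L0, %L2	\n"	/* low word, sets carry         */
	"	adc     %H0, %H0, %H2	\n"	/* high word + carry            */
	"	scondd  %0, [%1]	\n"	/* fails if location was touched */
	"	bnz     1b		\n"	/* retry on failed scondd       */
	: "=&r"(val)
	: "r"(&v->counter), "ir"(a)
	: "cc");
}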
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 23706c635c30..b3410ff6a62d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,8 +53,8 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
-extern int ioc_exists;
-extern unsigned long perip_base;
+extern int ioc_enable;
+extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
new file mode 100644
index 000000000000..bb7bdbc59a44
--- /dev/null
+++ b/arch/arc/include/asm/dwarf.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_DWARF_H
+#define _ASM_ARC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#ifdef ARC_DW2_UNWIND_AS_CFI
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_REGISTER .cfi_register
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_UNDEFINED .cfi_undefined
+
+#else
+
+#define CFI_IGNORE #
+
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+#define CFI_UNDEFINED CFI_IGNORE
+
+#endif /* !ARC_DW2_UNWIND_AS_CFI */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 51a99e25fe33..aa2d6da9d187 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -23,8 +23,7 @@
/* ARC Relocations (kernel Modules only) */
#define R_ARC_32 0x4
#define R_ARC_32_ME 0x1B
-#define R_ARC_S25H_PCREL 0x10
-#define R_ARC_S25W_PCREL 0x11
+#define R_ARC_32_PCREL 0x31
/*to set parameters in the core dumps */
#define ELF_ARCH EM_ARCOMPACT
@@ -55,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index d1ec7f6b31e0..e880dfa3fcd3 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -112,7 +112,7 @@ static inline long arch_local_save_flags(void)
*/
temp = (1 << 5) |
((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
- (temp & CLRI_STATUS_E_MASK);
+ ((temp >> 1) & CLRI_STATUS_E_MASK);
return temp;
}
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 5faad17118b4..b29f1a9fd6f7 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -9,6 +9,8 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
+#include <asm/dwarf.h>
+
#ifdef __ASSEMBLY__
#define ASM_NL ` /* use '`' to mark new line in macro */
@@ -32,6 +34,16 @@
#endif
.endm
+#define ENTRY_CFI(name) \
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name: ASM_NL \
+ CFI_STARTPROC ASM_NL
+
+#define END_CFI(name) \
+ CFI_ENDPROC ASM_NL \
+ .size name, .-name
+
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bbe387f..c8fbe4114bad 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:8,
+ idu:1, llm:1, num_cores:6,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
+ msg:1, sem:1, ipi:1, pad:1,
+ ver:8;
+#else
+ unsigned int ver:8,
+ pad:1, ipi:1, sem:1, msg:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
+ num_cores:6, llm:1, idu:1,
+ pad3:8;
+#endif
+};
+
/*
* MCIP programming model
*
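
Note: the new struct mcip_bcr overlays the MCIP build configuration register; a minimal probing sketch (the helper name below is hypothetical, and it assumes the READ_BCR() accessor and ARC_REG_MCIP_BCR register number already provided by the ARC headers) could look like:

/* Hypothetical probe: decode the MCIP BCR via the new bitfield layout */
static void __init sketch_mcip_probe(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);		/* raw aux reg -> bitfields */

	if (!mp.ver)
		return;				/* no MCIP block present */

	if (mp.gfrc)
		pr_info("MCIP: global free-running counter available\n");
	if (mp.idu)
		pr_info("MCIP: interrupt distribution unit, %d cores\n",
			mp.num_cores);
}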
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..6e91d8b339c3 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
+ const char *secstr;
};
#endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 5f071762fb1c..9185541035cc 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -118,6 +118,9 @@ static const char * const arc_pmu_ev_hw_map[] = {
[PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
[PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
[PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
+
+ [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */
};
#define C(_x) PERF_COUNT_HW_CACHE_##_x
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c693db3..cb954cdab070 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
-struct cpuinfo_data {
- struct id_to_str info;
- int up_range;
-};
-
extern int root_mountflags, end_mem;
void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fcc5581..772b67ca56e7 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec9e02c..9a34136d84b2 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL
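
Note: the new arc_usr_cmpxchg entry gives user space a kernel-assisted compare-and-exchange for cores lacking native atomics. A minimal user-space sketch follows; the wrapper name and the return-old-value semantics are assumptions here, and only the syscall number and the (int *, int, int) signature come from this patch. It also assumes __NR_arc_usr_cmpxchg is visible through the exported uapi headers.

#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical wrapper: compare-and-exchange on *uaddr done in-kernel */
static inline int arc_usr_cmpxchg(int *uaddr, int expected, int new)
{
	return syscall(__NR_arc_usr_cmpxchg, uaddr, expected, new);
}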