Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c  |  8 ++++----
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c        | 16 ++++++++--------
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c         |  4 ++--
-rw-r--r--  arch/sh/mm/gup.c                      |  2 +-
-rw-r--r--  arch/x86/include/asm/spinlock.h       |  4 ++--
-rw-r--r--  arch/x86/xen/p2m.c                    |  2 +-
6 files changed, 18 insertions(+), 18 deletions(-)
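Every hunk below makes the same substitution: ACCESS_ONCE() becomes READ_ONCE(). As background (not part of the patch itself), the likely motivation is that ACCESS_ONCE() works through a volatile scalar cast, which is only well defined for scalar types, while several of the reads here are of aggregate types such as the icp->state union or page-table entries; READ_ONCE() instead dispatches on the size of the object. The following is a minimal sketch of that difference under those assumptions, using hypothetical *_SKETCH names rather than the kernel's actual definitions:

#include <stdint.h>
#include <string.h>

/*
 * Illustrative sketch only, not the kernel's exact macros.
 * ACCESS_ONCE() relies on a volatile cast, which is only well
 * defined for scalar types:
 */
#define ACCESS_ONCE_SKETCH(x)	(*(volatile typeof(x) *)&(x))

/*
 * A READ_ONCE()-style read copies through a size-dispatched helper,
 * so aggregate types (for example the icp->state union read in the
 * hunks below) are handled as well. GNU C extensions (typeof,
 * statement expressions) are assumed.
 */
static inline void read_once_size_sketch(const volatile void *p,
					 void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t  *)res = *(volatile uint8_t  *)p; break;
	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
	default:
		memcpy(res, (void *)p, size);	/* fallback: not a single access */
	}
}

#define READ_ONCE_SKETCH(x)						\
({									\
	union { typeof(x) val; char c[sizeof(x)]; } u;			\
	read_once_size_sketch(&(x), u.c, sizeof(x));			\
	u.val;								\
})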
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 7b066f6b02ad..7c22997de906 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* in virtual mode.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Down_CPPR */
new_state.cppr = new_cppr;
@@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
* pending priority
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
if (!old_state.xisr)
@@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* whenever the MFRR is made less favored.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Set_MFRR */
new_state.mfrr = mfrr;
@@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
icp_rm_clr_vcpu_irq(icp->vcpu);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
reject = 0;
new_state.cppr = cppr;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 807351f76f84..a4a8d9f0dcb7 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
icp->server_num);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
*reject = 0;
@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* in virtual mode.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Down_CPPR */
new_state.cppr = new_cppr;
@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
* pending priority
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
if (!old_state.xisr)
@@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
* whenever the MFRR is made less favored.
*/
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
/* Set_MFRR */
new_state.mfrr = mfrr;
@@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
if (!icp)
return H_PARAMETER;
}
- state = ACCESS_ONCE(icp->state);
+ state = READ_ONCE(icp->state);
kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
kvmppc_set_gpr(vcpu, 5, state.mfrr);
return H_SUCCESS;
@@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
do {
- old_state = new_state = ACCESS_ONCE(icp->state);
+ old_state = new_state = READ_ONCE(icp->state);
reject = 0;
new_state.cppr = cppr;
@@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
if (!icp)
continue;
- state.raw = ACCESS_ONCE(icp->state.raw);
+ state.raw = READ_ONCE(icp->state.raw);
seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
icp->server_num, state.xisr,
state.pending_pri, state.cppr, state.mfrr,
@@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
* the ICS states before the ICP states.
*/
do {
- old_state = ACCESS_ONCE(icp->state);
+ old_state = READ_ONCE(icp->state);
if (new_state.mfrr <= old_state.mfrr) {
resend = false;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index cf0464f4284f..7e408bfc7948 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -986,7 +986,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*/
pdshift = PUD_SHIFT;
pudp = pud_offset(&pgd, ea);
- pud = ACCESS_ONCE(*pudp);
+ pud = READ_ONCE(*pudp);
if (pud_none(pud))
return NULL;
@@ -998,7 +998,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
else {
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
- pmd = ACCESS_ONCE(*pmdp);
+ pmd = READ_ONCE(*pmdp);
/*
* A hugepage collapse is captured by pmd_none, because
* it mark the pmd none and do a hpte invalidate.
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index e15f52a17b6c..e7af6a65baab 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -17,7 +17,7 @@
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X2TLB
- return ACCESS_ONCE(*ptep);
+ return READ_ONCE(*ptep);
#else
/*
* With get_user_pages_fast, we walk down the pagetables without
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f8a2fc..7050d864f520 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- __ticket_t head = ACCESS_ONCE(lock->tickets.head);
+ __ticket_t head = READ_ONCE(lock->tickets.head);
for (;;) {
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
/*
* We need to check "unlocked" in a loop, tmp.head == head
* can be false positive because of overflow.
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index f18fd1d411f6..740ae3026a14 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -550,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
mid_mfn = NULL;
}
- p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+ p2m_pfn = pte_pfn(READ_ONCE(*ptep));
if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
/* p2m leaf page is missing */