From 805de8f43c20ba8b479bb598b543fa86b20067f6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Apr 2015 01:12:32 +0200
Subject: atomic: Replace atomic_{set,clear}_mask() usage

Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
---
 arch/s390/kernel/time.c   |  4 ++--
 arch/s390/kvm/interrupt.c | 30 +++++++++++++++---------------
 arch/s390/kvm/kvm-s390.c  | 32 ++++++++++++++++----------------
 3 files changed, 33 insertions(+), 33 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e733d965e08..f5a0bd778ace 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d89708e99..57309e9cdd80 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 }
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..b73302fb0507 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1771,7 +1771,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1780,7 @@ retry:
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
--
cgit v1.2.1
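For reference, atomic_set_mask(mask, v) ORs mask into *v and atomic_clear_mask(mask, v)
ANDs *v with ~mask, which is exactly the behaviour the replacement atomic_or() and
atomic_andnot() helpers provide, so each hunk above is a mechanical one-to-one
substitution. The short sketch below illustrates those bit-mask semantics in userspace
with C11 <stdatomic.h> rather than the kernel's atomic_t API; the CPUSTAT_* values and
the set_mask()/clear_mask() helper names are chosen for illustration only.

/* Userspace illustration of the mask semantics, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define CPUSTAT_STOPPED 0x80000000u	/* example flag value, illustration only */
#define CPUSTAT_WAIT    0x10000000u	/* example flag value, illustration only */

static atomic_uint cpuflags;

/* old atomic_set_mask(mask, v)  -> new atomic_or(mask, v): atomically OR mask into *v */
static void set_mask(unsigned int mask, atomic_uint *v)
{
	atomic_fetch_or(v, mask);
}

/* old atomic_clear_mask(mask, v) -> new atomic_andnot(mask, v): atomically AND *v with ~mask */
static void clear_mask(unsigned int mask, atomic_uint *v)
{
	atomic_fetch_and(v, ~mask);
}

int main(void)
{
	set_mask(CPUSTAT_STOPPED | CPUSTAT_WAIT, &cpuflags);
	clear_mask(CPUSTAT_WAIT, &cpuflags);
	printf("cpuflags = 0x%08x\n", atomic_load(&cpuflags));	/* prints 0x80000000 */
	return 0;
}

A call such as atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags) in the patch
therefore sets the CPUSTAT_RUNNING bit atomically, just as the old atomic_set_mask()
call did, and atomic_andnot() clears it.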