author		Gao Xiang <gaoxiang25@huawei.com>	2018-11-23 01:16:02 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-11-23 10:53:07 +0100
commit		73f5c66df3e26ab750cefcb9a3e08c71c9f79cad (patch)
tree		ff1a903537a5b30759a8d918863ea72bc257614e /drivers/staging/erofs
parent		df134b8d17b90c1e7720e318d36416b57424ff7a (diff)
staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'
There are two minor issues in the current freeze interface:

   1) The freeze interfaces are unrelated to CONFIG_DEBUG_SPINLOCK,
      so fix the incorrect #if conditions;

   2) On SMP platforms, preemption must also be disabled before doing
      atomic_cmpxchg; otherwise a higher-priority task could preempt
      the freezer between atomic_cmpxchg and preempt_disable and then
      spin on the locked refcount.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
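To make the race described above concrete, the following is a minimal user-space sketch of the fixed SMP ordering. It uses C11 atomics and no-op stand-ins for the kernel preempt API; every demo_* name and the DEMO_LOCKED_MAGIC value are assumptions for illustration, not erofs code. The key property is that the freezer becomes non-preemptible before it publishes the locked magic value, so it cannot be scheduled out while other tasks spin on a frozen refcount.

/*
 * User-space analogue of the fixed SMP freeze path, using C11 atomics.
 * All demo_* names and DEMO_LOCKED_MAGIC are hypothetical; they only
 * illustrate the ordering described in the commit message.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_LOCKED_MAGIC	(-12345)	/* stand-in for EROFS_LOCKED_MAGIC */

struct demo_workgroup {
	atomic_int refcount;
};

/*
 * preempt_disable()/preempt_enable() have no user-space equivalent; they
 * are modelled as no-ops here purely to show where they sit in the flow.
 */
static void demo_preempt_disable(void) { }
static void demo_preempt_enable(void) { }

static bool demo_try_to_freeze(struct demo_workgroup *grp, int val)
{
	demo_preempt_disable();			/* non-preemptible *before* cmpxchg */
	if (!atomic_compare_exchange_strong(&grp->refcount, &val,
					    DEMO_LOCKED_MAGIC)) {
		demo_preempt_enable();		/* lost the race, back off */
		return false;
	}
	return true;				/* frozen; still non-preemptible */
}

static void demo_unfreeze(struct demo_workgroup *grp, int orig_val)
{
	atomic_store(&grp->refcount, orig_val);	/* publish the old refcount */
	demo_preempt_enable();
}

int main(void)
{
	struct demo_workgroup wg = { .refcount = 1 };

	if (demo_try_to_freeze(&wg, 1)) {
		/* ... safely manipulate the workgroup here ... */
		demo_unfreeze(&wg, 1);
	}
	printf("refcount back to %d\n", atomic_load(&wg.refcount));
	return 0;
}

On failure the cmpxchg leaves the refcount untouched and preemption is re-enabled immediately, mirroring the error path of the patched erofs_workgroup_try_to_freeze() below.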
Diffstat (limited to 'drivers/staging/erofs')
-rw-r--r--	drivers/staging/erofs/internal.h	41
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index f933ab602c37..399a7003e783 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -194,40 +194,49 @@ struct erofs_workgroup {
 
 #define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)
 
-static inline bool erofs_workgroup_try_to_freeze(
-	struct erofs_workgroup *grp, int v)
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (v != atomic_cmpxchg(&grp->refcount,
-				v, EROFS_LOCKED_MAGIC))
-		return false;
 	preempt_disable();
-#else
-	preempt_disable();
-	if (atomic_read(&grp->refcount) != v) {
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
 		preempt_enable();
 		return false;
 	}
-#endif
 	return true;
 }
 
-static inline void erofs_workgroup_unfreeze(
-	struct erofs_workgroup *grp, int v)
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
 {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	atomic_set(&grp->refcount, v);
-#endif
+	atomic_set(&grp->refcount, orig_val);
 	preempt_enable();
 }
 
-#if defined(CONFIG_SMP)
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	return atomic_cond_read_relaxed(&grp->refcount,
 					VAL != EROFS_LOCKED_MAGIC);
 }
 #else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
 static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 {
 	int v = atomic_read(&grp->refcount);
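
The UP branch in the hunk above can skip any spinning because a freezer keeps preemption disabled for the whole critical section, so on a uniprocessor no other task can ever observe the locked state. For the SMP side, a hedged user-space counterpart of the waiter, erofs_wait_on_workgroup_freezed(), might look roughly as follows; the demo_* name and the open-coded loop are assumptions for the sketch, whereas the kernel helper relies on atomic_cond_read_relaxed():

/*
 * Illustrative stand-in for the SMP waiter path: spin until another task
 * publishes a non-locked refcount via unfreeze. Not erofs code.
 */
#include <stdatomic.h>

#define DEMO_LOCKED_MAGIC	(-12345)	/* stand-in for EROFS_LOCKED_MAGIC */

int demo_wait_on_workgroup_freezed(atomic_int *refcount)
{
	int v;

	/* relaxed reads suffice for the spin itself */
	do {
		v = atomic_load_explicit(refcount, memory_order_relaxed);
	} while (v == DEMO_LOCKED_MAGIC);

	return v;
}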