Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/ubi.h |  3
-rw-r--r--  drivers/mtd/ubi/wl.c  | 34
2 files changed, 25 insertions, 12 deletions
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7a33470c0416..bc13d14e02c4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -443,7 +443,8 @@ struct ubi_debug_info {
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
  *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- *           @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
+ *           @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ *           and @fm_wl_pool fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: used to wait for all the scheduled works to finish and prevent
  *            new works from being submitted
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index bf66890fefad..2539a12140e7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -626,24 +626,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi)
 {
-	int ret;
+	int ret, retried = 0;
 	struct ubi_fm_pool *pool = &ubi->fm_pool;
 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
 
-	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
-	    wl_pool->used == wl_pool->size)
+again:
+	spin_lock(&ubi->wl_lock);
+	/* We check here also for the WL pool because at this point we can
+	 * refill the WL pool synchronous. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
 		ubi_update_fastmap(ubi);
-
-	/* we got not a single free PEB */
-	if (!pool->size)
-		ret = -ENOSPC;
-	else {
 		spin_lock(&ubi->wl_lock);
-		ret = pool->pebs[pool->used++];
-		prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	}
+
+	if (pool->used == pool->size) {
 		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		goto again;
 	}
+
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
 	return ret;
 }
 
@@ -656,7 +668,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
-	if (pool->used == pool->size || !pool->size) {
+	if (pool->used == pool->size) {
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
 		 * Let's fail here and refill/update it as soon as possible. */
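
In short, the new ubi_wl_get_peb() takes wl_lock, refills the fastmap pools synchronously when either one is exhausted, and retries once before giving up with -ENOSPC, while get_peb_for_wl() still cannot refill because it runs in atomic context. Below is a minimal userspace sketch of that refill-and-retry pattern only, under the assumption that a mutex stands in for wl_lock; struct fm_pool, refill_pool() and get_peb() are simplified stand-ins, not the real UBI structures or API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define POOL_SIZE 4

/* Simplified stand-in for struct ubi_fm_pool. */
struct fm_pool {
	int pebs[POOL_SIZE];
	int used;	/* next slot to hand out */
	int size;	/* number of valid entries */
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of wl_lock */
static struct fm_pool pool;
static int next_peb = 100;
static int out_of_pebs;		/* set to simulate a device with no free PEBs */

/* Stand-in for ubi_update_fastmap(): refill the pool with the lock dropped. */
static void refill_pool(void)
{
	if (out_of_pebs)
		return;
	for (int i = 0; i < POOL_SIZE; i++)
		pool.pebs[i] = next_peb++;
	pool.used = 0;
	pool.size = POOL_SIZE;
}

/* Mirrors the control flow of the new ubi_wl_get_peb(): refill, retry once. */
static int get_peb(void)
{
	int ret, retried = 0;

again:
	pthread_mutex_lock(&pool_lock);
	if (pool.used == pool.size) {
		/* Pool exhausted: drop the lock and refill synchronously. */
		pthread_mutex_unlock(&pool_lock);
		refill_pool();
		pthread_mutex_lock(&pool_lock);
	}

	if (pool.used == pool.size) {
		/* Refill did not help; retry once, then report -ENOSPC. */
		pthread_mutex_unlock(&pool_lock);
		if (retried) {
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		goto again;
	}

	ret = pool.pebs[pool.used++];
	pthread_mutex_unlock(&pool_lock);
out:
	return ret;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("got PEB %d\n", get_peb());

	out_of_pebs = 1;
	pool.used = pool.size;	/* force the error path */
	printf("after exhaustion: %d\n", get_peb());
	return 0;
}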