author		Dave Chinner <david@fromorbit.com>	2014-08-04 13:54:46 +1000
committer	Dave Chinner <david@fromorbit.com>	2014-08-04 13:54:46 +1000
commit		b076d8720d793cde04b75b4941b8774e209649b4 (patch)
tree		14f0aa5ac5850e2077076062340eb1ef15f7ccf1 /fs
parent		4d7eece2c0dad832c5f224629eba3cced3f2d6cd (diff)
parent		1e773c4989d2dfe08332b4c18f7e1d7ad633015c (diff)
Merge branch 'xfs-bulkstat-refactor' into for-next
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/xfs_ioctl.c	4
-rw-r--r--	fs/xfs/xfs_ioctl32.c	2
-rw-r--r--	fs/xfs/xfs_itable.c	569
-rw-r--r--	fs/xfs/xfs_itable.h	23
4 files changed, 283 insertions(+), 315 deletions(-)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 30983b8ceaa1..494237ed4a65 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -796,8 +796,8 @@ xfs_ioc_bulkstat(
error = xfs_inumbers(mp, &inlast, &count,
bulkreq.ubuffer, xfs_inumbers_fmt);
else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
- error = xfs_bulkstat_single(mp, &inlast,
- bulkreq.ubuffer, &done);
+ error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer,
+ sizeof(xfs_bstat_t), NULL, &done);
else /* XFS_IOC_FSBULKSTAT */
error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
sizeof(xfs_bstat_t), bulkreq.ubuffer,
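
For context on the hunk above: XFS_IOC_FSBULKSTAT_SINGLE, which now calls xfs_bulkstat_one() directly, is driven from user space through struct xfs_fsop_bulkreq. A minimal caller sketch, assuming the xfsprogs <xfs/xfs.h> header and an already-open fd on the target XFS filesystem (the helper name is illustrative, not part of this patch):

#include <xfs/xfs.h>      /* struct xfs_fsop_bulkreq, struct xfs_bstat, XFS_IOC_FSBULKSTAT_SINGLE */
#include <sys/ioctl.h>
#include <stdio.h>

/* Stat a single, known inode number without walking the whole filesystem. */
static int bulkstat_single(int fd, __u64 ino, struct xfs_bstat *bs)
{
	__u64 lastino = ino;              /* in: inode to stat */
	__s32 ocount = 0;                 /* out: entries returned */
	struct xfs_fsop_bulkreq req = {
		.lastip  = &lastino,
		.icount  = 1,             /* single-inode request */
		.ubuffer = bs,            /* room for one struct xfs_bstat */
		.ocount  = &ocount,
	};

	if (ioctl(fd, XFS_IOC_FSBULKSTAT_SINGLE, &req) < 0) {
		perror("XFS_IOC_FSBULKSTAT_SINGLE");
		return -1;
	}
	return 0;
}
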
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e65ea67e3ae3..cf63418bf05f 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -102,7 +102,7 @@ xfs_compat_growfs_rt_copyin(
STATIC int
xfs_inumbers_fmt_compat(
void __user *ubuffer,
- const xfs_inogrp_t *buffer,
+ const struct xfs_inogrp *buffer,
long count,
long *written)
{
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 7e54992bcae9..f71be9c68017 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -76,10 +76,8 @@ xfs_bulkstat_one_int(
error = xfs_iget(mp, NULL, ino,
(XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
XFS_ILOCK_SHARED, &ip);
- if (error) {
- *stat = BULKSTAT_RV_NOTHING;
+ if (error)
goto out_free;
- }
ASSERT(ip != NULL);
ASSERT(ip->i_imap.im_blkno != 0);
@@ -136,7 +134,6 @@ xfs_bulkstat_one_int(
IRELE(ip);
error = formatter(buffer, ubsize, ubused, buf);
-
if (!error)
*stat = BULKSTAT_RV_DIDONE;
@@ -175,9 +172,170 @@ xfs_bulkstat_one(
xfs_bulkstat_one_fmt, ubused, stat);
}
+/*
+ * Loop over all clusters in a chunk for a given incore inode allocation btree
+ * record. Do a readahead if there are any allocated inodes in that cluster.
+ */
+STATIC void
+xfs_bulkstat_ichunk_ra(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ struct xfs_inobt_rec_incore *irec)
+{
+ xfs_agblock_t agbno;
+ struct blk_plug plug;
+ int blks_per_cluster;
+ int inodes_per_cluster;
+ int i; /* inode chunk index */
+
+ agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
+ blks_per_cluster = xfs_icluster_size_fsb(mp);
+ inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+
+ blk_start_plug(&plug);
+ for (i = 0; i < XFS_INODES_PER_CHUNK;
+ i += inodes_per_cluster, agbno += blks_per_cluster) {
+ if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
+ xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
+ &xfs_inode_buf_ops);
+ }
+ }
+ blk_finish_plug(&plug);
+}
+
+/*
+ * Lookup the inode chunk that the given inode lives in and then get the record
+ * if we found the chunk. If the inode was not the last in the chunk and there
+ * are some left allocated, update the data for the pointed-to record as well as
+ * return the count of grabbed inodes.
+ */
+STATIC int
+xfs_bulkstat_grab_ichunk(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agino_t agino, /* starting inode of chunk */
+ int *icount,/* return # of inodes grabbed */
+ struct xfs_inobt_rec_incore *irec) /* btree record */
+{
+ int idx; /* index into inode chunk */
+ int stat;
+ int error = 0;
+
+ /* Lookup the inode chunk that this inode lives in */
+ error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat);
+ if (error)
+ return error;
+ if (!stat) {
+ *icount = 0;
+ return error;
+ }
+
+ /* Get the record, should always work */
+ error = xfs_inobt_get_rec(cur, irec, &stat);
+ if (error)
+ return error;
+ XFS_WANT_CORRUPTED_RETURN(stat == 1);
+
+ /* Check if the record contains the inode in request */
+ if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
+ return -EINVAL;
+
+ idx = agino - irec->ir_startino + 1;
+ if (idx < XFS_INODES_PER_CHUNK &&
+ (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
+ int i;
+
+ /* We got a right chunk with some left inodes allocated at it.
+ * Grab the chunk record. Mark all the uninteresting inodes
+ * free -- because they're before our start point.
+ */
+ for (i = 0; i < idx; i++) {
+ if (XFS_INOBT_MASK(i) & ~irec->ir_free)
+ irec->ir_freecount++;
+ }
+
+ irec->ir_free |= xfs_inobt_maskn(0, idx);
+ *icount = XFS_INODES_PER_CHUNK - irec->ir_freecount;
+ }
+
+ return 0;
+}
+
#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
/*
+ * Process inodes in chunk with a pointer to a formatter function
+ * that will iget the inode and fill in the appropriate structure.
+ */
+int
+xfs_bulkstat_ag_ichunk(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ struct xfs_inobt_rec_incore *irbp,
+ bulkstat_one_pf formatter,
+ size_t statstruct_size,
+ struct xfs_bulkstat_agichunk *acp)
+{
+ xfs_ino_t lastino = acp->ac_lastino;
+ char __user **ubufp = acp->ac_ubuffer;
+ int ubleft = acp->ac_ubleft;
+ int ubelem = acp->ac_ubelem;
+ int chunkidx, clustidx;
+ int error = 0;
+ xfs_agino_t agino;
+
+ for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
+ XFS_BULKSTAT_UBLEFT(ubleft) &&
+ irbp->ir_freecount < XFS_INODES_PER_CHUNK;
+ chunkidx++, clustidx++, agino++) {
+ int fmterror; /* bulkstat formatter result */
+ int ubused;
+ xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino);
+
+ ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
+
+ /* Skip if this inode is free */
+ if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
+ lastino = ino;
+ continue;
+ }
+
+ /*
+ * Count used inodes as free so we can tell when the
+ * chunk is used up.
+ */
+ irbp->ir_freecount++;
+
+ /* Get the inode and fill in a single buffer */
+ ubused = statstruct_size;
+ error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
+ if (fmterror == BULKSTAT_RV_NOTHING) {
+ if (error && error != -ENOENT && error != -EINVAL) {
+ ubleft = 0;
+ break;
+ }
+ lastino = ino;
+ continue;
+ }
+ if (fmterror == BULKSTAT_RV_GIVEUP) {
+ ubleft = 0;
+ ASSERT(error);
+ break;
+ }
+ if (*ubufp)
+ *ubufp += ubused;
+ ubleft -= ubused;
+ ubelem++;
+ lastino = ino;
+ }
+
+ acp->ac_lastino = lastino;
+ acp->ac_ubleft = ubleft;
+ acp->ac_ubelem = ubelem;
+
+ return error;
+}
+
+/*
* Return stat information in bulk (by-inode) for the filesystem.
*/
int /* error status */
@@ -190,13 +348,10 @@ xfs_bulkstat(
char __user *ubuffer, /* buffer with inode stats */
int *done) /* 1 if there are more stats to get */
{
- xfs_agblock_t agbno=0;/* allocation group block number */
xfs_buf_t *agbp; /* agi header buffer */
xfs_agi_t *agi; /* agi header data */
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
- int chunkidx; /* current index into inode chunk */
- int clustidx; /* current index into inode cluster */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
int end_of_ag; /* set if we've seen the ag end */
int error; /* error code */
@@ -209,8 +364,6 @@ xfs_bulkstat(
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
xfs_ino_t lastino; /* last inode number returned */
- int blks_per_cluster; /* # of blocks per cluster */
- int inodes_per_cluster;/* # of inodes per cluster */
int nirbuf; /* size of irbuf */
int rval; /* return value error code */
int tmp; /* result value from btree calls */
@@ -218,7 +371,6 @@ xfs_bulkstat(
int ubleft; /* bytes left in user's buffer */
char __user *ubufp; /* pointer into user's buffer */
int ubelem; /* spaces used in user's buffer */
- int ubused; /* bytes used by formatter */
/*
* Get the last inode value, see if there's nothing to do.
@@ -233,17 +385,13 @@ xfs_bulkstat(
*ubcountp = 0;
return 0;
}
- if (!ubcountp || *ubcountp <= 0) {
- return -EINVAL;
- }
+
ubcount = *ubcountp; /* statstruct's */
ubleft = ubcount * statstruct_size; /* bytes */
*ubcountp = ubelem = 0;
*done = 0;
fmterror = 0;
ubufp = ubuffer;
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
if (!irbuf)
return -ENOMEM;
@@ -258,14 +406,8 @@ xfs_bulkstat(
while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
cond_resched();
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- if (error) {
- /*
- * Skip this allocation group and go to the next one.
- */
- agno++;
- agino = 0;
- continue;
- }
+ if (error)
+ break;
agi = XFS_BUF_TO_AGI(agbp);
/*
* Allocate and initialize a btree cursor for ialloc btree.
@@ -275,96 +417,39 @@ xfs_bulkstat(
irbp = irbuf;
irbufend = irbuf + nirbuf;
end_of_ag = 0;
- /*
- * If we're returning in the middle of an allocation group,
- * we need to get the remainder of the chunk we're in.
- */
+ icount = 0;
if (agino > 0) {
- xfs_inobt_rec_incore_t r;
-
/*
- * Lookup the inode chunk that this inode lives in.
+ * In the middle of an allocation group, we need to get
+ * the remainder of the chunk we're in.
*/
- error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
- &tmp);
- if (!error && /* no I/O error */
- tmp && /* lookup succeeded */
- /* got the record, should always work */
- !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
- i == 1 &&
- /* this is the right chunk */
- agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
- /* lastino was not last in chunk */
- (chunkidx = agino - r.ir_startino + 1) <
- XFS_INODES_PER_CHUNK &&
- /* there are some left allocated */
- xfs_inobt_maskn(chunkidx,
- XFS_INODES_PER_CHUNK - chunkidx) &
- ~r.ir_free) {
- /*
- * Grab the chunk record. Mark all the
- * uninteresting inodes (because they're
- * before our start point) free.
- */
- for (i = 0; i < chunkidx; i++) {
- if (XFS_INOBT_MASK(i) & ~r.ir_free)
- r.ir_freecount++;
- }
- r.ir_free |= xfs_inobt_maskn(0, chunkidx);
+ struct xfs_inobt_rec_incore r;
+
+ error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
+ if (error)
+ break;
+ if (icount) {
irbp->ir_startino = r.ir_startino;
irbp->ir_freecount = r.ir_freecount;
irbp->ir_free = r.ir_free;
irbp++;
agino = r.ir_startino + XFS_INODES_PER_CHUNK;
- icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
- } else {
- /*
- * If any of those tests failed, bump the
- * inode number (just in case).
- */
- agino++;
- icount = 0;
}
- /*
- * In any case, increment to the next record.
- */
- if (!error)
- error = xfs_btree_increment(cur, 0, &tmp);
+ /* Increment to the next record */
+ error = xfs_btree_increment(cur, 0, &tmp);
} else {
- /*
- * Start of ag. Lookup the first inode chunk.
- */
+ /* Start of ag. Lookup the first inode chunk */
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
- icount = 0;
}
+ if (error)
+ break;
+
/*
* Loop through inode btree records in this ag,
* until we run out of inodes or space in the buffer.
*/
while (irbp < irbufend && icount < ubcount) {
- xfs_inobt_rec_incore_t r;
-
- /*
- * Loop as long as we're unable to read the
- * inode btree.
- */
- while (error) {
- agino += XFS_INODES_PER_CHUNK;
- if (XFS_AGINO_TO_AGBNO(mp, agino) >=
- be32_to_cpu(agi->agi_length))
- break;
- error = xfs_inobt_lookup(cur, agino,
- XFS_LOOKUP_GE, &tmp);
- cond_resched();
- }
- /*
- * If ran off the end of the ag either with an error,
- * or the normal way, set end and stop collecting.
- */
- if (error) {
- end_of_ag = 1;
- break;
- }
+ struct xfs_inobt_rec_incore r;
error = xfs_inobt_get_rec(cur, &r, &i);
if (error || i == 0) {
@@ -377,25 +462,7 @@ xfs_bulkstat(
* Also start read-ahead now for this chunk.
*/
if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
- struct blk_plug plug;
- /*
- * Loop over all clusters in the next chunk.
- * Do a readahead if there are any allocated
- * inodes in that cluster.
- */
- blk_start_plug(&plug);
- agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
- for (chunkidx = 0;
- chunkidx < XFS_INODES_PER_CHUNK;
- chunkidx += inodes_per_cluster,
- agbno += blks_per_cluster) {
- if (xfs_inobt_maskn(chunkidx,
- inodes_per_cluster) & ~r.ir_free)
- xfs_btree_reada_bufs(mp, agno,
- agbno, blks_per_cluster,
- &xfs_inode_buf_ops);
- }
- blk_finish_plug(&plug);
+ xfs_bulkstat_ichunk_ra(mp, agno, &r);
irbp->ir_startino = r.ir_startino;
irbp->ir_freecount = r.ir_freecount;
irbp->ir_free = r.ir_free;
@@ -422,57 +489,20 @@ xfs_bulkstat(
irbufend = irbp;
for (irbp = irbuf;
irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
- /*
- * Now process this chunk of inodes.
- */
- for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
- XFS_BULKSTAT_UBLEFT(ubleft) &&
- irbp->ir_freecount < XFS_INODES_PER_CHUNK;
- chunkidx++, clustidx++, agino++) {
- ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
-
- ino = XFS_AGINO_TO_INO(mp, agno, agino);
- /*
- * Skip if this inode is free.
- */
- if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
- lastino = ino;
- continue;
- }
- /*
- * Count used inodes as free so we can tell
- * when the chunk is used up.
- */
- irbp->ir_freecount++;
-
- /*
- * Get the inode and fill in a single buffer.
- */
- ubused = statstruct_size;
- error = formatter(mp, ino, ubufp, ubleft,
- &ubused, &fmterror);
- if (fmterror == BULKSTAT_RV_NOTHING) {
- if (error && error != -ENOENT &&
- error != -EINVAL) {
- ubleft = 0;
- rval = error;
- break;
- }
- lastino = ino;
- continue;
- }
- if (fmterror == BULKSTAT_RV_GIVEUP) {
- ubleft = 0;
- ASSERT(error);
- rval = error;
- break;
- }
- if (ubufp)
- ubufp += ubused;
- ubleft -= ubused;
- ubelem++;
- lastino = ino;
- }
+ struct xfs_bulkstat_agichunk ac;
+
+ ac.ac_lastino = lastino;
+ ac.ac_ubuffer = &ubuffer;
+ ac.ac_ubleft = ubleft;
+ ac.ac_ubelem = ubelem;
+ error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
+ formatter, statstruct_size, &ac);
+ if (error)
+ rval = error;
+
+ lastino = ac.ac_lastino;
+ ubleft = ac.ac_ubleft;
+ ubelem = ac.ac_ubelem;
cond_resched();
}
@@ -512,58 +542,10 @@ xfs_bulkstat(
return rval;
}
-/*
- * Return stat information in bulk (by-inode) for the filesystem.
- * Special case for non-sequential one inode bulkstat.
- */
-int /* error status */
-xfs_bulkstat_single(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t *lastinop, /* inode to return */
- char __user *buffer, /* buffer with inode stats */
- int *done) /* 1 if there are more stats to get */
-{
- int count; /* count value for bulkstat call */
- int error; /* return value */
- xfs_ino_t ino; /* filesystem inode number */
- int res; /* result from bs1 */
-
- /*
- * note that requesting valid inode numbers which are not allocated
- * to inodes will most likely cause xfs_imap_to_bp to generate warning
- * messages about bad magic numbers. This is ok. The fact that
- * the inode isn't actually an inode is handled by the
- * error check below. Done this way to make the usual case faster
- * at the expense of the error case.
- */
-
- ino = *lastinop;
- error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
- NULL, &res);
- if (error) {
- /*
- * Special case way failed, do it the "long" way
- * to see if that works.
- */
- (*lastinop)--;
- count = 1;
- if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
- sizeof(xfs_bstat_t), buffer, done))
- return error;
- if (count == 0 || (xfs_ino_t)*lastinop != ino)
- return error == -EFSCORRUPTED ?
- EINVAL : error;
- else
- return 0;
- }
- *done = 0;
- return 0;
-}
-
int
xfs_inumbers_fmt(
void __user *ubuffer, /* buffer to write to */
- const xfs_inogrp_t *buffer, /* buffer to read from */
+ const struct xfs_inogrp *buffer, /* buffer to read from */
long count, /* # of elements to read */
long *written) /* # of bytes written */
{
@@ -578,127 +560,104 @@ xfs_inumbers_fmt(
*/
int /* error status */
xfs_inumbers(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t *lastino, /* last inode returned */
- int *count, /* size of buffer/count returned */
- void __user *ubuffer,/* buffer with inode descriptions */
- inumbers_fmt_pf formatter)
+ struct xfs_mount *mp,/* mount point for filesystem */
+ xfs_ino_t *lastino,/* last inode returned */
+ int *count,/* size of buffer/count returned */
+ void __user *ubuffer,/* buffer with inode descriptions */
+ inumbers_fmt_pf formatter)
{
- xfs_buf_t *agbp;
- xfs_agino_t agino;
- xfs_agnumber_t agno;
- int bcount;
- xfs_inogrp_t *buffer;
- int bufidx;
- xfs_btree_cur_t *cur;
- int error;
- xfs_inobt_rec_incore_t r;
- int i;
- xfs_ino_t ino;
- int left;
- int tmp;
-
- ino = (xfs_ino_t)*lastino;
- agno = XFS_INO_TO_AGNO(mp, ino);
- agino = XFS_INO_TO_AGINO(mp, ino);
- left = *count;
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, *lastino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, *lastino);
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_buf *agbp = NULL;
+ struct xfs_inogrp *buffer;
+ int bcount;
+ int left = *count;
+ int bufidx = 0;
+ int error = 0;
+
*count = 0;
+ if (agno >= mp->m_sb.sb_agcount ||
+ *lastino != XFS_AGINO_TO_INO(mp, agno, agino))
+ return error;
+
bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
- error = bufidx = 0;
- cur = NULL;
- agbp = NULL;
- while (left > 0 && agno < mp->m_sb.sb_agcount) {
- if (agbp == NULL) {
+ do {
+ struct xfs_inobt_rec_incore r;
+ int stat;
+
+ if (!agbp) {
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- if (error) {
- /*
- * If we can't read the AGI of this ag,
- * then just skip to the next one.
- */
- ASSERT(cur == NULL);
- agbp = NULL;
- agno++;
- agino = 0;
- continue;
- }
+ if (error)
+ break;
+
cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
XFS_BTNUM_INO);
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
- &tmp);
- if (error) {
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- cur = NULL;
- xfs_buf_relse(agbp);
- agbp = NULL;
- /*
- * Move up the last inode in the current
- * chunk. The lookup_ge will always get
- * us the first inode in the next chunk.
- */
- agino += XFS_INODES_PER_CHUNK - 1;
- continue;
- }
- }
- error = xfs_inobt_get_rec(cur, &r, &i);
- if (error || i == 0) {
- xfs_buf_relse(agbp);
- agbp = NULL;
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
- cur = NULL;
- agno++;
- agino = 0;
- continue;
+ &stat);
+ if (error)
+ break;
+ if (!stat)
+ goto next_ag;
}
+
+ error = xfs_inobt_get_rec(cur, &r, &stat);
+ if (error)
+ break;
+ if (!stat)
+ goto next_ag;
+
agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
buffer[bufidx].xi_startino =
XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
buffer[bufidx].xi_alloccount =
XFS_INODES_PER_CHUNK - r.ir_freecount;
buffer[bufidx].xi_allocmask = ~r.ir_free;
- bufidx++;
- left--;
- if (bufidx == bcount) {
- long written;
- if (formatter(ubuffer, buffer, bufidx, &written)) {
- error = -EFAULT;
+ if (++bufidx == bcount) {
+ long written;
+
+ error = formatter(ubuffer, buffer, bufidx, &written);
+ if (error)
break;
- }
ubuffer += written;
*count += bufidx;
bufidx = 0;
}
- if (left) {
- error = xfs_btree_increment(cur, 0, &tmp);
- if (error) {
- xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
- cur = NULL;
- xfs_buf_relse(agbp);
- agbp = NULL;
- /*
- * The agino value has already been bumped.
- * Just try to skip up to it.
- */
- agino += XFS_INODES_PER_CHUNK;
- continue;
- }
- }
- }
+ if (!--left)
+ break;
+
+ error = xfs_btree_increment(cur, 0, &stat);
+ if (error)
+ break;
+ if (stat)
+ continue;
+
+next_ag:
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ cur = NULL;
+ xfs_buf_relse(agbp);
+ agbp = NULL;
+ agino = 0;
+ } while (++agno < mp->m_sb.sb_agcount);
+
if (!error) {
if (bufidx) {
- long written;
- if (formatter(ubuffer, buffer, bufidx, &written))
- error = -EFAULT;
- else
+ long written;
+
+ error = formatter(ubuffer, buffer, bufidx, &written);
+ if (!error)
*count += bufidx;
}
*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
}
+
kmem_free(buffer);
if (cur)
xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
XFS_BTREE_NOERROR));
if (agbp)
xfs_buf_relse(agbp);
+
return error;
}
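
To make the resume-mid-chunk bookkeeping in the new xfs_bulkstat_grab_ichunk() concrete, here is a self-contained sketch of the same mask arithmetic. chunk_maskn() is a simplified stand-in for the kernel's xfs_inobt_maskn(), and the free mask and inode numbers are made-up values for illustration:

#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64

/* Simplified stand-in for xfs_inobt_maskn(): n one-bits starting at bit i. */
static uint64_t chunk_maskn(int i, int n)
{
	return ((n >= INODES_PER_CHUNK ? 0 : (uint64_t)1 << n) - 1) << i;
}

int main(void)
{
	uint64_t ir_free = 0xFFFFFFFF00000000ULL;  /* inodes 32-63 free, 0-31 allocated */
	int ir_freecount = 32;
	int startino = 0, agino = 10;              /* resuming just after inode 10 */
	int idx = agino - startino + 1;
	int i;

	/*
	 * Inodes before the resume point are uninteresting: count the
	 * allocated ones as free and fold them into the free mask, which is
	 * what xfs_bulkstat_grab_ichunk() does above.
	 */
	for (i = 0; i < idx; i++)
		if (((uint64_t)1 << i) & ~ir_free)
			ir_freecount++;
	ir_free |= chunk_maskn(0, idx);

	/* 64 - (32 + 11) = 21 inodes left to bulkstat in this chunk */
	printf("icount = %d\n", INODES_PER_CHUNK - ir_freecount);
	return 0;
}
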
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 97295d91d170..aaed08022eb9 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -30,6 +30,22 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
int *ubused,
int *stat);
+struct xfs_bulkstat_agichunk {
+ xfs_ino_t ac_lastino; /* last inode returned */
+ char __user **ac_ubuffer;/* pointer into user's buffer */
+ int ac_ubleft; /* bytes left in user's buffer */
+ int ac_ubelem; /* spaces used in user's buffer */
+};
+
+int
+xfs_bulkstat_ag_ichunk(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ struct xfs_inobt_rec_incore *irbp,
+ bulkstat_one_pf formatter,
+ size_t statstruct_size,
+ struct xfs_bulkstat_agichunk *acp);
+
/*
* Values for stat return value.
*/
@@ -50,13 +66,6 @@ xfs_bulkstat(
char __user *ubuffer,/* buffer with inode stats */
int *done); /* 1 if there are more stats to get */
-int
-xfs_bulkstat_single(
- xfs_mount_t *mp,
- xfs_ino_t *lastinop,
- char __user *buffer,
- int *done);
-
typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */
void __user *ubuffer, /* buffer to write to */
int ubsize, /* remaining user buffer sz */
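
Related user-space context for the xfs_inumbers() rework above: XFS_IOC_FSINUMBERS fills an array of struct xfs_inogrp records, one per inode chunk, and is resumed via the lastip cursor. A minimal walker sketch, again assuming the xfsprogs <xfs/xfs.h> header (the helper name and the batch size of 64 are illustrative):

#include <xfs/xfs.h>      /* struct xfs_fsop_bulkreq, struct xfs_inogrp, XFS_IOC_FSINUMBERS */
#include <sys/ioctl.h>
#include <stdio.h>

/* Print one line per allocated inode chunk on the filesystem behind fd. */
static int walk_inode_chunks(int fd)
{
	struct xfs_inogrp groups[64];
	__u64 lastino = 0;                /* 0 = start at the first chunk */
	__s32 count = 0;
	__s32 i;

	for (;;) {
		struct xfs_fsop_bulkreq req = {
			.lastip  = &lastino,  /* resume cursor, updated by the kernel */
			.icount  = 64,
			.ubuffer = groups,
			.ocount  = &count,
		};

		if (ioctl(fd, XFS_IOC_FSINUMBERS, &req) < 0)
			return -1;
		if (count == 0)           /* no more inode chunks */
			break;

		for (i = 0; i < count; i++)
			printf("chunk @ ino %llu: %d allocated, mask 0x%llx\n",
			       (unsigned long long)groups[i].xi_startino,
			       (int)groups[i].xi_alloccount,
			       (unsigned long long)groups[i].xi_allocmask);
	}
	return 0;
}
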