Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/tree-checker.c                  |  8
-rw-r--r--  fs/cifs/Kconfig                          |  2
-rw-r--r--  fs/cifs/dir.c                            |  2
-rw-r--r--  fs/cifs/file.c                           | 31
-rw-r--r--  fs/dax.c                                 | 55
-rw-r--r--  fs/exec.c                                |  5
-rw-r--r--  fs/iomap.c                               |  9
-rw-r--r--  fs/nfs/direct.c                          |  9
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c   |  6
-rw-r--r--  fs/read_write.c                          |  2
-rw-r--r--  fs/splice.c                              |  7
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c                |  2
-rw-r--r--  fs/xfs/xfs_bmap_util.c                   |  4
-rw-r--r--  fs/xfs/xfs_qm_bhv.c                      |  2
14 files changed, 74 insertions, 70 deletions
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index efcf89a8ba44..1a4e2b101ef2 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
/*
* Here we don't really care about alignment since extent allocator can
- * handle it. We care more about the size, as if one block group is
- * larger than maximum size, it's must be some obvious corruption.
+ * handle it. We care more about the size.
*/
- if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ if (key->offset == 0) {
block_group_err(fs_info, leaf, slot,
- "invalid block group size, have %llu expect (0, %llu]",
- key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ "invalid block group size 0");
return -EUCLEAN;
}
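The relaxed verifier above now rejects only a zero-sized block group item. A minimal userspace sketch of the check that remains, using EUCLEAN from errno.h (Linux-specific) and a hypothetical item_key struct standing in for the real btrfs key:
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the btrfs key; only the offset matters. */
struct item_key {
    uint64_t offset; /* block group size for a BLOCK_GROUP_ITEM key */
};

/* After the change only a zero size counts as corruption. */
static int check_block_group_size(const struct item_key *key)
{
    if (key->offset == 0) {
        fprintf(stderr, "invalid block group size 0\n");
        return -EUCLEAN; /* EUCLEAN is Linux-specific */
    }
    return 0;
}

int main(void)
{
    struct item_key ok = { .offset = 1024 * 1024 * 1024 };
    struct item_key bad = { .offset = 0 };

    printf("1 GiB group: %d\n", check_block_group_size(&ok));  /* 0 */
    printf("zero group:  %d\n", check_block_group_size(&bad)); /* -117 */
    return 0;
}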
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index abcd78e332fe..85dadb93c992 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -133,7 +133,7 @@ config CIFS_XATTR
config CIFS_POSIX
bool "CIFS POSIX Extensions"
- depends on CIFS_XATTR
+ depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
help
Enabling this option will cause the cifs client to attempt to
negotiate a newer dialect with servers, such as Samba 3.0.5
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3713d22b95a7..907e85d65bb4 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -174,7 +174,7 @@ cifs_bp_rename_retry:
cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
- full_path[dfsplen] = '\\';
+ full_path[dfsplen] = dirsep;
for (i = 0; i < pplen-1; i++)
if (full_path[dfsplen+1+i] == '/')
full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 74c33d5fafc8..c9bc56b1baac 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2541,14 +2541,13 @@ static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
struct cifs_aio_ctx *ctx)
{
- int wait_retry = 0;
unsigned int wsize, credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(wdata->cfile->tlink)->ses->server;
/*
- * Try to resend this wdata, waiting for credits up to 3 seconds.
+ * Wait for credits to resend this wdata.
* Note: we are attempting to resend the whole wdata not in segments
*/
do {
@@ -2556,19 +2555,13 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
server, wdata->bytes, &wsize, &credits);
if (rc)
- break;
+ goto out;
if (wsize < wdata->bytes) {
add_credits_and_wake_if(server, credits, 0);
msleep(1000);
- wait_retry++;
}
- } while (wsize < wdata->bytes && wait_retry < 3);
-
- if (wsize < wdata->bytes) {
- rc = -EBUSY;
- goto out;
- }
+ } while (wsize < wdata->bytes);
rc = -EAGAIN;
while (rc == -EAGAIN) {
@@ -3234,14 +3227,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
struct list_head *rdata_list,
struct cifs_aio_ctx *ctx)
{
- int wait_retry = 0;
unsigned int rsize, credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(rdata->cfile->tlink)->ses->server;
/*
- * Try to resend this rdata, waiting for credits up to 3 seconds.
+ * Wait for credits to resend this rdata.
* Note: we are attempting to resend the whole rdata not in segments
*/
do {
@@ -3249,24 +3241,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
&rsize, &credits);
if (rc)
- break;
+ goto out;
if (rsize < rdata->bytes) {
add_credits_and_wake_if(server, credits, 0);
msleep(1000);
- wait_retry++;
}
- } while (rsize < rdata->bytes && wait_retry < 3);
-
- /*
- * If we can't find enough credits to send this rdata
- * release the rdata and return failure, this will pass
- * whatever I/O amount we have finished to VFS.
- */
- if (rsize < rdata->bytes) {
- rc = -EBUSY;
- goto out;
- }
+ } while (rsize < rdata->bytes);
rc = -EAGAIN;
while (rc == -EAGAIN) {
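With the retry cap gone, the resend path loops until the server grants enough credits for the whole buffer instead of failing with -EBUSY after three attempts. A minimal sketch of that wait-for-credits pattern; wait_for_credits() and its grant behaviour are made up for illustration:
#include <stdio.h>
#include <unistd.h>

/* Made-up credit server: grants 16 KiB more on every call. */
static int wait_for_credits(unsigned int want, unsigned int *granted)
{
    static unsigned int available;

    available += 16 * 1024;
    *granted = available < want ? available : want;
    return 0; /* the real code can also fail outright */
}

int main(void)
{
    unsigned int bytes = 64 * 1024; /* whole wdata, resent in one piece */
    unsigned int wsize;

    do {
        if (wait_for_credits(bytes, &wsize))
            return 1;
        if (wsize < bytes)
            sleep(1); /* as msleep(1000): back off and try again */
    } while (wsize < bytes);

    printf("resend %u bytes in one request\n", bytes);
    return 0;
}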
diff --git a/fs/dax.c b/fs/dax.c
index 9bcce89ea18e..48132eca3761 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
}
}
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+
+ /*
+ * Entry lock waits are exclusive. Wake up the next waiter since
+ * we aren't sure we will acquire the entry lock and thus wake
+ * the next waiter up on unlock.
+ */
+ if (waitqueue_active(wq))
+ __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
@@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry)
* @page: The page whose entry we want to lock
*
* Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
*/
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
{
XA_STATE(xas, NULL, 0);
void *entry;
- bool locked;
/* Ensure page->mapping isn't freed while we look at it */
rcu_read_lock();
for (;;) {
struct address_space *mapping = READ_ONCE(page->mapping);
- locked = false;
- if (!dax_mapping(mapping))
+ entry = NULL;
+ if (!mapping || !dax_mapping(mapping))
break;
/*
@@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page()
* translation.
*/
- locked = true;
+ entry = (void *)~0UL;
if (S_ISCHR(mapping->host->i_mode))
break;
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
rcu_read_unlock();
- entry = get_unlocked_entry(&xas);
- xas_unlock_irq(&xas);
- put_unlocked_entry(&xas, entry);
+ wait_entry_unlocked(&xas, entry);
rcu_read_lock();
continue;
}
@@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
break;
}
rcu_read_unlock();
- return locked;
+ return (dax_entry_t)entry;
}
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
struct address_space *mapping = page->mapping;
XA_STATE(xas, &mapping->i_pages, page->index);
- void *entry;
if (S_ISCHR(mapping->host->i_mode))
return;
- rcu_read_lock();
- entry = xas_load(&xas);
- rcu_read_unlock();
- entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
- dax_unlock_entry(&xas, entry);
+ dax_unlock_entry(&xas, (void *)cookie);
}
/*
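The locking API now hands back an opaque dax_entry_t cookie instead of a bool, so dax_unlock_page() no longer has to re-derive the entry from the page. A sketch of the calling pattern, with the kernel types and functions stubbed out so it compiles standalone:
#include <stdio.h>

/* Stubs so the calling pattern compiles outside the kernel. */
typedef unsigned long dax_entry_t;
struct page { int dummy; };

static dax_entry_t dax_lock_page(struct page *page)
{
    (void)page;
    return ~0UL; /* pretend the entry was locked; 0 would mean failure */
}

static void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
    (void)page;
    printf("unlock with cookie %#lx\n", cookie);
}

int main(void)
{
    struct page pg;
    dax_entry_t cookie = dax_lock_page(&pg);

    if (cookie) /* 0 means the entry could not be locked */
        dax_unlock_page(&pg, cookie);
    return 0;
}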
diff --git a/fs/exec.c b/fs/exec.c
index acc3a5536384..fc281b738a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,7 +62,6 @@
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
-#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -1084,7 +1083,7 @@ static int de_thread(struct task_struct *tsk)
while (sig->notify_count) {
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
- freezable_schedule();
+ schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
spin_lock_irq(lock);
@@ -1112,7 +1111,7 @@ static int de_thread(struct task_struct *tsk)
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
- freezable_schedule();
+ schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
}
diff --git a/fs/iomap.c b/fs/iomap.c
index d094e5688bd3..9a5bf1e8925b 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1884,15 +1884,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->wait_for_completion = true;
ret = 0;
}
-
- /*
- * Splicing to pipes can fail on a full pipe. We have to
- * swallow this to make it look like a short IO
- * otherwise the higher splice layers will completely
- * mishandle the error and stop moving data.
- */
- if (ret == -EFAULT)
- ret = 0;
break;
}
pos += ret;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index aa12c3063bae..33824a0a57bf 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -98,8 +98,11 @@ struct nfs_direct_req {
struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
struct work_struct work;
int flags;
+ /* for write */
#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+ /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
struct nfs_writeverf verf; /* unstable write verifier */
};
@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
- if (!PageCompound(page) && bytes < hdr->good_bytes)
+ if (!PageCompound(page) && bytes < hdr->good_bytes &&
+ (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
set_page_dirty(page);
bytes += req->wb_bytes;
nfs_list_remove_request(req);
@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ if (iter_is_iovec(iter))
+ dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+
nfs_start_io_direct(inode);
NFS_I(inode)->read_io += count;
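The new NFS_ODIRECT_SHOULD_DIRTY flag records at submission time whether the read lands in user memory (an iovec-backed iterator); completion then dirties pages only in that case, leaving kernel-space (e.g. KVEC) destinations alone. A compact sketch of that decision, with the iterator reduced to a made-up enum:
#include <stdio.h>

enum iter_type { ITER_IOVEC, ITER_KVEC, ITER_BVEC };

#define NFS_ODIRECT_SHOULD_DIRTY 3 /* dirty user-space pages after read */

struct direct_req { int flags; };

/* Submission: only user-space (iovec) destinations need dirtying. */
static void submit_read(struct direct_req *dreq, enum iter_type type)
{
    if (type == ITER_IOVEC)
        dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
}

/* Completion: honor the flag recorded at submission time. */
static void complete_read(const struct direct_req *dreq)
{
    if (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY)
        printf("set_page_dirty()\n");
    else
        printf("leave page clean\n");
}

int main(void)
{
    struct direct_req user = { 0 }, kern = { 0 };

    submit_read(&user, ITER_IOVEC);
    submit_read(&kern, ITER_KVEC);
    complete_read(&user); /* set_page_dirty() */
    complete_read(&kern); /* leave page clean */
    return 0;
}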
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 74b36ed883ca..310d7500f665 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
if (fh)
hdr->args.fh = fh;
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ if (vers == 4 &&
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;
/*
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
if (fh)
hdr->args.fh = fh;
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ if (vers == 4 &&
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;
/*
diff --git a/fs/read_write.c b/fs/read_write.c
index 4dae0399c75a..58f30537c47a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1956,7 +1956,7 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct inode *inode_out = file_inode(file_out);
loff_t ret;
- WARN_ON_ONCE(remap_flags);
+ WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
return -EISDIR;
diff --git a/fs/splice.c b/fs/splice.c
index 3553f1956508..de2ede048473 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -945,11 +945,16 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->flags &= ~SPLICE_F_NONBLOCK;
more = sd->flags & SPLICE_F_MORE;
+ WARN_ON_ONCE(pipe->nrbufs != 0);
+
while (len) {
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
- ret = do_splice_to(in, &pos, pipe, len, flags);
+ /* Don't try to read more than the pipe has space for. */
+ read_len = min_t(size_t, len,
+ (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+ ret = do_splice_to(in, &pos, pipe, read_len, flags);
if (unlikely(ret <= 0))
goto out_release;
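Each pass through the loop now reads at most (pipe->buffers - pipe->nrbufs) pages, the space actually free in the pipe; that is also why the -EFAULT swallowing removed from fs/iomap.c above is no longer needed. A worked example of the clamp arithmetic with made-up buffer counts and 4 KiB pages:
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KiB pages, as on x86-64 */

/* min_t(size_t, len, free pipe space in bytes), as in the hunk above. */
static size_t clamp_read_len(size_t len, unsigned buffers, unsigned nrbufs)
{
    size_t space = (size_t)(buffers - nrbufs) << PAGE_SHIFT;

    return len < space ? len : space;
}

int main(void)
{
    /* 16-slot pipe, 13 slots in use: 3 free pages cap the read at 12 KiB. */
    printf("%zu\n", clamp_read_len(1024 * 1024, 16, 13)); /* 12288 */

    /* Empty pipe: a 64 KiB request goes through unclamped. */
    printf("%zu\n", clamp_read_len(64 * 1024, 16, 0)); /* 65536 */
    return 0;
}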
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 34c6d7bd4d18..bbdae2b4559f 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
- return __this_address;
+ return false;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
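xfs_btree_sblock_verify_crc() returns bool, so the old "return __this_address" silently converted a non-NULL failure address to true, reporting a bad LSN as a successful verification. A small standalone illustration of that implicit conversion, with FAIL_ADDR as a stand-in for __this_address:
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for __this_address: a non-NULL diagnostic pointer. */
static int marker;
#define FAIL_ADDR ((void *)&marker)

/* Buggy shape: returning a pointer from a bool function converts
 * any non-NULL value to true, so "failed" came back as "verified". */
static bool verify_buggy(bool lsn_ok)
{
    if (!lsn_ok)
        return FAIL_ADDR;
    return true;
}

/* Fixed shape, as in the hunk above: failure really returns false. */
static bool verify_fixed(bool lsn_ok)
{
    if (!lsn_ok)
        return false;
    return true;
}

int main(void)
{
    printf("buggy, bad LSN: %d\n", verify_buggy(false)); /* 1 (wrongly ok) */
    printf("fixed, bad LSN: %d\n", verify_fixed(false)); /* 0 */
    return 0;
}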
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 404e581f1ea1..1ee8c5539fa4 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1126,9 +1126,9 @@ xfs_free_file_space(
* page could be mmap'd and iomap_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
- if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+ if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- (offset + len) & ~PAGE_MASK, LLONG_MAX);
+ round_down(offset + len, PAGE_SIZE), LLONG_MAX);
}
return error;
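Since the kernel defines PAGE_MASK as ~(PAGE_SIZE - 1), the old code had its two masks swapped: (offset + len) & PAGE_MASK rounds down (nonzero for any end past the first page, not just partial pages), while & ~PAGE_MASK yields the in-page offset that was wrongly used as the writeback start. A quick demonstration of what each expression computes:
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE ((uint64_t)4096)
#define PAGE_MASK (~(PAGE_SIZE - 1)) /* as the kernel defines it */

int main(void)
{
    uint64_t end = 0x12345; /* offset + len */

    /* The old expressions, with the masks swapped: */
    printf("end & PAGE_MASK  = 0x%llx (round-down, not a partial-page test)\n",
           (unsigned long long)(end & PAGE_MASK));  /* 0x12000 */
    printf("end & ~PAGE_MASK = 0x%llx (in-page offset, wrong range start)\n",
           (unsigned long long)(end & ~PAGE_MASK)); /* 0x345 */

    /* What the fixed code computes: */
    printf("offset_in_page   = 0x%llx (> 0 means a partial last page)\n",
           (unsigned long long)(end % PAGE_SIZE));  /* 0x345 */
    printf("round_down       = 0x%llx (start of that partial page)\n",
           (unsigned long long)(end - end % PAGE_SIZE)); /* 0x12000 */
    return 0;
}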
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 73a1d77ec187..3091e4bc04ef 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
statp->f_files = limit;
statp->f_ffree =
(statp->f_files > dqp->q_res_icount) ?
- (statp->f_ffree - dqp->q_res_icount) : 0;
+ (statp->f_files - dqp->q_res_icount) : 0;
}
}
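The one-character fix changes the base of the subtraction: free files must be derived from the quota limit f_files, not from the stale f_ffree the statfs call started with. With made-up numbers, a 1000-inode limit and 300 reserved inodes should report 700 free:
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t f_files = 1000; /* quota limit on inodes */
    uint64_t icount = 300;   /* inodes already reserved against it */
    uint64_t f_ffree = 50;   /* stale device-wide free count */

    /* Old expression: subtracts from the wrong base, can underflow. */
    uint64_t buggy = f_files > icount ? f_ffree - icount : 0;
    /* Fixed expression from the hunk above. */
    uint64_t fixed = f_files > icount ? f_files - icount : 0;

    printf("buggy: %llu\n", (unsigned long long)buggy); /* huge underflow */
    printf("fixed: %llu\n", (unsigned long long)fixed); /* 700 */
    return 0;
}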