author    Miklos Szeredi <mszeredi@redhat.com>  2018-10-01 10:07:06 +0200
committer Miklos Szeredi <mszeredi@redhat.com>  2018-10-01 10:07:06 +0200
commit    e52a8250480acd3b26534793c61816e30d85fbb6 (patch)
tree      be2ae4d947e7fb876eae27926f70cf9c702243b2 /fs/fuse
parent    5da784cce4308ae10a79e3c8c41b13fb9568e4e0 (diff)
fuse: realloc page array
Writeback caching currently allocates requests with the maximum possible number of pages, while the actual number of pages per request depends on a couple of factors that cannot be determined when the request is allocated (whether the page is already under writeback, whether the page is contiguous with previous pages already added to a request).

This patch allows such requests to start with no page allocation (all pages inline) and to grow the page array on demand.

If the max_pages tunable remains at its default value, this means just one allocation of the same size as before. If the tunable is larger, this adds at most 3 additional memory allocations (which is generously compensated by the improved performance from the larger request).

Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
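To make the sizing policy concrete, here is a minimal userspace sketch of the grow-on-demand calculation used by fuse_req_realloc_pages() in the diff below. The constants 32 (default pages per request) and 256 (hard maximum) are the usual kernel values and are assumptions here, not shown on this page.

/*
 * Sketch only: reproduces the doubling policy
 *   npages = min(max(cur * 2, DEFAULT), connection max)
 * assuming DEFAULT = 32 and a hard cap of 256 pages.
 */
#include <stdio.h>

#define DEFAULT_MAX_PAGES_PER_REQ 32u
#define HARD_MAX_PAGES            256u

static unsigned int next_npages(unsigned int cur, unsigned int conn_max)
{
	unsigned int want = cur * 2 > DEFAULT_MAX_PAGES_PER_REQ ?
			    cur * 2 : DEFAULT_MAX_PAGES_PER_REQ;

	return want < conn_max ? want : conn_max;
}

int main(void)
{
	unsigned int conn_max = HARD_MAX_PAGES;	/* worst-case tunable */
	unsigned int npages = 1;		/* inline pages only */
	int allocs = 0;

	while (npages < conn_max) {
		npages = next_npages(npages, conn_max);
		printf("allocation %d: %u pages\n", ++allocs, npages);
	}
	/* Prints 32, 64, 128, 256: one allocation of the old default size,
	 * then at most three further ones, matching the commit message. */
	return 0;
}

With the default tunable (conn_max = 32) the loop runs exactly once, i.e. a single allocation of the same size as before.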
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/dev.c     49
-rw-r--r--  fs/fuse/file.c     8
-rw-r--r--  fs/fuse/fuse_i.h   4
3 files changed, 56 insertions(+), 5 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 69d4df78a417..ae813e609932 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -54,6 +54,18 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
__set_bit(FR_PENDING, &req->flags);
}
+static struct page **fuse_req_pages_alloc(unsigned int npages, gfp_t flags,
+ struct fuse_page_desc **desc)
+{
+ struct page **pages;
+
+ pages = kzalloc(npages * (sizeof(struct page *) +
+ sizeof(struct fuse_page_desc)), flags);
+ *desc = (void *) pages + npages * sizeof(struct page *);
+
+ return pages;
+}
+
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
@@ -63,13 +75,12 @@ static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
WARN_ON(npages > FUSE_MAX_MAX_PAGES);
if (npages > FUSE_REQ_INLINE_PAGES) {
- pages = kzalloc(npages * (sizeof(*pages) +
- sizeof(*page_descs)), flags);
+ pages = fuse_req_pages_alloc(npages, flags,
+ &page_descs);
if (!pages) {
kmem_cache_free(fuse_req_cachep, req);
return NULL;
}
- page_descs = (void *) pages + npages * sizeof(*pages);
} else if (npages) {
pages = req->inline_pages;
page_descs = req->inline_page_descs;
@@ -91,11 +102,41 @@ struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
return __fuse_request_alloc(npages, GFP_NOFS);
}
-void fuse_request_free(struct fuse_req *req)
+static void fuse_req_pages_free(struct fuse_req *req)
{
if (req->pages != req->inline_pages)
kfree(req->pages);
+}
+
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags)
+{
+ struct page **pages;
+ struct fuse_page_desc *page_descs;
+ unsigned int npages = min_t(unsigned int,
+ max_t(unsigned int, req->max_pages * 2,
+ FUSE_DEFAULT_MAX_PAGES_PER_REQ),
+ fc->max_pages);
+ WARN_ON(npages <= req->max_pages);
+ pages = fuse_req_pages_alloc(npages, flags, &page_descs);
+ if (!pages)
+ return false;
+
+ memcpy(pages, req->pages, sizeof(struct page *) * req->max_pages);
+ memcpy(page_descs, req->page_descs,
+ sizeof(struct fuse_page_desc) * req->max_pages);
+ fuse_req_pages_free(req);
+ req->pages = pages;
+ req->page_descs = page_descs;
+ req->max_pages = npages;
+
+ return true;
+}
+
+void fuse_request_free(struct fuse_req *req)
+{
+ fuse_req_pages_free(req);
kmem_cache_free(fuse_req_cachep, req);
}
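For readers unfamiliar with the single-allocation trick used above: fuse_req_pages_alloc() packs the page-pointer array and the descriptor array into one zeroed allocation, and fuse_req_realloc_pages() grows it by allocating a bigger combined block and copying both halves. Below is a hedged userspace sketch of the same idea, with illustrative names only, not the kernel code itself.

#include <stdlib.h>
#include <string.h>

struct page;					/* opaque stand-in */
struct page_desc { unsigned int length, offset; };

/* One zeroed block: npages pointers followed by npages descriptors;
 * *desc points just past the pointer array. */
static struct page **pages_alloc(unsigned int npages, struct page_desc **desc)
{
	struct page **pages;

	pages = calloc(npages, sizeof(struct page *) + sizeof(struct page_desc));
	if (!pages)
		return NULL;
	*desc = (struct page_desc *)(pages + npages);
	return pages;
}

/* Growing allocates the larger combined block, copies both old arrays
 * and frees the old block; a plain realloc() is not enough on its own,
 * because the descriptor array must move to a new offset once the
 * pointer array in front of it gets longer. */
static int pages_grow(struct page ***pagesp, struct page_desc **descp,
		      unsigned int old_npages, unsigned int new_npages)
{
	struct page_desc *new_desc;
	struct page **new_pages = pages_alloc(new_npages, &new_desc);

	if (!new_pages)
		return -1;
	memcpy(new_pages, *pagesp, old_npages * sizeof(struct page *));
	memcpy(new_desc, *descp, old_npages * sizeof(struct page_desc));
	free(*pagesp);
	*pagesp = new_pages;
	*descp = new_desc;
	return 0;
}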
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 035843b501fe..f5507198ea00 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1827,7 +1827,13 @@ static int fuse_writepages_fill(struct page *page,
data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
data->req = NULL;
+ } else if (req && req->num_pages == req->max_pages) {
+ if (!fuse_req_realloc_pages(fc, req, GFP_NOFS)) {
+ fuse_writepages_send(data);
+ req = data->req = NULL;
+ }
}
+
err = -ENOMEM;
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
@@ -1850,7 +1856,7 @@ static int fuse_writepages_fill(struct page *page,
struct fuse_inode *fi = get_fuse_inode(inode);
err = -ENOMEM;
- req = fuse_request_alloc_nofs(fc->max_pages);
+ req = fuse_request_alloc_nofs(FUSE_REQ_INLINE_PAGES);
if (!req) {
__free_page(tmp_page);
goto out_unlock;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3d578745c852..b7d96e7b5e0f 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -879,6 +879,10 @@ struct fuse_req *fuse_request_alloc(unsigned npages);
struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
+bool fuse_req_realloc_pages(struct fuse_conn *fc, struct fuse_req *req,
+ gfp_t flags);
+
+
/**
* Free a request
*/