author    Linus Torvalds <torvalds@linux-foundation.org>  2015-04-19 14:26:31 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-04-19 14:26:31 -0700
commit    6162e4b0bedeb3dac2ba0a5e1b1f56db107d97ec (patch)
tree      b4ee364c3819f19acd8a63b06d455b11cd91b9ae /fs/ext4
parent    17974c054db3030b714b7108566bf5208d965a19 (diff)
parent    6ddb2447846a8ece111e316a2863c2355023682d (diff)
Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "A few bug fixes and add support for file-system level encryption in
  ext4"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (31 commits)
  ext4 crypto: enable encryption feature flag
  ext4 crypto: add symlink encryption
  ext4 crypto: enable filename encryption
  ext4 crypto: filename encryption modifications
  ext4 crypto: partial update to namei.c for fname crypto
  ext4 crypto: insert encrypted filenames into a leaf directory block
  ext4 crypto: teach ext4_htree_store_dirent() to store decrypted filenames
  ext4 crypto: filename encryption facilities
  ext4 crypto: implement the ext4 decryption read path
  ext4 crypto: implement the ext4 encryption write path
  ext4 crypto: inherit encryption policies on inode and directory create
  ext4 crypto: enforce context consistency
  ext4 crypto: add encryption key management facilities
  ext4 crypto: add ext4 encryption facilities
  ext4 crypto: add encryption policy and password salt support
  ext4 crypto: add encryption xattr support
  ext4 crypto: export ext4_empty_dir()
  ext4 crypto: add ext4 encryption Kconfig
  ext4 crypto: reserve codepoints used by the ext4 encryption feature
  ext4 crypto: add ext4_mpage_readpages()
  ...
Diffstat (limited to 'fs/ext4')
-rw-r--r--  fs/ext4/Kconfig           |  17
-rw-r--r--  fs/ext4/Makefile          |   4
-rw-r--r--  fs/ext4/acl.c             |   5
-rw-r--r--  fs/ext4/balloc.c          |   3
-rw-r--r--  fs/ext4/bitmap.c          |   1
-rw-r--r--  fs/ext4/block_validity.c  |   1
-rw-r--r--  fs/ext4/crypto.c          | 558
-rw-r--r--  fs/ext4/crypto_fname.c    | 709
-rw-r--r--  fs/ext4/crypto_key.c      | 165
-rw-r--r--  fs/ext4/crypto_policy.c   | 194
-rw-r--r--  fs/ext4/dir.c             |  81
-rw-r--r--  fs/ext4/ext4.h            | 169
-rw-r--r--  fs/ext4/ext4_crypto.h     | 147
-rw-r--r--  fs/ext4/extents.c         |  81
-rw-r--r--  fs/ext4/extents_status.c  |   2
-rw-r--r--  fs/ext4/file.c            |  19
-rw-r--r--  fs/ext4/fsync.c           |   1
-rw-r--r--  fs/ext4/hash.c            |   1
-rw-r--r--  fs/ext4/ialloc.c          |  28
-rw-r--r--  fs/ext4/inline.c          |  16
-rw-r--r--  fs/ext4/inode.c           | 130
-rw-r--r--  fs/ext4/ioctl.c           |  86
-rw-r--r--  fs/ext4/namei.c           | 637
-rw-r--r--  fs/ext4/page-io.c         |  47
-rw-r--r--  fs/ext4/readpage.c        | 328
-rw-r--r--  fs/ext4/super.c           |  56
-rw-r--r--  fs/ext4/symlink.c         |  97
-rw-r--r--  fs/ext4/xattr.c           |   4
-rw-r--r--  fs/ext4/xattr.h           |   3
29 files changed, 3344 insertions(+), 246 deletions(-)
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index efea5d5c44ce..18228c201f7f 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,6 +64,23 @@ config EXT4_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
+config EXT4_FS_ENCRYPTION
+ bool "Ext4 Encryption"
+ depends on EXT4_FS
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_XTS
+ select CRYPTO_CTS
+ select CRYPTO_SHA256
+ select KEYS
+ select ENCRYPTED_KEYS
+ help
+ Enable encryption of ext4 files and directories. This
+ feature is similar to ecryptfs, but it is more memory
+ efficient since it avoids caching the encrypted and
+ decrypted pages in the page cache.
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 0310fec2ee3d..75285ea9aa05 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -8,7 +8,9 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
- xattr_trusted.o inline.o
+ xattr_trusted.o inline.o readpage.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
+ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \
+ crypto_key.o crypto_fname.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index d40c8dbbb0d6..69b1e73026a5 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,11 +4,6 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/capability.h>
-#include <linux/fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 83a6f497c4e0..955bf49a7945 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
@@ -641,8 +640,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
* fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
- spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
- spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_alloc_block_nofail(inode,
EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
}
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index b610779a958c..4a606afb171f 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -8,7 +8,6 @@
*/
#include <linux/buffer_head.h>
-#include <linux/jbd2.h>
#include "ext4.h"
unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 41eb9dcfac7e..3522340c7a99 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -16,7 +16,6 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include "ext4.h"
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
new file mode 100644
index 000000000000..8ff15273ab0c
--- /dev/null
+++ b/fs/ext4/crypto.c
@@ -0,0 +1,558 @@
+/*
+ * linux/fs/ext4/crypto.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption functions for ext4
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/crypto.h>
+#include <linux/ecryptfs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4_extents.h"
+#include "xattr.h"
+
+/* Encryption added and removed here! (L: */
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+ "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+ "Number of crypto contexts to preallocate");
+
+static mempool_t *ext4_bounce_page_pool;
+
+static LIST_HEAD(ext4_free_crypto_ctxs);
+static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
+
+/**
+ * ext4_release_crypto_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (ctx->bounce_page) {
+ if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
+ __free_page(ctx->bounce_page);
+ else
+ mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
+ ctx->bounce_page = NULL;
+ }
+ ctx->control_page = NULL;
+ if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+ if (ctx->tfm)
+ crypto_free_tfm(ctx->tfm);
+ kfree(ctx);
+ } else {
+ spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+ list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+ spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+ }
+}
+
+/**
+ * ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
+ * @mask: The allocation mask.
+ *
+ * Return: An allocated and initialized encryption context on success. An error
+ * value or NULL otherwise.
+ */
+static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
+{
+ struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
+ mask);
+
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ return ctx;
+}
+
+/**
+ * ext4_get_crypto_ctx() - Gets an encryption context
+ * @inode: The inode for which we are doing the crypto
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+{
+ struct ext4_crypto_ctx *ctx = NULL;
+ int res = 0;
+ unsigned long flags;
+ struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
+
+ if (!ext4_read_workqueue)
+ ext4_init_crypto();
+
+ /*
+ * We first try getting the ctx from a free list because in
+ * the common case the ctx will have an allocated and
+ * initialized crypto tfm, so it's probably a worthwhile
+ * optimization. For the bounce page, we first try getting it
+ * from the kernel allocator because that's just about as fast
+ * as getting it from a list and because a cache of free pages
+ * should generally be a "last resort" option for a filesystem
+ * to be able to do its job.
+ */
+ spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
+ ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
+ struct ext4_crypto_ctx, free_list);
+ if (ctx)
+ list_del(&ctx->free_list);
+ spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
+ if (!ctx) {
+ ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto out;
+ }
+ ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ }
+
+ /* Allocate a new Crypto API context if we don't already have
+ * one or if it isn't the right mode. */
+ BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
+ if (ctx->tfm && (ctx->mode != key->mode)) {
+ crypto_free_tfm(ctx->tfm);
+ ctx->tfm = NULL;
+ ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
+ }
+ if (!ctx->tfm) {
+ switch (key->mode) {
+ case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+ ctx->tfm = crypto_ablkcipher_tfm(
+ crypto_alloc_ablkcipher("xts(aes)", 0, 0));
+ break;
+ case EXT4_ENCRYPTION_MODE_AES_256_GCM:
+ /* TODO(mhalcrow): AEAD w/ gcm(aes);
+ * crypto_aead_setauthsize() */
+ ctx->tfm = ERR_PTR(-ENOTSUPP);
+ break;
+ default:
+ BUG();
+ }
+ if (IS_ERR_OR_NULL(ctx->tfm)) {
+ res = PTR_ERR(ctx->tfm);
+ ctx->tfm = NULL;
+ goto out;
+ }
+ ctx->mode = key->mode;
+ }
+ BUG_ON(key->size != ext4_encryption_key_size(key->mode));
+
+ /* There shouldn't be a bounce page attached to the crypto
+ * context at this point. */
+ BUG_ON(ctx->bounce_page);
+
+out:
+ if (res) {
+ if (!IS_ERR_OR_NULL(ctx))
+ ext4_release_crypto_ctx(ctx);
+ ctx = ERR_PTR(res);
+ }
+ return ctx;
+}
+
+struct workqueue_struct *ext4_read_workqueue;
+static DEFINE_MUTEX(crypto_init);
+
+/**
+ * ext4_exit_crypto() - Shutdown the ext4 encryption system
+ */
+void ext4_exit_crypto(void)
+{
+ struct ext4_crypto_ctx *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
+ if (pos->bounce_page) {
+ if (pos->flags &
+ EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
+ __free_page(pos->bounce_page);
+ } else {
+ mempool_free(pos->bounce_page,
+ ext4_bounce_page_pool);
+ }
+ }
+ if (pos->tfm)
+ crypto_free_tfm(pos->tfm);
+ kfree(pos);
+ }
+ INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
+ if (ext4_bounce_page_pool)
+ mempool_destroy(ext4_bounce_page_pool);
+ ext4_bounce_page_pool = NULL;
+ if (ext4_read_workqueue)
+ destroy_workqueue(ext4_read_workqueue);
+ ext4_read_workqueue = NULL;
+}
+
+/**
+ * ext4_init_crypto() - Set up for ext4 encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_init_crypto(void)
+{
+ int i, res;
+
+ mutex_lock(&crypto_init);
+ if (ext4_read_workqueue)
+ goto already_initialized;
+ ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
+ if (!ext4_read_workqueue) {
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+ struct ext4_crypto_ctx *ctx;
+
+ ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto fail;
+ }
+ list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
+ }
+
+ ext4_bounce_page_pool =
+ mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!ext4_bounce_page_pool) {
+ res = -ENOMEM;
+ goto fail;
+ }
+already_initialized:
+ mutex_unlock(&crypto_init);
+ return 0;
+fail:
+ ext4_exit_crypto();
+ mutex_unlock(&crypto_init);
+ return res;
+}
+
+void ext4_restore_control_page(struct page *data_page)
+{
+ struct ext4_crypto_ctx *ctx =
+ (struct ext4_crypto_ctx *)page_private(data_page);
+
+ set_page_private(data_page, (unsigned long)NULL);
+ ClearPagePrivate(data_page);
+ unlock_page(data_page);
+ ext4_release_crypto_ctx(ctx);
+}
+
+/**
+ * ext4_crypt_complete() - The completion callback for page encryption
+ * @req: The asynchronous encryption request context
+ * @res: The result of the encryption operation
+ */
+static void ext4_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+typedef enum {
+ EXT4_DECRYPT = 0,
+ EXT4_ENCRYPT,
+} ext4_direction_t;
+
+static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
+ struct inode *inode,
+ ext4_direction_t rw,
+ pgoff_t index,
+ struct page *src_page,
+ struct page *dest_page)
+
+{
+ u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist dst, src;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
+ int res = 0;
+
+ BUG_ON(!ctx->tfm);
+ BUG_ON(ctx->mode != ei->i_encryption_key.mode);
+
+ if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
+ printk_ratelimited(KERN_ERR
+ "%s: unsupported crypto algorithm: %d\n",
+ __func__, ctx->mode);
+ return -ENOTSUPP;
+ }
+
+ crypto_ablkcipher_clear_flags(atfm, ~0);
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+
+ res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
+ ei->i_encryption_key.size);
+ if (res) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_ablkcipher_setkey() failed\n",
+ __func__);
+ return res;
+ }
+ req = ablkcipher_request_alloc(atfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_crypt_complete, &ecr);
+
+ BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
+ memcpy(xts_tweak, &index, sizeof(index));
+ memset(&xts_tweak[sizeof(index)], 0,
+ EXT4_XTS_TWEAK_SIZE - sizeof(index));
+
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
+ sg_init_table(&src, 1);
+ sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+ xts_tweak);
+ if (rw == EXT4_DECRYPT)
+ res = crypto_ablkcipher_decrypt(req);
+ else
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ ablkcipher_request_free(req);
+ if (res) {
+ printk_ratelimited(
+ KERN_ERR
+ "%s: crypto_ablkcipher_encrypt() returned %d\n",
+ __func__, res);
+ return res;
+ }
+ return 0;
+}
+
+/**
+ * ext4_encrypt() - Encrypts a page
+ * @inode: The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path. The caller must call
+ * ext4_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success. Else, an
+ * error value or NULL.
+ */
+struct page *ext4_encrypt(struct inode *inode,
+ struct page *plaintext_page)
+{
+ struct ext4_crypto_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ int err;
+
+ BUG_ON(!PageLocked(plaintext_page));
+
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ return (struct page *) ctx;
+
+ /* The encryption operation will require a bounce page. */
+ ciphertext_page = alloc_page(GFP_NOFS);
+ if (!ciphertext_page) {
+ /* This is a potential bottleneck, but at least we'll have
+ * forward progress. */
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS);
+ if (WARN_ON_ONCE(!ciphertext_page)) {
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS | __GFP_WAIT);
+ }
+ ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->bounce_page = ciphertext_page;
+ ctx->control_page = plaintext_page;
+ err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
+ plaintext_page, ciphertext_page);
+ if (err) {
+ ext4_release_crypto_ctx(ctx);
+ return ERR_PTR(err);
+ }
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+ return ciphertext_page;
+}
+
+/**
+ * ext4_decrypt() - Decrypts a page in-place
+ * @ctx: The encryption context.
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+
+ return ext4_page_crypto(ctx, page->mapping->host,
+ EXT4_DECRYPT, page->index, page, page);
+}
+
+/*
+ * Convenience function which takes care of allocating and
+ * deallocating the encryption context
+ */
+int ext4_decrypt_one(struct inode *inode, struct page *page)
+{
+ int ret;
+
+ struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
+
+ if (!ctx)
+ return -ENOMEM;
+ ret = ext4_decrypt(ctx, page);
+ ext4_release_crypto_ctx(ctx);
+ return ret;
+}
+
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+ struct ext4_crypto_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+ ext4_lblk_t lblk = ex->ee_block;
+ ext4_fsblk_t pblk = ext4_ext_pblock(ex);
+ unsigned int len = ext4_ext_get_actual_len(ex);
+ int err = 0;
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
+
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ciphertext_page = alloc_page(GFP_NOFS);
+ if (!ciphertext_page) {
+ /* This is a potential bottleneck, but at least we'll have
+ * forward progress. */
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS);
+ if (WARN_ON_ONCE(!ciphertext_page)) {
+ ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
+ GFP_NOFS | __GFP_WAIT);
+ }
+ ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->bounce_page = ciphertext_page;
+
+ while (len--) {
+ err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page);
+ if (err)
+ goto errout;
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ if (!bio) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio->bi_iter.bi_sector = pblk;
+ err = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+ if (err) {
+ bio_put(bio);
+ goto errout;
+ }
+ err = submit_bio_wait(WRITE, bio);
+ if (err)
+ goto errout;
+ }
+ err = 0;
+errout:
+ ext4_release_crypto_ctx(ctx);
+ return err;
+}
+
+bool ext4_valid_contents_enc_mode(uint32_t mode)
+{
+ return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+/**
+ * ext4_validate_encryption_key_size() - Validate the encryption key size
+ * @mode: The key mode.
+ * @size: The key size to validate.
+ *
+ * Return: The validated key size for @mode. Zero if invalid.
+ */
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
+{
+ if (size == ext4_encryption_key_size(mode))
+ return size;
+ return 0;
+}
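
The kernel-doc above spells out the calling contract for both directions: on the write path, ext4_encrypt() hands back a locked bounce page that must be released with ext4_restore_control_page() once the I/O is done, while on the read path ext4_decrypt_one() wraps context allocation around an in-place decrypt. A minimal sketch of both call patterns follows; example_submit_page_for_io() is a hypothetical stand-in for attaching the bounce page to a bio, and neither function is part of this series:

/*
 * Sketch of a caller honoring the ext4_encrypt() /
 * ext4_restore_control_page() pairing on the write path.
 */
static int example_write_encrypted_page(struct inode *inode,
					struct page *plaintext_page)
{
	struct page *data_page;
	int err;

	data_page = ext4_encrypt(inode, plaintext_page);
	if (IS_ERR(data_page))
		return PTR_ERR(data_page);
	err = example_submit_page_for_io(data_page); /* hypothetical helper */
	/* Unlocks the bounce page and releases the crypto context. */
	ext4_restore_control_page(data_page);
	return err;
}

/*
 * Read-path counterpart: decrypt in place after the bio completes,
 * roughly what a completion handler queued on ext4_read_workqueue
 * would do.
 */
static void example_complete_encrypted_read(struct inode *inode,
					    struct page *page)
{
	if (!ext4_decrypt_one(inode, page))
		SetPageUptodate(page);
	else
		SetPageError(page);
	unlock_page(page);
}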
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
new file mode 100644
index 000000000000..ca2f5948c1ac
--- /dev/null
+++ b/fs/ext4/crypto_fname.c
@@ -0,0 +1,709 @@
+/*
+ * linux/fs/ext4/crypto_fname.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains functions for filename crypto management in ext4
+ *
+ * Written by Uday Savagaonkar, 2014.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/crypto.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock_types.h>
+
+#include "ext4.h"
+#include "ext4_crypto.h"
+#include "xattr.h"
+
+/**
+ * ext4_dir_crypt_complete() -
+ */
+static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+bool ext4_valid_filenames_enc_mode(uint32_t mode)
+{
+ return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+/**
+ * ext4_fname_encrypt() -
+ *
+ * This function encrypts the input filename, and returns the length of the
+ * ciphertext. Errors are returned as negative numbers. We trust the caller to
+ * allocate sufficient memory for the oname string.
+ */
+static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname)
+{
+ u32 ciphertext_len;
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct crypto_ablkcipher *tfm = ctx->ctfm;
+ int res = 0;
+ char iv[EXT4_CRYPTO_BLOCK_SIZE];
+ struct scatterlist sg[1];
+ char *workbuf;
+
+ if (iname->len <= 0 || iname->len > ctx->lim)
+ return -EIO;
+
+ ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
+ EXT4_CRYPTO_BLOCK_SIZE : iname->len;
+ ciphertext_len = (ciphertext_len > ctx->lim)
+ ? ctx->lim : ciphertext_len;
+
+ /* Allocate request */
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(
+ KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_dir_crypt_complete, &ecr);
+
+ /* Map the workpage */
+ workbuf = kmap(ctx->workpage);
+
+ /* Copy the input */
+ memcpy(workbuf, iname->name, iname->len);
+ if (iname->len < ciphertext_len)
+ memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
+
+ /* Initialize IV */
+ memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+ /* Create encryption request */
+ sg_init_table(sg, 1);
+ sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ if (res >= 0) {
+ /* Copy the result to output */
+ memcpy(oname->name, workbuf, ciphertext_len);
+ res = ciphertext_len;
+ }
+ kunmap(ctx->workpage);
+ ablkcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(
+ KERN_ERR "%s: Error (error code %d)\n", __func__, res);
+ }
+ oname->len = ciphertext_len;
+ return res;
+}
+
+/*
+ * ext4_fname_decrypt()
+ * This function decrypts the input filename, and returns
+ * the length of the plaintext.
+ * Errors are returned as negative numbers.
+ * We trust the caller to allocate sufficient memory for the oname string.
+ */
+static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
+{
+ struct ext4_str tmp_in[2], tmp_out[1];
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist sg[1];
+ struct crypto_ablkcipher *tfm = ctx->ctfm;
+ int res = 0;
+ char iv[EXT4_CRYPTO_BLOCK_SIZE];
+ char *workbuf;
+
+ if (iname->len <= 0 || iname->len > ctx->lim)
+ return -EIO;
+
+ tmp_in[0].name = iname->name;
+ tmp_in[0].len = iname->len;
+ tmp_out[0].name = oname->name;
+
+ /* Allocate request */
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(
+ KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ ext4_dir_crypt_complete, &ecr);
+
+ /* Map the workpage */
+ workbuf = kmap(ctx->workpage);
+
+ /* Copy the input */
+ memcpy(workbuf, iname->name, iname->len);
+
+ /* Initialize IV */
+ memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
+
+ /* Create encryption request */
+ sg_init_table(sg, 1);
+ sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
+ ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
+ res = crypto_ablkcipher_decrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ if (res >= 0) {
+ /* Copy the result to output */
+ memcpy(oname->name, workbuf, iname->len);
+ res = iname->len;
+ }
+ kunmap(ctx->workpage);
+ ablkcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(
+ KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
+ __func__, res);
+ return res;
+ }
+
+ oname->len = strnlen(oname->name, iname->len);
+ return oname->len;
+}
+
+/**
+ * ext4_fname_encode_digest() -
+ *
+ * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+int ext4_fname_encode_digest(char *dst, char *src, u32 len)
+{
+ static const char *lookup_table =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+";
+ u32 current_chunk, num_chunks, i;
+ char tmp_buf[3];
+ u32 c0, c1, c2, c3;
+
+ current_chunk = 0;
+ num_chunks = len/3;
+ for (i = 0; i < num_chunks; i++) {
+ c0 = src[3*i] & 0x3f;
+ c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f;
+ c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
+ c3 = (src[3*i+2]>>2) & 0x3f;
+ dst[4*i] = lookup_table[c0];
+ dst[4*i+1] = lookup_table[c1];
+ dst[4*i+2] = lookup_table[c2];
+ dst[4*i+3] = lookup_table[c3];
+ }
+ if (i*3 < len) {
+ memset(tmp_buf, 0, 3);
+ memcpy(tmp_buf, &src[3*i], len-3*i);
+ c0 = tmp_buf[0] & 0x3f;
+ c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
+ c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
+ c3 = (tmp_buf[2]>>2) & 0x3f;
+ dst[4*i] = lookup_table[c0];
+ dst[4*i+1] = lookup_table[c1];
+ dst[4*i+2] = lookup_table[c2];
+ dst[4*i+3] = lookup_table[c3];
+ i++;
+ }
+ return (i * 4);
+}
+
+/**
+ * ext4_fname_hash() -
+ *
+ * This function computes the hash of the input filename, and sets the output
+ * buffer to the *encoded* digest. It returns the length of the digest as its
+ * return value. Errors are returned as negative numbers. We trust the caller
+ * to allocate sufficient memory for the oname string.
+ */
+static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
+{
+ struct scatterlist sg;
+ struct hash_desc desc = {
+ .tfm = (struct crypto_hash *)ctx->htfm,
+ .flags = CRYPTO_TFM_REQ_MAY_SLEEP
+ };
+ int res = 0;
+
+ if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
+ res = ext4_fname_encode_digest(oname->name, iname->name,
+ iname->len);
+ oname->len = res;
+ return res;
+ }
+
+ sg_init_one(&sg, iname->name, iname->len);
+ res = crypto_hash_init(&desc);
+ if (res) {
+ printk(KERN_ERR
+ "%s: Error initializing crypto hash; res = [%d]\n",
+ __func__, res);
+ goto out;
+ }
+ res = crypto_hash_update(&desc, &sg, iname->len);
+ if (res) {
+ printk(KERN_ERR
+ "%s: Error updating crypto hash; res = [%d]\n",
+ __func__, res);
+ goto out;
+ }
+ res = crypto_hash_final(&desc,
+ &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
+ if (res) {
+ printk(KERN_ERR
+ "%s: Error finalizing crypto hash; res = [%d]\n",
+ __func__, res);
+ goto out;
+ }
+ /* Encode the digest as a printable string--this will increase the
+ * size of the digest */
+ oname->name[0] = 'I';
+ res = ext4_fname_encode_digest(oname->name+1,
+ &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
+ EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
+ oname->len = res;
+out:
+ return res;
+}
+
+/**
+ * ext4_free_fname_crypto_ctx() -
+ *
+ * Frees up a crypto context.
+ */
+void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx)
+{
+ if (ctx == NULL || IS_ERR(ctx))
+ return;
+
+ if (ctx->ctfm && !IS_ERR(ctx->ctfm))
+ crypto_free_ablkcipher(ctx->ctfm);
+ if (ctx->htfm && !IS_ERR(ctx->htfm))
+ crypto_free_hash(ctx->htfm);
+ if (ctx->workpage && !IS_ERR(ctx->workpage))
+ __free_page(ctx->workpage);
+ kfree(ctx);
+}
+
+/**
+ * ext4_put_fname_crypto_ctx() -
+ *
+ * Returns the crypto context to the free list. If the free list is above a
+ * threshold, completely frees up the context and returns the memory.
+ *
+ * TODO: Currently we directly free the crypto context. Eventually we should
+ * add code to return it to the free list. Such an approach will increase the
+ * efficiency of directory lookup.
+ */
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx)
+{
+ if (*ctx == NULL || IS_ERR(*ctx))
+ return;
+ ext4_free_fname_crypto_ctx(*ctx);
+ *ctx = NULL;
+}
+
+/**
+ * ext4_search_fname_crypto_ctx() -
+ */
+static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx(
+ const struct ext4_encryption_key *key)
+{
+ return NULL;
+}
+
+/**
+ * ext4_alloc_fname_crypto_ctx() -
+ */
+struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx(
+ const struct ext4_encryption_key *key)
+{
+ struct ext4_fname_crypto_ctx *ctx;
+
+ ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
+ /* This will automatically set the key mode to invalid,
+ * since the enum value for EXT4_ENCRYPTION_MODE_INVALID is zero */
+ memset(&ctx->key, 0, sizeof(ctx->key));
+ } else {
+ memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key));
+ }
+ ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode)
+ ? 0 : 1;
+ ctx->ctfm_key_is_ready = 0;
+ ctx->ctfm = NULL;
+ ctx->htfm = NULL;
+ ctx->workpage = NULL;
+ return ctx;
+}
+
+/**
+ * ext4_get_fname_crypto_ctx() -
+ *
+ * Allocates a free crypto context and initializes it to hold
+ * the crypto material for the inode.
+ *
+ * Return: NULL if not encrypted. Error value on error. Valid pointer otherwise.
+ */
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
+ struct inode *inode, u32 max_ciphertext_len)
+{
+ struct ext4_fname_crypto_ctx *ctx;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int res;
+
+ /* Check if the crypto policy is set on the inode */
+ res = ext4_encrypted_inode(inode);
+ if (res == 0)
+ return NULL;
+
+ if (!ext4_has_encryption_key(inode))
+ ext4_generate_encryption_key(inode);
+
+ /* Get a crypto context based on the key.
+ * A new context is allocated if no context matches the requested key.
+ */
+ ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key));
+ if (ctx == NULL)
+ ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key));
+ if (IS_ERR(ctx))
+ return ctx;
+
+ if (ctx->has_valid_key) {
+ if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
+ printk_once(KERN_WARNING
+ "ext4: unsupported key mode %d\n",
+ ctx->key.mode);
+ return ERR_PTR(-ENOKEY);
+ }
+
+ /* As a first cut, we will allocate a new tfm in every call.
+ * Later, we will keep the tfm around, in case the key gets
+ * re-used */
+ if (ctx->ctfm == NULL) {
+ ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))",
+ 0, 0);
+ }
+ if (IS_ERR(ctx->ctfm)) {
+ res = PTR_ERR(ctx->ctfm);
+ printk(
+ KERN_DEBUG "%s: error (%d) allocating crypto tfm\n",
+ __func__, res);
+ ctx->ctfm = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->ctfm == NULL) {
+ printk(
+ KERN_DEBUG "%s: could not allocate crypto tfm\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (ctx->workpage == NULL)
+ ctx->workpage = alloc_page(GFP_NOFS);
+ if (IS_ERR(ctx->workpage)) {
+ res = PTR_ERR(ctx->workpage);
+ printk(
+ KERN_DEBUG "%s: error (%d) allocating work page\n",
+ __func__, res);
+ ctx->workpage = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->workpage == NULL) {
+ printk(
+ KERN_DEBUG "%s: could not allocate work page\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ ctx->lim = max_ciphertext_len;
+ crypto_ablkcipher_clear_flags(ctx->ctfm, ~0);
+ crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+
+ /* If we are lucky, we will get a context that is already
+ * set up with the right key. Else, we will have to
+ * set the key */
+ if (!ctx->ctfm_key_is_ready) {
+ /* Since our crypto objectives for filename encryption
+ * are pretty weak,
+ * we directly use the inode master key */
+ res = crypto_ablkcipher_setkey(ctx->ctfm,
+ ctx->key.raw, ctx->key.size);
+ if (res) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-EIO);
+ }
+ ctx->ctfm_key_is_ready = 1;
+ } else {
+ /* In the current implementation, the key should never be
+ * marked "ready" for a context that has just been
+ * allocated. So we should never reach here */
+ BUG();
+ }
+ }
+ if (ctx->htfm == NULL)
+ ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->htfm)) {
+ res = PTR_ERR(ctx->htfm);
+ printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n",
+ __func__, res);
+ ctx->htfm = NULL;
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(res);
+ }
+ if (ctx->htfm == NULL) {
+ printk(KERN_DEBUG "%s: could not allocate hash tfm\n",
+ __func__);
+ ext4_put_fname_crypto_ctx(&ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return ctx;
+}
+
+/**
+ * ext4_fname_crypto_round_up() -
+ *
+ * Return: The next multiple of block size
+ */
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
+{
+ return ((size+blksize-1)/blksize)*blksize;
+}
+
+/**
+ * ext4_fname_crypto_namelen_on_disk() -
+ */
+int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
+ u32 namelen)
+{
+ u32 ciphertext_len;
+
+ if (ctx == NULL)
+ return -EIO;
+ if (!(ctx->has_valid_key))
+ return -EACCES;
+ ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
+ EXT4_CRYPTO_BLOCK_SIZE : namelen;
+ ciphertext_len = (ciphertext_len > ctx->lim)
+ ? ctx->lim : ciphertext_len;
+ return (int) ciphertext_len;
+}
+
+/**
+ * ext4_fname_crypto_alloc_buffer() -
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
+ u32 ilen, struct ext4_str *crypto_str)
+{
+ unsigned int olen;
+
+ if (!ctx)
+ return -EIO;
+ olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE);
+ crypto_str->len = olen;
+ if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
+ olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
+ /* The allocated buffer can hold one more character, to null-terminate
+ * the string */
+ crypto_str->name = kmalloc(olen+1, GFP_NOFS);
+ if (!(crypto_str->name))
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * ext4_fname_crypto_free_buffer() -
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
+{
+ if (!crypto_str)
+ return;
+ kfree(crypto_str->name);
+ crypto_str->name = NULL;
+}
+
+/**
+ * ext4_fname_disk_to_usr() - converts a filename from disk space to user space
+ */
+int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
+{
+ if (ctx == NULL)
+ return -EIO;
+ if (iname->len < 3) {
+ /* Check for . and .. */
+ if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
+ oname->name[0] = '.';
+ oname->name[iname->len-1] = '.';
+ oname->len = iname->len;
+ return oname->len;
+ }
+ }
+ if (ctx->has_valid_key)
+ return ext4_fname_decrypt(ctx, iname, oname);
+ else
+ return ext4_fname_hash(ctx, iname, oname);
+}
+
+int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_dir_entry_2 *de,
+ struct ext4_str *oname)
+{
+ struct ext4_str iname = {.name = (unsigned char *) de->name,
+ .len = de->name_len };
+
+ return _ext4_fname_disk_to_usr(ctx, &iname, oname);
+}
+
+
+/**
+ * ext4_fname_usr_to_disk() - converts a filename from user space to disk space
+ */
+int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname)
+{
+ int res;
+
+ if (ctx == NULL)
+ return -EIO;
+ if (iname->len < 3) {
+ /* Check for . and .. */
+ if (iname->name[0] == '.' &&
+ iname->name[iname->len-1] == '.') {
+ oname->name[0] = '.';
+ oname->name[iname->len-1] = '.';
+ oname->len = iname->len;
+ return oname->len;
+ }
+ }
+ if (ctx->has_valid_key) {
+ res = ext4_fname_encrypt(ctx, iname, oname);
+ return res;
+ }
+ /* Without a proper key, a user is not allowed to modify the filenames
+ * in a directory. Consequently, a user space name cannot be mapped to
+ * a disk-space name */
+ return -EACCES;
+}
+
+/*
+ * Calculate the htree hash from a filename from user space
+ */
+int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct dx_hash_info *hinfo)
+{
+ struct ext4_str tmp, tmp2;
+ int ret = 0;
+
+ if (!ctx || !ctx->has_valid_key ||
+ ((iname->name[0] == '.') &&
+ ((iname->len == 1) ||
+ ((iname->name[1] == '.') && (iname->len == 2))))) {
+ ext4fs_dirhash(iname->name, iname->len, hinfo);
+ return 0;
+ }
+
+ /* First encrypt the plaintext name */
+ ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
+ if (ret < 0)
+ return ret;
+
+ ret = ext4_fname_encrypt(ctx, iname, &tmp);
+ if (ret < 0)
+ goto out;
+
+ tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
+ tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
+ if (tmp2.name == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = ext4_fname_hash(ctx, &tmp, &tmp2);
+ if (ret > 0)
+ ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
+ ext4_fname_crypto_free_buffer(&tmp2);
+out:
+ ext4_fname_crypto_free_buffer(&tmp);
+ return ret;
+}
+
+/**
+ * ext4_fname_disk_to_hash() - converts a filename from disk space to htree-access string
+ */
+int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_dir_entry_2 *de,
+ struct dx_hash_info *hinfo)
+{
+ struct ext4_str iname = {.name = (unsigned char *) de->name,
+ .len = de->name_len};
+ struct ext4_str tmp;
+ int ret;
+
+ if (!ctx ||
+ ((iname.name[0] == '.') &&
+ ((iname.len == 1) ||
+ ((iname.name[1] == '.') && (iname.len == 2))))) {
+ ext4fs_dirhash(iname.name, iname.len, hinfo);
+ return 0;
+ }
+
+ tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
+ tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL);
+ if (tmp.name == NULL)
+ return -ENOMEM;
+
+ ret = ext4_fname_hash(ctx, &iname, &tmp);
+ if (ret > 0)
+ ext4fs_dirhash(tmp.name, tmp.len, hinfo);
+ ext4_fname_crypto_free_buffer(&tmp);
+ return ret;
+}
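
Since ext4_fname_hash() above returns an *encoded* digest built from the [a-zA-Z0-9_+] alphabet, the encoding is easy to sanity-check in isolation. The following self-contained userspace program copies ext4_fname_encode_digest() verbatim from this file (including its unused current_chunk variable) and encodes a sample 4-byte input; the 4/3 expansion with zero-padding turns 4 input bytes into 8 output characters. This is a hypothetical test harness, not part of this series:

/* Standalone sanity check for the filename digest encoding above. */
#include <stdio.h>
#include <string.h>

typedef unsigned int u32;

static int ext4_fname_encode_digest(char *dst, char *src, u32 len)
{
	static const char *lookup_table =
		"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+";
	u32 current_chunk, num_chunks, i;
	char tmp_buf[3];
	u32 c0, c1, c2, c3;

	current_chunk = 0;
	num_chunks = len/3;
	for (i = 0; i < num_chunks; i++) {
		c0 = src[3*i] & 0x3f;
		c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f;
		c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
		c3 = (src[3*i+2]>>2) & 0x3f;
		dst[4*i] = lookup_table[c0];
		dst[4*i+1] = lookup_table[c1];
		dst[4*i+2] = lookup_table[c2];
		dst[4*i+3] = lookup_table[c3];
	}
	if (i*3 < len) {
		memset(tmp_buf, 0, 3);
		memcpy(tmp_buf, &src[3*i], len-3*i);
		c0 = tmp_buf[0] & 0x3f;
		c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
		c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
		c3 = (tmp_buf[2]>>2) & 0x3f;
		dst[4*i] = lookup_table[c0];
		dst[4*i+1] = lookup_table[c1];
		dst[4*i+2] = lookup_table[c2];
		dst[4*i+3] = lookup_table[c3];
		i++;
	}
	return (i * 4);
}

int main(void)
{
	char digest[4] = { 0x12, 0x34, 0x56, 0x78 };
	char encoded[9] = { 0 };
	int n = ext4_fname_encode_digest(encoded, digest, sizeof(digest));

	printf("%d chars: %s\n", n, encoded);	/* expect 8 characters */
	return 0;
}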
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
new file mode 100644
index 000000000000..c8392af8abbb
--- /dev/null
+++ b/fs/ext4/crypto_key.c
@@ -0,0 +1,165 @@
+/*
+ * linux/fs/ext4/crypto_key.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions for ext4
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <uapi/linux/keyctl.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+ struct ext4_completion_result *ecr = req->data;
+
+ if (rc == -EINPROGRESS)
+ return;
+
+ ecr->res = rc;
+ complete(&ecr->completion);
+}
+
+/**
+ * ext4_derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key: Source key to which to apply derivation.
+ * @derived_key: Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
+ char source_key[EXT4_AES_256_XTS_KEY_SIZE],
+ char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
+{
+ int res = 0;
+ struct ablkcipher_request *req = NULL;
+ DECLARE_EXT4_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
+ 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ ablkcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ derive_crypt_complete, &ecr);
+ res = crypto_ablkcipher_setkey(tfm, deriving_key,
+ EXT4_AES_128_ECB_KEY_SIZE);
+ if (res < 0)
+ goto out;
+ sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
+ sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
+ ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ EXT4_AES_256_XTS_KEY_SIZE, NULL);
+ res = crypto_ablkcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+
+out:
+ if (req)
+ ablkcipher_request_free(req);
+ if (tfm)
+ crypto_free_ablkcipher(tfm);
+ return res;
+}
+
+/**
+ * ext4_generate_encryption_key() - generates an encryption key
+ * @inode: The inode to generate the encryption key for.
+ */
+int ext4_generate_encryption_key(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
+ char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+ (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
+ struct key *keyring_key = NULL;
+ struct ext4_encryption_key *master_key;
+ struct ext4_encryption_context ctx;
+ struct user_key_payload *ukp;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+
+ if (res != sizeof(ctx)) {
+ if (res > 0)
+ res = -EINVAL;
+ goto out;
+ }
+ res = 0;
+
+ if (S_ISREG(inode->i_mode))
+ crypt_key->mode = ctx.contents_encryption_mode;
+ else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+ crypt_key->mode = ctx.filenames_encryption_mode;
+ else {
+ printk(KERN_ERR "ext4 crypto: Unsupported inode type.\n");
+ BUG();
+ }
+ crypt_key->size = ext4_encryption_key_size(crypt_key->mode);
+ BUG_ON(!crypt_key->size);
+ if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
+ memset(crypt_key->raw, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
+ goto out;
+ }
+ memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
+ EXT4_KEY_DESC_PREFIX_SIZE);
+ sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
+ "%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
+ ctx.master_key_descriptor);
+ full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
+ (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
+ keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+ if (IS_ERR(keyring_key)) {
+ res = PTR_ERR(keyring_key);
+ keyring_key = NULL;
+ goto out;
+ }
+ BUG_ON(keyring_key->type != &key_type_logon);
+ ukp = ((struct user_key_payload *)keyring_key->payload.data);
+ if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
+ res = -EINVAL;
+ goto out;
+ }
+ master_key = (struct ext4_encryption_key *)ukp->data;
+ BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
+ EXT4_KEY_DERIVATION_NONCE_SIZE);
+ BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
+ res = ext4_derive_key_aes(ctx.nonce, master_key->raw, crypt_key->raw);
+out:
+ if (keyring_key)
+ key_put(keyring_key);
+ if (res < 0)
+ crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID;
+ return res;
+}
+
+int ext4_has_encryption_key(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
+
+ return (crypt_key->mode != EXT4_ENCRYPTION_MODE_INVALID);
+}
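
ext4_generate_encryption_key() above resolves the master key by calling request_key(&key_type_logon, ...) with a descriptor built from a fixed prefix plus the hex-encoded master_key_descriptor from the inode's encryption context, and it insists that the payload be exactly a struct ext4_encryption_key whose size equals EXT4_AES_256_XTS_KEY_SIZE. A userspace sketch of staging such a key in the session keyring might look like the following. The "ext4:" prefix, the 64-byte raw buffer, and the packed mode/raw/size layout are assumptions taken from ext4_crypto.h, which this diff does not show, and the key material is a throwaway demo value:

/* Hypothetical userspace sketch: load a master key where request_key()
 * in crypto_key.c will find it.  Build with -lkeyutils.
 * ASSUMED (from ext4_crypto.h, not shown in this diff): the "ext4:"
 * descriptor prefix, the 64-byte raw buffer, and the packed layout. */
#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext4_encryption_key {		/* assumed layout */
	uint32_t mode;
	char raw[64];
	uint32_t size;
} __attribute__((__packed__));

int main(void)
{
	struct ext4_encryption_key key;
	key_serial_t id;

	memset(&key, 0, sizeof(key));
	key.mode = 1;			/* EXT4_ENCRYPTION_MODE_AES_256_XTS */
	key.size = 64;			/* must equal EXT4_AES_256_XTS_KEY_SIZE */
	memset(key.raw, 0x42, key.size);	/* demo key material only! */

	/* prefix + two hex chars per byte of the key descriptor */
	id = add_key("logon", "ext4:0123456789abcdef",
		     &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}
	printf("added key %d\n", id);
	return 0;
}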
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
new file mode 100644
index 000000000000..30eaf9e9864a
--- /dev/null
+++ b/fs/ext4/crypto_policy.c
@@ -0,0 +1,194 @@
+/*
+ * linux/fs/ext4/crypto_policy.c
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption policy functions for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "ext4.h"
+#include "xattr.h"
+
+static int ext4_inode_has_encryption_context(struct inode *inode)
+{
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
+ return (res > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int ext4_is_encryption_context_consistent_with_policy(
+ struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx));
+ if (res != sizeof(ctx))
+ return 0;
+ return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx.contents_encryption_mode ==
+ policy->contents_encryption_mode) &&
+ (ctx.filenames_encryption_mode ==
+ policy->filenames_encryption_mode));
+}
+
+static int ext4_create_encryption_context_from_policy(
+ struct inode *inode, const struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+ int res = 0;
+
+ ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+ memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid contents encryption mode %d\n", __func__,
+ policy->contents_encryption_mode);
+ res = -EINVAL;
+ goto out;
+ }
+ if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid filenames encryption mode %d\n", __func__,
+ policy->filenames_encryption_mode);
+ res = -EINVAL;
+ goto out;
+ }
+ ctx.contents_encryption_mode = policy->contents_encryption_mode;
+ ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
+ get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+
+ res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx), 0);
+out:
+ if (!res)
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ return res;
+}
+
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+ struct inode *inode)
+{
+ if (policy->version != 0)
+ return -EINVAL;
+
+ if (!ext4_inode_has_encryption_context(inode)) {
+ if (!ext4_empty_dir(inode))
+ return -ENOTEMPTY;
+ return ext4_create_encryption_context_from_policy(inode,
+ policy);
+ }
+
+ if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
+ return 0;
+
+ printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
+ __func__);
+ return -EINVAL;
+}
+
+int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
+{
+ struct ext4_encryption_context ctx;
+
+ int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return -ENOENT;
+ if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+ policy->version = 0;
+ policy->contents_encryption_mode = ctx.contents_encryption_mode;
+ policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+ memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ return 0;
+}
+
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+ struct inode *child)
+{
+ struct ext4_encryption_context parent_ctx, child_ctx;
+ int res;
+
+ if ((parent == NULL) || (child == NULL)) {
+ pr_err("parent %p child %p\n", parent, child);
+ BUG_ON(1);
+ }
+ /* no restrictions if the parent directory is not encrypted */
+ if (!ext4_encrypted_inode(parent))
+ return 1;
+ res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &parent_ctx, sizeof(parent_ctx));
+ if (res != sizeof(parent_ctx))
+ return 0;
+ /* if the child directory is not encrypted, this is always a problem */
+ if (!ext4_encrypted_inode(child))
+ return 0;
+ res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &child_ctx, sizeof(child_ctx));
+ if (res != sizeof(child_ctx))
+ return 0;
+ return (memcmp(parent_ctx.master_key_descriptor,
+ child_ctx.master_key_descriptor,
+ EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ctx.contents_encryption_mode ==
+ child_ctx.contents_encryption_mode) &&
+ (parent_ctx.filenames_encryption_mode ==
+ child_ctx.filenames_encryption_mode));
+}
+
+/**
+ * ext4_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child: Child inode that inherits the context from @parent.
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int ext4_inherit_context(struct inode *parent, struct inode *child)
+{
+ struct ext4_encryption_context ctx;
+ int res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ &ctx, sizeof(ctx));
+
+ if (res != sizeof(ctx)) {
+ if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
+ ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode =
+ EXT4_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode =
+ EXT4_ENCRYPTION_MODE_AES_256_CTS;
+ memset(ctx.master_key_descriptor, 0x42,
+ EXT4_KEY_DESCRIPTOR_SIZE);
+ res = 0;
+ } else {
+ goto out;
+ }
+ }
+ get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
+ res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
+ sizeof(ctx), 0);
+out:
+ if (!res)
+ ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
+ return res;
+}
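
ext4_process_policy() above is the backend for the EXT4_IOC_SET_ENCRYPTION_POLICY ioctl defined in the ext4.h hunk further down: only version 0 is accepted, both modes must pass the validity checks, and a new context can only be created on an empty directory (hence the ext4_empty_dir() check); an existing context must already match the requested policy. A userspace sketch follows, with the packed struct layout and the 8-byte descriptor size assumed from ext4_crypto.h (not shown in this diff):

/* Hypothetical userspace sketch: mark an empty directory for
 * encryption via the ioctl added in this series. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct ext4_encryption_policy {		/* assumed layout */
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char master_key_descriptor[8];
} __attribute__((__packed__));

#define EXT4_IOC_SET_ENCRYPTION_POLICY \
	_IOR('f', 19, struct ext4_encryption_policy)

int main(int argc, char **argv)
{
	struct ext4_encryption_policy policy;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <empty-dir>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&policy, 0, sizeof(policy));
	policy.version = 0;			/* only version 0 is accepted */
	policy.contents_encryption_mode = 1;	/* EXT4_ENCRYPTION_MODE_AES_256_XTS */
	policy.filenames_encryption_mode = 4;	/* EXT4_ENCRYPTION_MODE_AES_256_CTS */
	memcpy(policy.master_key_descriptor,
	       "\x01\x23\x45\x67\x89\xab\xcd\xef", 8);

	if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0) {
		perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}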
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index c24143ea9c08..61db51a5ce4c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -22,10 +22,8 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
-#include <linux/rbtree.h>
#include "ext4.h"
#include "xattr.h"
@@ -110,7 +108,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
int err;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
+ struct buffer_head *bh = NULL;
int dir_has_error = 0;
+ struct ext4_fname_crypto_ctx *enc_ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(file, ctx);
@@ -127,17 +128,28 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (ext4_has_inline_data(inode)) {
int has_inline_data = 1;
- int ret = ext4_read_inline_dir(file, ctx,
+ err = ext4_read_inline_dir(file, ctx,
&has_inline_data);
if (has_inline_data)
- return ret;
+ return err;
+ }
+
+ enc_ctx = ext4_get_fname_crypto_ctx(inode, EXT4_NAME_LEN);
+ if (IS_ERR(enc_ctx))
+ return PTR_ERR(enc_ctx);
+ if (enc_ctx) {
+ err = ext4_fname_crypto_alloc_buffer(enc_ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (err < 0) {
+ ext4_put_fname_crypto_ctx(&enc_ctx);
+ return err;
+ }
}
offset = ctx->pos & (sb->s_blocksize - 1);
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;
- struct buffer_head *bh = NULL;
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
@@ -180,6 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
(unsigned long long)ctx->pos);
ctx->pos += sb->s_blocksize - offset;
brelse(bh);
+ bh = NULL;
continue;
}
set_buffer_verified(bh);
@@ -226,25 +239,44 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
- if (!dir_emit(ctx, de->name,
- de->name_len,
- le32_to_cpu(de->inode),
- get_dtype(sb, de->file_type))) {
- brelse(bh);
- return 0;
+ if (enc_ctx == NULL) {
+ /* Directory is not encrypted */
+ if (!dir_emit(ctx, de->name,
+ de->name_len,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type)))
+ goto done;
+ } else {
+ /* Directory is encrypted */
+ err = ext4_fname_disk_to_usr(enc_ctx,
+ de, &fname_crypto_str);
+ if (err < 0)
+ goto errout;
+ if (!dir_emit(ctx,
+ fname_crypto_str.name, err,
+ le32_to_cpu(de->inode),
+ get_dtype(sb, de->file_type)))
+ goto done;
}
}
ctx->pos += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
}
- offset = 0;
+ if ((ctx->pos < inode->i_size) && !dir_relax(inode))
+ goto done;
brelse(bh);
- if (ctx->pos < inode->i_size) {
- if (!dir_relax(inode))
- return 0;
- }
+ bh = NULL;
+ offset = 0;
}
- return 0;
+done:
+ err = 0;
+errout:
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ext4_put_fname_crypto_ctx(&enc_ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
+ brelse(bh);
+ return err;
}
static inline int is_32bit_api(void)
@@ -384,10 +416,15 @@ void ext4_htree_free_dir_info(struct dir_private_info *p)
/*
* Given a directory entry, enter it into the fname rb tree.
+ *
+ * When filename encryption is enabled, the dirent will hold the
+ * encrypted filename, while the htree will hold the decrypted filename.
+ * The decrypted filename is passed in via the ent_name parameter.
*/
int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
- struct ext4_dir_entry_2 *dirent)
+ struct ext4_dir_entry_2 *dirent,
+ struct ext4_str *ent_name)
{
struct rb_node **p, *parent = NULL;
struct fname *fname, *new_fn;
@@ -398,17 +435,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
p = &info->root.rb_node;
/* Create and allocate the fname structure */
- len = sizeof(struct fname) + dirent->name_len + 1;
+ len = sizeof(struct fname) + ent_name->len + 1;
new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
new_fn->minor_hash = minor_hash;
new_fn->inode = le32_to_cpu(dirent->inode);
- new_fn->name_len = dirent->name_len;
+ new_fn->name_len = ent_name->len;
new_fn->file_type = dirent->file_type;
- memcpy(new_fn->name, dirent->name, dirent->name_len);
- new_fn->name[dirent->name_len] = 0;
+ memcpy(new_fn->name, ent_name->name, ent_name->len);
+ new_fn->name[ent_name->len] = 0;
while (*p) {
parent = *p;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c8eb32eefc3c..ef267adce19a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -422,7 +422,7 @@ enum {
EXT4_INODE_DIRTY = 8,
EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
EXT4_INODE_NOCOMPR = 10, /* Don't compress */
- EXT4_INODE_ENCRYPT = 11, /* Compression error */
+ EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
/* End compression flags --- maybe not all used */
EXT4_INODE_INDEX = 12, /* hash-indexed directory */
EXT4_INODE_IMAGIC = 13, /* AFS directory */
@@ -582,6 +582,15 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID 0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
+#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
+
+#include "ext4_crypto.h"
+
/*
* ioctl commands
*/
@@ -603,6 +612,9 @@ enum {
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
+#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)
+#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -939,6 +951,11 @@ struct ext4_inode_info {
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
__u32 i_csum_seed;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Encryption params */
+ struct ext4_encryption_key i_encryption_key;
+#endif
};
/*
@@ -1142,7 +1159,8 @@ struct ext4_super_block {
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
__u8 s_checksum_type; /* metadata checksum algorithm used */
- __le16 s_reserved_pad;
+ __u8 s_encryption_level; /* versioning level for encryption */
+ __u8 s_reserved_pad; /* Padding to next 32bits */
__le64 s_kbytes_written; /* nr of lifetime kilobytes written */
__le32 s_snapshot_inum; /* Inode number of active snapshot */
__le32 s_snapshot_id; /* sequential ID of active snapshot */
@@ -1169,7 +1187,9 @@ struct ext4_super_block {
__le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
__le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
__u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
- __le32 s_reserved[105]; /* Padding to the end of the block */
+ __u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
+ __le32 s_lpf_ino; /* Location of the lost+found inode */
+ __le32 s_reserved[100]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -1180,8 +1200,16 @@ struct ext4_super_block {
/*
* run-time mount flags
*/
-#define EXT4_MF_MNTDIR_SAMPLED 0x0001
-#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
+#define EXT4_MF_MNTDIR_SAMPLED 0x0001
+#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
+#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
+ EXT4_MF_TEST_DUMMY_ENCRYPTION))
+#else
+#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
+#endif
/* Number of quota types we support */
#define EXT4_MAXQUOTAS 2
@@ -1351,6 +1379,12 @@ struct ext4_sb_info {
struct ratelimit_state s_err_ratelimit_state;
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Encryption */
+ uint32_t s_file_encryption_mode;
+ uint32_t s_dir_encryption_mode;
+#endif
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1466,6 +1500,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_SB(sb) (sb)
#endif
+/*
+ * Returns true if the inode is encrypted
+ */
+static inline int ext4_encrypted_inode(struct inode *inode)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
+#else
+ return 0;
+#endif
+}
+
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
@@ -1575,8 +1621,9 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
- EXT4_FEATURE_INCOMPAT_MMP | \
- EXT4_FEATURE_INCOMPAT_INLINE_DATA)
+ EXT4_FEATURE_INCOMPAT_MMP | \
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
+ EXT4_FEATURE_INCOMPAT_ENCRYPT)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -2001,6 +2048,99 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
+/* crypto_policy.c */
+int ext4_is_child_context_consistent_with_parent(struct inode *parent,
+ struct inode *child);
+int ext4_inherit_context(struct inode *parent, struct inode *child);
+void ext4_to_hex(char *dst, char *src, size_t src_size);
+int ext4_process_policy(const struct ext4_encryption_policy *policy,
+ struct inode *inode);
+int ext4_get_policy(struct inode *inode,
+ struct ext4_encryption_policy *policy);
+
+/* crypto.c */
+bool ext4_valid_contents_enc_mode(uint32_t mode);
+uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
+extern struct workqueue_struct *ext4_read_workqueue;
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
+void ext4_restore_control_page(struct page *data_page);
+struct page *ext4_encrypt(struct inode *inode,
+ struct page *plaintext_page);
+int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
+int ext4_decrypt_one(struct inode *inode, struct page *page);
+int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_init_crypto(void);
+void ext4_exit_crypto(void);
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+ return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+}
+#else
+static inline int ext4_init_crypto(void) { return 0; }
+static inline void ext4_exit_crypto(void) { }
+static inline int ext4_sb_has_crypto(struct super_block *sb)
+{
+ return 0;
+}
+#endif
+
+/* crypto_fname.c */
+bool ext4_valid_filenames_enc_mode(uint32_t mode);
+u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
+int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
+ u32 ilen, struct ext4_str *crypto_str);
+int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_str *iname,
+ struct ext4_str *oname);
+int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_dir_entry_2 *de,
+ struct ext4_str *oname);
+int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct ext4_str *oname);
+int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct qstr *iname,
+ struct dx_hash_info *hinfo);
+int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
+ const struct ext4_dir_entry_2 *de,
+ struct dx_hash_info *hinfo);
+int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
+ u32 namelen);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
+ u32 max_len);
+void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
+#else
+static inline
+void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) { }
+static inline
+struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
+ u32 max_len)
+{
+ return NULL;
+}
+static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
+#endif
+
+
+/* crypto_key.c */
+int ext4_generate_encryption_key(struct inode *inode);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+int ext4_has_encryption_key(struct inode *inode);
+#else
+static inline int ext4_has_encryption_key(struct inode *inode)
+{
+ return 0;
+}
+#endif
+
+
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
struct file *,
@@ -2011,17 +2151,20 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
(de), (bh), (buf), (size), (offset)))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
- __u32 minor_hash,
- struct ext4_dir_entry_2 *dirent);
+ __u32 minor_hash,
+ struct ext4_dir_entry_2 *dirent,
+ struct ext4_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
struct buffer_head *bh,
void *buf, int buf_size,
const char *name, int namelen,
struct ext4_dir_entry_2 **dest_de);
-void ext4_insert_dentry(struct inode *inode,
+int ext4_insert_dentry(struct inode *dir,
+ struct inode *inode,
struct ext4_dir_entry_2 *de,
int buf_size,
+ const struct qstr *iname,
const char *name, int namelen);
static inline void ext4_update_dx_flag(struct inode *inode)
{
@@ -2099,6 +2242,7 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
/* inode.c */
+int ext4_inode_is_fast_symlink(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_write(struct inode *inode, sector_t iblock,
@@ -2189,6 +2333,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
void *entry_buf,
int buf_size,
int csum_size);
+extern int ext4_empty_dir(struct inode *inode);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
@@ -2698,6 +2843,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
+/* readpages.c */
+extern int ext4_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages);
/* symlink.c */
extern const struct inode_operations ext4_symlink_inode_operations;
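
Illustration (not from the patch): the !CONFIG_EXT4_FS_ENCRYPTION stubs
above keep call sites free of preprocessor guards. A sketch of a
hypothetical caller, using only functions declared in this header:

	/* Hypothetical caller: compiles the same with or without
	 * CONFIG_EXT4_FS_ENCRYPTION, because the stubs collapse to
	 * constant returns that the compiler folds away. */
	static int example_setup_key(struct inode *inode)
	{
		if (ext4_encrypted_inode(inode) &&
		    !ext4_has_encryption_key(inode))
			return ext4_generate_encryption_key(inode);
		return 0;
	}
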
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
new file mode 100644
index 000000000000..c2ba35a914b6
--- /dev/null
+++ b/fs/ext4/ext4_crypto.h
@@ -0,0 +1,147 @@
+/*
+ * linux/fs/ext4/ext4_crypto.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption header content for ext4
+ *
+ * Written by Michael Halcrow, 2015.
+ */
+
+#ifndef _EXT4_CRYPTO_H
+#define _EXT4_CRYPTO_H
+
+#include <linux/fs.h>
+
+#define EXT4_KEY_DESCRIPTOR_SIZE 8
+
+/* Policy provided via an ioctl on the topmost directory */
+struct ext4_encryption_policy {
+ char version;
+ char contents_encryption_mode;
+ char filenames_encryption_mode;
+ char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+} __attribute__((__packed__));
+
+#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
+#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Reserved
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct ext4_encryption_context {
+ char format;
+ char contents_encryption_mode;
+ char filenames_encryption_mode;
+ char reserved;
+ char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
+ char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
+} __attribute__((__packed__));
+
+/* Encryption parameters */
+#define EXT4_XTS_TWEAK_SIZE 16
+#define EXT4_AES_128_ECB_KEY_SIZE 16
+#define EXT4_AES_256_GCM_KEY_SIZE 32
+#define EXT4_AES_256_CBC_KEY_SIZE 32
+#define EXT4_AES_256_CTS_KEY_SIZE 32
+#define EXT4_AES_256_XTS_KEY_SIZE 64
+#define EXT4_MAX_KEY_SIZE 64
+
+#define EXT4_KEY_DESC_PREFIX "ext4:"
+#define EXT4_KEY_DESC_PREFIX_SIZE 5
+
+struct ext4_encryption_key {
+ uint32_t mode;
+ char raw[EXT4_MAX_KEY_SIZE];
+ uint32_t size;
+};
+
+#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
+
+struct ext4_crypto_ctx {
+ struct crypto_tfm *tfm; /* Crypto API context */
+ struct page *bounce_page; /* Ciphertext page on write path */
+ struct page *control_page; /* Original page on write path */
+ struct bio *bio; /* The bio for this context */
+ struct work_struct work; /* Work queue for read complete path */
+ struct list_head free_list; /* Free list */
+ int flags; /* Flags */
+ int mode; /* Encryption mode for tfm */
+};
+
+struct ext4_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
+ struct ext4_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int ext4_encryption_key_size(int mode)
+{
+ switch (mode) {
+ case EXT4_ENCRYPTION_MODE_AES_256_XTS:
+ return EXT4_AES_256_XTS_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_GCM:
+ return EXT4_AES_256_GCM_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_CBC:
+ return EXT4_AES_256_CBC_KEY_SIZE;
+ case EXT4_ENCRYPTION_MODE_AES_256_CTS:
+ return EXT4_AES_256_CTS_KEY_SIZE;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
+#define EXT4_CRYPTO_BLOCK_SIZE 16
+#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
+
+struct ext4_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct ext4_fname_crypto_ctx {
+ u32 lim;
+ char tmp_buf[EXT4_CRYPTO_BLOCK_SIZE];
+ struct crypto_ablkcipher *ctfm;
+ struct crypto_hash *htfm;
+ struct page *workpage;
+ struct ext4_encryption_key key;
+ unsigned has_valid_key : 1;
+ unsigned ctfm_key_is_ready : 1;
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct ext4_encrypted_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __attribute__((__packed__));
+
+/**
+ * This function calculates the disk space required to store a
+ * symlink target of length l in encrypted symlink format.
+ */
+static inline u32 encrypted_symlink_data_len(u32 l)
+{
+ if (l < EXT4_CRYPTO_BLOCK_SIZE)
+ l = EXT4_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
+}
+
+#endif /* _EXT4_CRYPTO_H */
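
Illustration (not from the patch): with the policy struct and the ioctl
codepoints above mirrored into a userspace header, a process could label
an empty directory for encryption roughly as follows. The policy version
field is assumed to be 0 for this format, and example_set_policy is a
hypothetical helper:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int example_set_policy(const char *dirpath,
			       const char desc[EXT4_KEY_DESCRIPTOR_SIZE])
	{
		struct ext4_encryption_policy p;
		int ret, fd = open(dirpath, O_RDONLY);

		if (fd < 0)
			return -1;
		memset(&p, 0, sizeof(p));
		p.version = 0;	/* assumed: first policy version */
		p.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
		p.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS;
		memcpy(p.master_key_descriptor, desc,
		       EXT4_KEY_DESCRIPTOR_SIZE);
		ret = ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &p);
		close(fd);
		return ret;
	}
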
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index bed43081720f..973816bfe4a9 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1717,12 +1717,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
{
unsigned short ext1_ee_len, ext2_ee_len;
- /*
- * Make sure that both extents are initialized. We don't merge
- * unwritten extents so that we can be sure that end_io code has
- * the extent that was written properly split out and conversion to
- * initialized is trivial.
- */
if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
return 0;
@@ -3128,6 +3122,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
+ if (ext4_encrypted_inode(inode))
+ return ext4_encrypted_zeroout(inode, ex);
+
ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
if (ret > 0)
ret = 0;
@@ -4535,19 +4532,7 @@ got_allocated_blocks:
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
- if (map_from_cluster) {
- if (reserved_clusters) {
- /*
- * We have clusters reserved for this range.
- * But since we are not doing actual allocation
- * and are simply using blocks from previously
- * allocated cluster, we should release the
- * reservation and not claim quota.
- */
- ext4_da_update_reserve_space(inode,
- reserved_clusters, 0);
- }
- } else {
+ if (!map_from_cluster) {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4803,12 +4788,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
else
max_blocks -= lblk;
- flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
- EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
- EXT4_EX_NOCACHE;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
-
mutex_lock(&inode->i_mutex);
/*
@@ -4825,15 +4804,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
- /*
- * If we have a partial block after EOF we have to allocate
- * the entire block.
- */
- if (partial_end)
- max_blocks += 1;
}
+ flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
+ /* Preallocate the range including the unaligned edges */
+ if (partial_begin || partial_end) {
+ ret = ext4_alloc_file_blocks(file,
+ round_down(offset, 1 << blkbits) >> blkbits,
+ (round_up((offset + len), 1 << blkbits) -
+ round_down(offset, 1 << blkbits)) >> blkbits,
+ new_size, flags, mode);
+ if (ret)
+ goto out_mutex;
+
+ }
+
+ /* Zero range excluding the unaligned edges */
if (max_blocks > 0) {
+ flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ EXT4_EX_NOCACHE);
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, start, end - 1);
@@ -4847,19 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags, mode);
if (ret)
goto out_dio;
- /*
- * Remove entire range from the extent status tree.
- *
- * ext4_es_remove_extent(inode, lblk, max_blocks) is
- * NOT sufficient. I'm not sure why this is the case,
- * but let's be conservative and remove the extent
- * status tree for the entire inode. There should be
- * no outstanding delalloc extents thanks to the
- * filemap_write_and_wait_range() call above.
- */
- ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
- if (ret)
- goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
@@ -4922,6 +4901,20 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
ext4_lblk_t lblk;
unsigned int blkbits = inode->i_blkbits;
+ /*
+ * Encrypted inodes can't handle collapse range or insert
+ * range since we would need to re-encrypt blocks with a
+ * different IV or XTS tweak (which are based on the logical
+ * block number).
+ *
+ * XXX It's not clear why zero range isn't working, but we'll
+ * leave it disabled for encrypted inodes for now. This is a
+ * bug we should fix....
+ */
+ if (ext4_encrypted_inode(inode) &&
+ (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)))
+ return -EOPNOTSUPP;
+
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
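
Illustration (not from the patch): the collapse/insert restriction above
exists because the per-block tweak is derived from logical position. A
sketch of that dependency, assuming the tweak is simply the logical index
zero-padded to EXT4_XTS_TWEAK_SIZE bytes (example_fill_xts_tweak is a
hypothetical helper):

	#include <linux/string.h>
	#include <linux/types.h>

	/* With the tweak tied to the logical index, moving a block to a
	 * new logical offset (as collapse range would) changes its tweak,
	 * so the existing ciphertext would no longer decrypt. */
	static void example_fill_xts_tweak(char tweak[EXT4_XTS_TWEAK_SIZE],
					   pgoff_t index)
	{
		memcpy(tweak, &index, sizeof(index));
		memset(tweak + sizeof(index), 0,
		       EXT4_XTS_TWEAK_SIZE - sizeof(index));
	}
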
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index e04d45733976..d33d5a6852b9 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -9,12 +9,10 @@
*
* Ext4 extents status tree core functions.
*/
-#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
-#include "extents_status.h"
#include <trace/events/ext4.h>
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index e576d682b353..0613c256c344 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -20,7 +20,6 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
@@ -221,6 +220,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct inode *inode = file->f_mapping->host;
+
+ if (ext4_encrypted_inode(inode)) {
+ int err = ext4_generate_encryption_key(inode);
+ if (err)
+ return 0;
+ }
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
@@ -238,6 +244,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
+ int ret;
if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
!(sb->s_flags & MS_RDONLY))) {
@@ -276,11 +283,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* writing and the journal is present
*/
if (filp->f_mode & FMODE_WRITE) {
- int ret = ext4_inode_attach_jinode(inode);
+ ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
return ret;
}
- return dquot_file_open(inode, filp);
+ ret = dquot_file_open(inode, filp);
+ if (!ret && ext4_encrypted_inode(inode)) {
+ ret = ext4_generate_encryption_key(inode);
+ if (ret)
+ ret = -EACCES;
+ }
+ return ret;
}
/*
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index a8bc47f75fa0..e9d632e9aa4b 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
-#include <linux/jbd2.h>
#include <linux/blkdev.h>
#include "ext4.h"
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 3d586f02883e..e026aa941fd5 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -10,7 +10,6 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/cryptohash.h>
#include "ext4.h"
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ac644c31ca67..2cf18a2d5c72 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
@@ -997,6 +996,12 @@ got:
ei->i_block_group = group;
ei->i_last_alloc_group = ~0;
+	/* If the directory is encrypted, then we should encrypt the inode. */
+ if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) &&
+ (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(sbi)))
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+
ext4_set_inode_flags(inode);
if (IS_DIRSYNC(inode))
ext4_handle_sync(handle);
@@ -1029,11 +1034,28 @@ got:
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
-
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) &&
+ (sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) {
+ ei->i_inline_off = 0;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ ext4_set_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ } else {
+		/* Inline data and encryption are incompatible.
+		 * We turn off inline data since encryption is enabled. */
+ ei->i_inline_off = 1;
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ ext4_clear_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ }
+#else
ei->i_inline_off = 0;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
-
+#endif
ret = inode;
err = dquot_alloc_inode(inode);
if (err)
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4b143febf21f..feb2cafbeace 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -11,11 +11,13 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+#include <linux/fiemap.h>
+
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "truncate.h"
-#include <linux/fiemap.h>
#define EXT4_XATTR_SYSTEM_DATA "data"
#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -972,7 +974,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
offset = 0;
while ((void *)de < dlimit) {
de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
- trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n",
+ trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
offset, de_len, de->name_len, de->name,
de->name_len, le32_to_cpu(de->inode));
if (ext4_check_dir_entry(dir, NULL, de, bh,
@@ -1014,7 +1016,8 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err)
return err;
- ext4_insert_dentry(inode, de, inline_size, name, namelen);
+ ext4_insert_dentry(dir, inode, de, inline_size, &dentry->d_name,
+ name, namelen);
ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
@@ -1327,6 +1330,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
struct ext4_dir_entry_2 fake;
+ struct ext4_str tmp_str;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1398,8 +1402,10 @@ int htree_inlinedir_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
- err = ext4_htree_store_dirent(dir_file,
- hinfo->hash, hinfo->minor_hash, de);
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, hinfo->hash,
+ hinfo->minor_hash, de, &tmp_str);
if (err) {
count = err;
goto out;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b49cf6e59953..366476e71e10 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -20,7 +20,6 @@
#include <linux/fs.h>
#include <linux/time.h>
-#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
@@ -36,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/ratelimit.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
@@ -140,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
/*
* Test whether an inode is a fast symlink.
*/
-static int ext4_inode_is_fast_symlink(struct inode *inode)
+int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
@@ -887,6 +885,95 @@ int do_journal_get_write_access(handle_t *handle,
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
+{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
+ struct inode *inode = page->mapping->host;
+ unsigned block_start, block_end;
+ sector_t block;
+ int err = 0;
+ unsigned blocksize = inode->i_sb->s_blocksize;
+ unsigned bbits;
+ struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+ bool decrypt = false;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(from > PAGE_CACHE_SIZE);
+ BUG_ON(to > PAGE_CACHE_SIZE);
+ BUG_ON(from > to);
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
+ head = page_buffers(page);
+ bbits = ilog2(blocksize);
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+ for (bh = head, block_start = 0; bh != head || !block_start;
+ block++, block_start = block_end, bh = bh->b_this_page) {
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to) {
+ if (PageUptodate(page)) {
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ }
+ continue;
+ }
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ if (!buffer_mapped(bh)) {
+ WARN_ON(bh->b_size != blocksize);
+ err = get_block(inode, block, bh, 1);
+ if (err)
+ break;
+ if (buffer_new(bh)) {
+ unmap_underlying_metadata(bh->b_bdev,
+ bh->b_blocknr);
+ if (PageUptodate(page)) {
+ clear_buffer_new(bh);
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ continue;
+ }
+ if (block_end > to || block_start < from)
+ zero_user_segments(page, to, block_end,
+ block_start, from);
+ continue;
+ }
+ }
+ if (PageUptodate(page)) {
+ if (!buffer_uptodate(bh))
+ set_buffer_uptodate(bh);
+ continue;
+ }
+ if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+ !buffer_unwritten(bh) &&
+ (block_start < from || block_end > to)) {
+ ll_rw_block(READ, 1, &bh);
+ *wait_bh++ = bh;
+ decrypt = ext4_encrypted_inode(inode) &&
+ S_ISREG(inode->i_mode);
+ }
+ }
+ /*
+ * If we issued read requests, let them complete.
+ */
+ while (wait_bh > wait) {
+ wait_on_buffer(*--wait_bh);
+ if (!buffer_uptodate(*wait_bh))
+ err = -EIO;
+ }
+ if (unlikely(err))
+ page_zero_new_buffers(page, from, to);
+ else if (decrypt)
+ err = ext4_decrypt_one(inode, page);
+ return err;
+}
+#endif
+
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -949,11 +1036,19 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_should_dioread_nolock(inode))
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_get_block_write);
+ else
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_get_block);
+#else
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
-
+#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, page_buffers(page),
from, to, NULL,
@@ -2575,7 +2670,12 @@ retry_journal:
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ret = ext4_block_write_begin(page, pos, len,
+ ext4_da_get_block_prep);
+#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
+#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
@@ -2821,7 +2921,7 @@ static int ext4_readpage(struct file *file, struct page *page)
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
- return mpage_readpage(page, ext4_get_block);
+ return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
@@ -2836,7 +2936,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
if (ext4_has_inline_data(inode))
return 0;
- return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+ return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
@@ -3033,6 +3133,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#endif
if (IS_DAX(inode))
ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
@@ -3097,6 +3200,11 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+ return 0;
+#endif
+
/*
* If we are doing data journalling we don't support O_DIRECT
*/
@@ -3261,6 +3369,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
+ if (S_ISREG(inode->i_mode) &&
+ ext4_encrypted_inode(inode)) {
+ /* We expect the key to be set. */
+ BUG_ON(!ext4_has_encryption_key(inode));
+ BUG_ON(blocksize != PAGE_CACHE_SIZE);
+ WARN_ON_ONCE(ext4_decrypt_one(inode, page));
+ }
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
@@ -4096,7 +4211,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
- if (ext4_inode_is_fast_symlink(inode)) {
+ if (ext4_inode_is_fast_symlink(inode) &&
+ !ext4_encrypted_inode(inode)) {
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
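
Illustration (not from the patch body shown here): the read path switches
to ext4_mpage_readpages so that, for encrypted inodes, bio completion can
be deferred to ext4_read_workqueue and each page decrypted before being
marked up to date. A rough sketch of such a completion step, assuming the
ctx/bio fields declared in ext4_crypto.h (example_decrypt_bio_pages is a
hypothetical helper, not the series' actual completion_pages code):

	static void example_decrypt_bio_pages(struct ext4_crypto_ctx *ctx,
					      struct bio *bio)
	{
		struct bio_vec *bv;
		int i;

		bio_for_each_segment_all(bv, bio, i) {
			struct page *page = bv->bv_page;

			if (ext4_decrypt(ctx, page))
				SetPageError(page);
			else
				SetPageUptodate(page);
			unlock_page(page);
		}
	}
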
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index f58a0d106726..2cb9e178d1c5 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -8,12 +8,12 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/file.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -196,6 +196,16 @@ journal_err_out:
return err;
}
+static int uuid_is_zero(__u8 u[16])
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (u[i])
+ return 0;
+ return 1;
+}
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -615,7 +625,78 @@ resizefs_out:
}
case EXT4_IOC_PRECACHE_EXTENTS:
return ext4_ext_precache(inode);
+ case EXT4_IOC_SET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_encryption_policy policy;
+ int err = 0;
+
+ if (copy_from_user(&policy,
+ (struct ext4_encryption_policy __user *)arg,
+ sizeof(policy))) {
+ err = -EFAULT;
+ goto encryption_policy_out;
+ }
+ err = ext4_process_policy(&policy, inode);
+encryption_policy_out:
+ return err;
+#else
+ return -EOPNOTSUPP;
+#endif
+ }
+ case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+ int err, err2;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ handle_t *handle;
+
+ if (!ext4_sb_has_crypto(sb))
+ return -EOPNOTSUPP;
+ if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+ handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto pwsalt_err_exit;
+ }
+ err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+ if (err)
+ goto pwsalt_err_journal;
+ generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+ err = ext4_handle_dirty_metadata(handle, NULL,
+ sbi->s_sbh);
+ pwsalt_err_journal:
+ err2 = ext4_journal_stop(handle);
+ if (err2 && !err)
+ err = err2;
+ pwsalt_err_exit:
+ mnt_drop_write_file(filp);
+ if (err)
+ return err;
+ }
+ if (copy_to_user((void *) arg, sbi->s_es->s_encrypt_pw_salt,
+ 16))
+ return -EFAULT;
+ return 0;
+ }
+ case EXT4_IOC_GET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_encryption_policy policy;
+ int err = 0;
+
+ if (!ext4_encrypted_inode(inode))
+ return -ENOENT;
+ err = ext4_get_policy(inode, &policy);
+ if (err)
+ return err;
+ if (copy_to_user((void *)arg, &policy, sizeof(policy)))
+ return -EFAULT;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+ }
default:
return -ENOTTY;
}
@@ -680,6 +761,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FITRIM:
case EXT4_IOC_RESIZE_FS:
case EXT4_IOC_PRECACHE_EXTENTS:
+ case EXT4_IOC_SET_ENCRYPTION_POLICY:
+ case EXT4_IOC_GET_ENCRYPTION_PWSALT:
+ case EXT4_IOC_GET_ENCRYPTION_POLICY:
break;
default:
return -ENOIOCTLCMD;
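
Illustration (not from the patch): reading the password salt from
userspace, assuming EXT4_IOC_GET_ENCRYPTION_PWSALT is mirrored into a
userspace header. The handler above generates and persists a random salt
on first use, then returns the same 16 bytes on later calls
(example_print_pwsalt is a hypothetical helper):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int example_print_pwsalt(const char *mntpoint)
	{
		unsigned char salt[16];
		int i, fd = open(mntpoint, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, EXT4_IOC_GET_ENCRYPTION_PWSALT, salt) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		for (i = 0; i < 16; i++)
			printf("%02x", salt[i]);
		putchar('\n');
		return 0;
	}
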
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2291923dae4e..ef22cd951c0c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
-#include <linux/jbd2.h>
#include <linux/time.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
@@ -254,8 +253,9 @@ static struct dx_frame *dx_probe(const struct qstr *d_name,
struct dx_hash_info *hinfo,
struct dx_frame *frame);
static void dx_release(struct dx_frame *frames);
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+ unsigned blocksize, struct dx_hash_info *hinfo,
+ struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
struct dx_map_entry *offsets, int count, unsigned blocksize);
@@ -586,8 +586,10 @@ struct stats
unsigned bcount;
};
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
- int size, int show_names)
+static struct stats dx_show_leaf(struct inode *dir,
+ struct dx_hash_info *hinfo,
+ struct ext4_dir_entry_2 *de,
+ int size, int show_names)
{
unsigned names = 0, space = 0;
char *base = (char *) de;
@@ -600,12 +602,80 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
{
if (show_names)
{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ int len;
+ char *name;
+ struct ext4_str fname_crypto_str
+ = {.name = NULL, .len = 0};
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+
+ name = de->name;
+ len = de->name_len;
+ ctx = ext4_get_fname_crypto_ctx(dir,
+ EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ printk(KERN_WARNING "Error acquiring"
+ " crypto ctxt--skipping crypto\n");
+ ctx = NULL;
+ }
+ if (ctx == NULL) {
+ /* Directory is not encrypted */
+ ext4fs_dirhash(de->name,
+ de->name_len, &h);
+				printk("%.*s:(U)%x.%u ", len,
+ name, h.hash,
+ (unsigned) ((char *) de
+ - base));
+ } else {
+ /* Directory is encrypted */
+ res = ext4_fname_crypto_alloc_buffer(
+ ctx, de->name_len,
+ &fname_crypto_str);
+ if (res < 0) {
+ printk(KERN_WARNING "Error "
+ "allocating crypto "
+ "buffer--skipping "
+ "crypto\n");
+ ext4_put_fname_crypto_ctx(&ctx);
+ ctx = NULL;
+ }
+ res = ext4_fname_disk_to_usr(ctx, de,
+ &fname_crypto_str);
+ if (res < 0) {
+ printk(KERN_WARNING "Error "
+ "converting filename "
+ "from disk to usr"
+ "\n");
+ name = "??";
+ len = 2;
+ } else {
+ name = fname_crypto_str.name;
+ len = fname_crypto_str.len;
+ }
+ res = ext4_fname_disk_to_hash(ctx, de,
+ &h);
+ if (res < 0) {
+ printk(KERN_WARNING "Error "
+ "converting filename "
+ "from disk to htree"
+ "\n");
+ h.hash = 0xDEADBEEF;
+ }
+				printk("%.*s:(E)%x.%u ", len, name,
+ h.hash, (unsigned) ((char *) de
+ - base));
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(
+ &fname_crypto_str);
+ }
+#else
int len = de->name_len;
char *name = de->name;
- while (len--) printk("%c", *name++);
ext4fs_dirhash(de->name, de->name_len, &h);
- printk(":%x.%u ", h.hash,
+			printk("%.*s:%x.%u ", len, name, h.hash,
(unsigned) ((char *) de - base));
+#endif
}
space += EXT4_DIR_REC_LEN(de->name_len);
names++;
@@ -623,7 +693,6 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
unsigned count = dx_get_count(entries), names = 0, space = 0, i;
unsigned bcount = 0;
struct buffer_head *bh;
- int err;
printk("%i indexed blocks...\n", count);
for (i = 0; i < count; i++, entries++)
{
@@ -637,7 +706,8 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
continue;
stats = levels?
dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
- dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
+ dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *)
+ bh->b_data, blocksize, 0);
names += stats.names;
space += stats.space;
bcount += stats.bcount;
@@ -687,8 +757,28 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
if (hinfo->hash_version <= DX_HASH_TEA)
hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (d_name) {
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+
+ /* Check if the directory is encrypted */
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ ret_err = ERR_PTR(PTR_ERR(ctx));
+ goto fail;
+ }
+ res = ext4_fname_usr_to_hash(ctx, d_name, hinfo);
+ if (res < 0) {
+ ret_err = ERR_PTR(res);
+ goto fail;
+ }
+ ext4_put_fname_crypto_ctx(&ctx);
+ }
+#else
if (d_name)
ext4fs_dirhash(d_name->name, d_name->len, hinfo);
+#endif
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
@@ -773,6 +863,7 @@ fail:
brelse(frame->bh);
frame--;
}
+
if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
ext4_warning(dir->i_sb,
"Corrupt dir inode %lu, running e2fsck is "
@@ -878,6 +969,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str;
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
@@ -889,6 +982,24 @@ static int htree_dirblock_to_tree(struct file *dir_file,
top = (struct ext4_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Check if the directory is encrypted */
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ brelse(bh);
+ return err;
+ }
+ if (ctx != NULL) {
+ err = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (err < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ brelse(bh);
+ return err;
+ }
+ }
+#endif
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
bh->b_data, bh->b_size,
@@ -897,21 +1008,52 @@ static int htree_dirblock_to_tree(struct file *dir_file,
/* silently ignore the rest of the block */
break;
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ err = ext4_fname_disk_to_hash(ctx, de, hinfo);
+ if (err < 0) {
+ count = err;
+ goto errout;
+ }
+#else
ext4fs_dirhash(de->name, de->name_len, hinfo);
+#endif
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
(hinfo->minor_hash < start_minor_hash)))
continue;
if (de->inode == 0)
continue;
- if ((err = ext4_htree_store_dirent(dir_file,
- hinfo->hash, hinfo->minor_hash, de)) != 0) {
- brelse(bh);
- return err;
+ if (ctx == NULL) {
+ /* Directory is not encrypted */
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file,
+ hinfo->hash, hinfo->minor_hash, de,
+ &tmp_str);
+ } else {
+ /* Directory is encrypted */
+ err = ext4_fname_disk_to_usr(ctx, de,
+ &fname_crypto_str);
+ if (err < 0) {
+ count = err;
+ goto errout;
+ }
+ err = ext4_htree_store_dirent(dir_file,
+ hinfo->hash, hinfo->minor_hash, de,
+ &fname_crypto_str);
+ }
+ if (err != 0) {
+ count = err;
+ goto errout;
}
count++;
}
+errout:
brelse(bh);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+#endif
return count;
}
@@ -935,6 +1077,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
int count = 0;
int ret, err;
__u32 hashval;
+ struct ext4_str tmp_str;
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
@@ -970,14 +1113,22 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
/* Add '.' and '..' from the htree header */
if (!start_hash && !start_minor_hash) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
- if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, 0, 0,
+ de, &tmp_str);
+ if (err != 0)
goto errout;
count++;
}
if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
de = ext4_next_entry(de, dir->i_sb->s_blocksize);
- if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
+ tmp_str.name = de->name;
+ tmp_str.len = de->name_len;
+ err = ext4_htree_store_dirent(dir_file, 2, 0,
+ de, &tmp_str);
+ if (err != 0)
goto errout;
count++;
}
@@ -1035,17 +1186,33 @@ static inline int search_dirblock(struct buffer_head *bh,
* Create map of hash values, offsets, and sizes, stored at end of block.
* Returns number of entries mapped.
*/
-static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
- struct dx_hash_info *hinfo,
+static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
+ unsigned blocksize, struct dx_hash_info *hinfo,
struct dx_map_entry *map_tail)
{
int count = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int err;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+#endif
while ((char *) de < base + blocksize) {
if (de->name_len && de->inode) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ err = ext4_fname_disk_to_hash(ctx, de, &h);
+ if (err < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return err;
+ }
+#else
ext4fs_dirhash(de->name, de->name_len, &h);
+#endif
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
@@ -1056,6 +1223,9 @@ static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
/* XXX: do we need to check rec_len == 0 case? -Chris */
de = ext4_next_entry(de, blocksize);
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ext4_put_fname_crypto_ctx(&ctx);
+#endif
return count;
}
@@ -1106,57 +1276,107 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
* `len <= EXT4_NAME_LEN' is guaranteed by caller.
* `de != NULL' is guaranteed by caller.
*/
-static inline int ext4_match (int len, const char * const name,
- struct ext4_dir_entry_2 * de)
+static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
+ struct ext4_str *fname_crypto_str,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de)
{
- if (len != de->name_len)
- return 0;
+ int res;
+
if (!de->inode)
return 0;
- return !memcmp(name, de->name, len);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ctx) {
+ /* Directory is encrypted */
+ res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str);
+ if (res < 0)
+ return res;
+ if (len != res)
+ return 0;
+ res = memcmp(name, fname_crypto_str->name, len);
+ return (res == 0) ? 1 : 0;
+ }
+#endif
+ if (len != de->name_len)
+ return 0;
+ res = memcmp(name, de->name, len);
+ return (res == 0) ? 1 : 0;
}
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
-int search_dir(struct buffer_head *bh,
- char *search_buf,
- int buf_size,
- struct inode *dir,
- const struct qstr *d_name,
- unsigned int offset,
- struct ext4_dir_entry_2 **res_dir)
+int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
+ struct inode *dir, const struct qstr *d_name,
+ unsigned int offset, struct ext4_dir_entry_2 **res_dir)
{
struct ext4_dir_entry_2 * de;
char * dlimit;
int de_len;
const char *name = d_name->name;
int namelen = d_name->len;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -1;
+
+ if (ctx != NULL) {
+ /* Allocate buffer to hold maximum name length */
+ res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return -1;
+ }
+ }
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
+ if ((char *) de + de->name_len <= dlimit) {
+ res = ext4_match(ctx, &fname_crypto_str, namelen,
+ name, de);
+ if (res < 0) {
+ res = -1;
+ goto return_result;
+ }
+ if (res > 0) {
+ /* found a match - just to be sure, do
+ * a full check */
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ bh->b_data,
+ bh->b_size, offset)) {
+ res = -1;
+ goto return_result;
+ }
+ *res_dir = de;
+ res = 1;
+ goto return_result;
+ }
- if ((char *) de + namelen <= dlimit &&
- ext4_match (namelen, name, de)) {
- /* found a match - just to be sure, do a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
- bh->b_size, offset))
- return -1;
- *res_dir = de;
- return 1;
}
/* prevent looping on a bad block */
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
- if (de_len <= 0)
- return -1;
+ if (de_len <= 0) {
+ res = -1;
+ goto return_result;
+ }
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
- return 0;
+
+ res = 0;
+return_result:
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
}
static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
@@ -1345,6 +1565,9 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
ext4_lblk_t block;
int retval;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ *res_dir = NULL;
+#endif
frame = dx_probe(d_name, dir, &hinfo, frames);
if (IS_ERR(frame))
return (struct buffer_head *) frame;
@@ -1417,6 +1640,18 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
ino);
return ERR_PTR(-EIO);
}
+ if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) &&
+ !ext4_is_child_context_consistent_with_parent(dir,
+ inode)) {
+ iput(inode);
+ ext4_warning(inode->i_sb,
+ "Inconsistent encryption contexts: %lu/%lu\n",
+ (unsigned long) dir->i_ino,
+ (unsigned long) inode->i_ino);
+ return ERR_PTR(-EPERM);
+ }
}
return d_splice_alias(inode, dentry);
}
@@ -1541,7 +1776,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
/* create map in the end of data2 block */
map = (struct dx_map_entry *) (data2 + blocksize);
- count = dx_make_map((struct ext4_dir_entry_2 *) data1,
+ count = dx_make_map(dir, (struct ext4_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
dx_sort_map(map, count);
@@ -1564,7 +1799,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
hash2, split, count-split));
/* Fancy dance to stay within two buffers */
- de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
+ de2 = dx_move_dirents(data1, data2, map + split, count - split,
+ blocksize);
de = dx_pack_dirents(data1, blocksize);
de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
(char *) de,
@@ -1580,8 +1816,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
initialize_dirent_tail(t, blocksize);
}
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
- dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+ dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data1,
+ blocksize, 1));
+ dxtrace(dx_show_leaf(dir, hinfo, (struct ext4_dir_entry_2 *) data2,
+ blocksize, 1));
/* Which block gets the new entry? */
if (hinfo->hash >= hash2) {
@@ -1618,15 +1856,48 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
int nlen, rlen;
unsigned int offset = 0;
char *top;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -1;
+
+ if (ctx != NULL) {
+ /* Calculate record length needed to store the entry */
+ res = ext4_fname_crypto_namelen_on_disk(ctx, namelen);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return res;
+ }
+ reclen = EXT4_DIR_REC_LEN(res);
+
+ /* Allocate buffer to hold maximum name length */
+ res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return -1;
+ }
+ }
de = (struct ext4_dir_entry_2 *)buf;
top = buf + buf_size - reclen;
while ((char *) de <= top) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
- buf, buf_size, offset))
- return -EIO;
- if (ext4_match(namelen, name, de))
- return -EEXIST;
+ buf, buf_size, offset)) {
+ res = -EIO;
+ goto return_result;
+ }
+ /* Provide crypto context and crypto buffer to ext4 match */
+ res = ext4_match(ctx, &fname_crypto_str, namelen, name, de);
+ if (res < 0)
+ goto return_result;
+ if (res > 0) {
+ res = -EEXIST;
+ goto return_result;
+ }
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
if ((de->inode ? rlen - nlen : rlen) >= reclen)
@@ -1634,26 +1905,62 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
}
- if ((char *) de > top)
- return -ENOSPC;
- *dest_de = de;
- return 0;
+ if ((char *) de > top)
+ res = -ENOSPC;
+ else {
+ *dest_de = de;
+ res = 0;
+ }
+return_result:
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
}
-void ext4_insert_dentry(struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int buf_size,
- const char *name, int namelen)
+int ext4_insert_dentry(struct inode *dir,
+ struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ const struct qstr *iname,
+ const char *name, int namelen)
{
int nlen, rlen;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ struct ext4_str tmp_str;
+ int res;
+
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return -EIO;
+	/* By default, the input name is written to the disk unchanged */
+ tmp_str.name = (unsigned char *)name;
+ tmp_str.len = namelen;
+ if (ctx != NULL) {
+ /* Directory is encrypted */
+ res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
+ &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return -ENOMEM;
+ }
+ res = ext4_fname_usr_to_disk(ctx, iname, &fname_crypto_str);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return res;
+ }
+ tmp_str.name = fname_crypto_str.name;
+ tmp_str.len = fname_crypto_str.len;
+ }
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
if (de->inode) {
struct ext4_dir_entry_2 *de1 =
- (struct ext4_dir_entry_2 *)((char *)de + nlen);
+ (struct ext4_dir_entry_2 *)((char *)de + nlen);
de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
de = de1;
@@ -1661,9 +1968,14 @@ void ext4_insert_dentry(struct inode *inode,
de->file_type = EXT4_FT_UNKNOWN;
de->inode = cpu_to_le32(inode->i_ino);
ext4_set_de_type(inode->i_sb, de, inode->i_mode);
- de->name_len = namelen;
- memcpy(de->name, name, namelen);
+ de->name_len = tmp_str.len;
+
+ memcpy(de->name, tmp_str.name, tmp_str.len);
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ return 0;
}
+
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
* it points to a directory entry which is guaranteed to be large
@@ -1700,8 +2012,12 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
return err;
}
- /* By now the buffer is marked for journaling */
- ext4_insert_dentry(inode, de, blocksize, name, namelen);
+ /* By now the buffer is marked for journaling. Due to crypto operations,
+ * the following function call may fail */
+ err = ext4_insert_dentry(dir, inode, de, blocksize, &dentry->d_name,
+ name, namelen);
+ if (err < 0)
+ return err;
/*
* XXX shouldn't update any times until successful
@@ -1733,8 +2049,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ int res;
+#else
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
+#endif
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
@@ -1748,7 +2069,13 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct dx_hash_info hinfo;
ext4_lblk_t block;
struct fake_dirent *fde;
- int csum_size = 0;
+ int csum_size = 0;
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+#endif
if (ext4_has_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
@@ -1815,7 +2142,18 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ res = ext4_fname_usr_to_hash(ctx, &dentry->d_name, &hinfo);
+ if (res < 0) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ ext4_mark_inode_dirty(handle, dir);
+ brelse(bh);
+ return res;
+ }
+ ext4_put_fname_crypto_ctx(&ctx);
+#else
ext4fs_dirhash(name, namelen, &hinfo);
+#endif
memset(frames, 0, sizeof(frames));
frame = frames;
frame->entries = entries;
@@ -1865,7 +2203,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
struct super_block *sb;
@@ -1889,14 +2227,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return retval;
if (retval == 1) {
retval = 0;
- return retval;
+ goto out;
}
}
if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
+ goto out;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
@@ -1908,14 +2246,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return PTR_ERR(bh);
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (retval != -ENOSPC) {
- brelse(bh);
- return retval;
- }
+ if (retval != -ENOSPC)
+ goto out;
if (blocks == 1 && !dx_fallback &&
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
- return make_indexed_dir(handle, dentry, inode, bh);
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ retval = make_indexed_dir(handle, dentry, inode, bh);
+ bh = NULL; /* make_indexed_dir releases bh */
+ goto out;
+ }
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
@@ -1931,6 +2270,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+out:
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -2237,7 +2577,20 @@ retry:
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
- err = ext4_add_nondir(handle, dentry, inode);
+ err = 0;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (!err && (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)))) {
+ err = ext4_inherit_context(dir, inode);
+ if (err) {
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ }
+ }
+#endif
+ if (!err)
+ err = ext4_add_nondir(handle, dentry, inode);
if (!err && IS_DIRSYNC(dir))
ext4_handle_sync(handle);
}
@@ -2418,6 +2771,14 @@ retry:
err = ext4_init_new_dir(handle, dir, inode);
if (err)
goto out_clear_inode;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) {
+ err = ext4_inherit_context(dir, inode);
+ if (err)
+ goto out_clear_inode;
+ }
+#endif
err = ext4_mark_inode_dirty(handle, inode);
if (!err)
err = ext4_add_entry(handle, dentry, inode);
@@ -2450,7 +2811,7 @@ out_stop:
/*
* routine to check that the specified directory is empty (for rmdir)
*/
-static int empty_dir(struct inode *inode)
+int ext4_empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
@@ -2718,7 +3079,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
goto end_rmdir;
retval = -ENOTEMPTY;
- if (!empty_dir(inode))
+ if (!ext4_empty_dir(inode))
goto end_rmdir;
handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -2828,16 +3189,25 @@ static int ext4_symlink(struct inode *dir,
{
handle_t *handle;
struct inode *inode;
- int l, err, retries = 0;
+ int err, len = strlen(symname);
int credits;
-
- l = strlen(symname)+1;
- if (l > dir->i_sb->s_blocksize)
+ bool encryption_required;
+ struct ext4_str disk_link;
+ struct ext4_encrypted_symlink_data *sd = NULL;
+
+ disk_link.len = len + 1;
+ disk_link.name = (char *) symname;
+
+ encryption_required = (ext4_encrypted_inode(dir) ||
+ DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
+ if (encryption_required)
+ disk_link.len = encrypted_symlink_data_len(len) + 1;
+ if (disk_link.len > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
dquot_initialize(dir);
- if (l > EXT4_N_BLOCKS * 4) {
+ if (disk_link.len > EXT4_N_BLOCKS * 4) {
/*
* For non-fast symlinks, we just allocate inode and put it on
* orphan list in the first transaction => we need bitmap,
@@ -2856,16 +3226,49 @@ static int ext4_symlink(struct inode *dir,
credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3;
}
-retry:
+
inode = ext4_new_inode_start_handle(dir, S_IFLNK|S_IRWXUGO,
&dentry->d_name, 0, NULL,
EXT4_HT_DIR, credits);
handle = ext4_journal_current_handle();
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_stop;
+ if (IS_ERR(inode)) {
+ if (handle)
+ ext4_journal_stop(handle);
+ return PTR_ERR(inode);
+ }
+
+ if (encryption_required) {
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct qstr istr;
+ struct ext4_str ostr;
+
+ sd = kzalloc(disk_link.len, GFP_NOFS);
+ if (!sd) {
+ err = -ENOMEM;
+ goto err_drop_inode;
+ }
+ err = ext4_inherit_context(dir, inode);
+ if (err)
+ goto err_drop_inode;
+ ctx = ext4_get_fname_crypto_ctx(inode,
+ inode->i_sb->s_blocksize);
+ if (IS_ERR_OR_NULL(ctx)) {
+ /* We just set the policy, so ctx should not be NULL */
+ err = (ctx == NULL) ? -EIO : PTR_ERR(ctx);
+ goto err_drop_inode;
+ }
+ istr.name = (const unsigned char *) symname;
+ istr.len = len;
+ ostr.name = sd->encrypted_path;
+ err = ext4_fname_usr_to_disk(ctx, &istr, &ostr);
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (err < 0)
+ goto err_drop_inode;
+ sd->len = cpu_to_le16(ostr.len);
+ disk_link.name = (char *) sd;
+ }
- if (l > EXT4_N_BLOCKS * 4) {
+ if (disk_link.len > EXT4_N_BLOCKS * 4) {
inode->i_op = &ext4_symlink_inode_operations;
ext4_set_aops(inode);
/*
@@ -2881,9 +3284,10 @@ retry:
drop_nlink(inode);
err = ext4_orphan_add(handle, inode);
ext4_journal_stop(handle);
+ handle = NULL;
if (err)
goto err_drop_inode;
- err = __page_symlink(inode, symname, l, 1);
+ err = __page_symlink(inode, disk_link.name, disk_link.len, 1);
if (err)
goto err_drop_inode;
/*
@@ -2895,34 +3299,37 @@ retry:
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
+ handle = NULL;
goto err_drop_inode;
}
set_nlink(inode, 1);
err = ext4_orphan_del(handle, inode);
- if (err) {
- ext4_journal_stop(handle);
- clear_nlink(inode);
+ if (err)
goto err_drop_inode;
- }
} else {
/* clear the extent format for fast symlink */
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
- inode->i_op = &ext4_fast_symlink_inode_operations;
- memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
- inode->i_size = l-1;
+ inode->i_op = encryption_required ?
+ &ext4_symlink_inode_operations :
+ &ext4_fast_symlink_inode_operations;
+ memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
+ disk_link.len);
+ inode->i_size = disk_link.len - 1;
}
EXT4_I(inode)->i_disksize = inode->i_size;
err = ext4_add_nondir(handle, dentry, inode);
if (!err && IS_DIRSYNC(dir))
ext4_handle_sync(handle);
-out_stop:
if (handle)
ext4_journal_stop(handle);
- if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
- goto retry;
+ kfree(sd);
return err;
err_drop_inode:
+ if (handle)
+ ext4_journal_stop(handle);
+ kfree(sd);
+ clear_nlink(inode);
unlock_new_inode(inode);
iput(inode);
return err;
@@ -2937,7 +3344,9 @@ static int ext4_link(struct dentry *old_dentry,
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
-
+ if (ext4_encrypted_inode(dir) &&
+ !ext4_is_child_context_consistent_with_parent(dir, inode))
+ return -EPERM;
dquot_initialize(dir);
retry:
@@ -3238,6 +3647,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
goto end_rename;
+ if ((old.dir != new.dir) &&
+ ext4_encrypted_inode(new.dir) &&
+ !ext4_is_child_context_consistent_with_parent(new.dir,
+ old.inode)) {
+ retval = -EPERM;
+ goto end_rename;
+ }
+
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
@@ -3258,12 +3675,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
if (!(flags & RENAME_WHITEOUT)) {
handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_rename;
+ }
} else {
whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
- if (IS_ERR(whiteout))
- return PTR_ERR(whiteout);
+ if (IS_ERR(whiteout)) {
+ retval = PTR_ERR(whiteout);
+ whiteout = NULL;
+ goto end_rename;
+ }
}
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
@@ -3272,7 +3695,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(old.inode->i_mode)) {
if (new.inode) {
retval = -ENOTEMPTY;
- if (!empty_dir(new.inode))
+ if (!ext4_empty_dir(new.inode))
goto end_rename;
} else {
retval = -EMLINK;
@@ -3346,8 +3769,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_dec_count(handle, old.dir);
if (new.inode) {
- /* checked empty_dir above, can't have another parent,
- * ext4_dec_count() won't work for many-linked dirs */
+ /* checked ext4_empty_dir above, can't have another
+ * parent, ext4_dec_count() won't work for many-linked
+ * dirs */
clear_nlink(new.inode);
} else {
ext4_inc_count(handle, new.dir);
@@ -3427,8 +3851,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
handle = ext4_journal_start(old.dir, EXT4_HT_DIR,
(2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
2 * EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ retval = PTR_ERR(handle);
+ handle = NULL;
+ goto end_rename;
+ }
if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
ext4_handle_sync(handle);
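
The namei.c changes above replace a symlink's raw target string with a length-prefixed ciphertext record on disk. A minimal user-space sketch of that layout and its size computation follows; the 16-bit little-endian header is supported by the cpu_to_le16(ostr.len) call in the hunk above, but the struct and function names here are illustrative, not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed on-disk shape of the record built in ext4_symlink() above:
 * a little-endian 16-bit ciphertext length followed by the ciphertext
 * bytes themselves. */
struct encrypted_symlink_record {
	uint16_t len;          /* cpu_to_le16(ostr.len) in the patch */
	char encrypted_path[]; /* ciphertext follows immediately */
} __attribute__((packed));

/* Mirrors what encrypted_symlink_data_len() must compute: header plus
 * ciphertext.  ext4_symlink() then adds one more byte for a trailing
 * NUL when it sets disk_link.len. */
static size_t record_len(size_t ciphertext_len)
{
	return sizeof(struct encrypted_symlink_record) + ciphertext_len;
}

int main(void)
{
	/* A 32-byte ciphertext: 2-byte header + 32 bytes of payload,
	 * so disk_link.len would be 35 including the NUL. */
	printf("record: %zu bytes, disk_link.len: %zu\n",
	       record_len(32), record_len(32) + 1);
	return 0;
}
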
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 464984261e69..5765f88b3904 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/time.h>
-#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
@@ -24,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/ratelimit.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -68,6 +66,10 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct page *data_page = NULL;
+ struct ext4_crypto_ctx *ctx = NULL;
+#endif
struct buffer_head *bh, *head;
unsigned bio_start = bvec->bv_offset;
unsigned bio_end = bio_start + bvec->bv_len;
@@ -77,6 +79,15 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (!page->mapping) {
+ /* The bounce data pages are unmapped. */
+ data_page = page;
+ ctx = (struct ext4_crypto_ctx *)page_private(data_page);
+ page = ctx->control_page;
+ }
+#endif
+
if (error) {
SetPageError(page);
set_bit(AS_EIO, &page->mapping->flags);
@@ -101,8 +112,13 @@ static void ext4_finish_bio(struct bio *bio)
} while ((bh = bh->b_this_page) != head);
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
- if (!under_io)
+ if (!under_io) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (ctx)
+ ext4_restore_control_page(data_page);
+#endif
end_page_writeback(page);
+ }
}
}
@@ -377,6 +393,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
+ struct page *page,
struct buffer_head *bh)
{
int ret;
@@ -390,7 +407,7 @@ submit_and_retry:
if (ret)
return ret;
}
- ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
+ ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
io->io_next_block++;
@@ -403,6 +420,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
struct writeback_control *wbc,
bool keep_towrite)
{
+ struct page *data_page = NULL;
struct inode *inode = page->mapping->host;
unsigned block_start, blocksize;
struct buffer_head *bh, *head;
@@ -462,19 +480,29 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
set_buffer_async_write(bh);
} while ((bh = bh->b_this_page) != head);
- /* Now submit buffers to write */
bh = head = page_buffers(page);
+
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ data_page = ext4_encrypt(inode, page);
+ if (IS_ERR(data_page)) {
+ ret = PTR_ERR(data_page);
+ data_page = NULL;
+ goto out;
+ }
+ }
+
+ /* Now submit buffers to write */
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bh);
+ ret = io_submit_add_bh(io, inode,
+ data_page ? data_page : page, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
* we can do but mark the page as dirty, and
* better luck next time.
*/
- redirty_page_for_writepage(wbc, page);
break;
}
nr_submitted++;
@@ -483,6 +511,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* Error stopped previous loop? Clean up buffers... */
if (ret) {
+ out:
+ if (data_page)
+ ext4_restore_control_page(data_page);
+ printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ redirty_page_for_writepage(wbc, page);
do {
clear_buffer_async_write(bh);
bh = bh->b_this_page;
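
The page-io.c hunks above introduce a bounce-page scheme for encrypted writeback: data is encrypted into a separate, unmapped page, and I/O completion walks back from that bounce page to the original pagecache page through page_private(). A toy user-space sketch of the same indirection, where the XOR stands in for the real AES-XTS transform and all names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct page {
	char data[16];
	void *private;              /* plays the role of page_private() */
};

struct crypto_ctx {
	struct page *control_page;  /* the original pagecache page */
};

/* Like ext4_encrypt(): encrypt into a bounce page and remember the
 * original page through the context so completion can find it. */
static struct page *encrypt_for_write(struct page *plain)
{
	struct page *bounce = calloc(1, sizeof(*bounce));
	struct crypto_ctx *ctx = calloc(1, sizeof(*ctx));
	size_t i;

	for (i = 0; i < sizeof(plain->data); i++)
		bounce->data[i] = plain->data[i] ^ 0x5a; /* toy cipher */
	ctx->control_page = plain;
	bounce->private = ctx;       /* set_page_private(data_page, ctx) */
	return bounce;
}

/* Like ext4_finish_bio(): the bio holds the unmapped bounce page;
 * recover the control page before ending writeback on it. */
static void finish_write(struct page *data_page)
{
	struct crypto_ctx *ctx = data_page->private;

	printf("writeback done on control page %p\n",
	       (void *)ctx->control_page);
	free(ctx);
	free(data_page);
}

int main(void)
{
	struct page plain = { .data = "hello ext4", .private = NULL };

	finish_write(encrypt_for_write(&plain));
	return 0;
}
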
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
new file mode 100644
index 000000000000..171b9ac4b45e
--- /dev/null
+++ b/fs/ext4/readpage.c
@@ -0,0 +1,328 @@
+/*
+ * linux/fs/ext4/readpage.c
+ *
+ * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This was originally taken from fs/mpage.c
+ *
+ * The ext4_mpage_readpages() function here is intended to replace
+ * mpage_readpages() in the general case, not just for encrypted
+ * files. It has some limitations (see below), where it will fall
+ * back to block_read_full_page(), but these limitations should only
+ * be hit when page_size != block_size.
+ *
+ * This will allow us to attach a callback function to support ext4
+ * encryption.
+ *
+ * If anything unusual happens, such as:
+ *
+ * - encountering a page which has buffers
+ * - encountering a page which has a non-hole after a hole
+ * - encountering a page with non-contiguous blocks
+ *
+ * then this code just gives up and calls the buffer_head-based read function.
+ * It does handle a page which has holes at the end - that is a common case:
+ * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/bio.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/prefetch.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/cleancache.h>
+
+#include "ext4.h"
+
+/*
+ * Call ext4_decrypt on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ struct ext4_crypto_ctx *ctx =
+ container_of(work, struct ext4_crypto_ctx, work);
+ struct bio *bio = ctx->bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ int ret = ext4_decrypt(ctx, page);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ ext4_release_crypto_ctx(ctx);
+ bio_put(bio);
+#else
+ BUG();
+#endif
+}
+
+static inline bool ext4_bio_encrypted(struct bio *bio)
+{
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ return unlikely(bio->bi_private != NULL);
+#else
+ return false;
+#endif
+}
+
+/*
+ * I/O completion handler for multipage BIOs.
+ *
+ * The mpage code never puts partial pages into a BIO (except for end-of-file).
+ * If a page does not map to a contiguous run of blocks then it simply falls
+ * back to block_read_full_page().
+ *
+ * Why is this? If a page's completion depends on a number of different BIOs
+ * which can complete in any order (or at the same time) then determining the
+ * status of that page is hard. See end_buffer_async_read() for the details.
+ * There is no point in duplicating all that complexity.
+ */
+static void mpage_end_io(struct bio *bio, int err)
+{
+ struct bio_vec *bv;
+ int i;
+
+ if (ext4_bio_encrypted(bio)) {
+ struct ext4_crypto_ctx *ctx = bio->bi_private;
+
+ if (err) {
+ ext4_release_crypto_ctx(ctx);
+ } else {
+ INIT_WORK(&ctx->work, completion_pages);
+ ctx->bio = bio;
+ queue_work(ext4_read_workqueue, &ctx->work);
+ return;
+ }
+ }
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ if (!err) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ unlock_page(page);
+ }
+
+ bio_put(bio);
+}
+
+int ext4_mpage_readpages(struct address_space *mapping,
+ struct list_head *pages, struct page *page,
+ unsigned nr_pages)
+{
+ struct bio *bio = NULL;
+ unsigned page_idx;
+ sector_t last_block_in_bio = 0;
+
+ struct inode *inode = mapping->host;
+ const unsigned blkbits = inode->i_blkbits;
+ const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
+ const unsigned blocksize = 1 << blkbits;
+ sector_t block_in_file;
+ sector_t last_block;
+ sector_t last_block_in_file;
+ sector_t blocks[MAX_BUF_PER_PAGE];
+ unsigned page_block;
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ int length;
+ unsigned relative_block = 0;
+ struct ext4_map_blocks map;
+
+ map.m_pblk = 0;
+ map.m_lblk = 0;
+ map.m_len = 0;
+ map.m_flags = 0;
+
+ for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+ int fully_mapped = 1;
+ unsigned first_hole = blocks_per_page;
+
+ prefetchw(&page->flags);
+ if (pages) {
+ page = list_entry(pages->prev, struct page, lru);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping,
+ page->index, GFP_KERNEL))
+ goto next_page;
+ }
+
+ if (page_has_buffers(page))
+ goto confused;
+
+ block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
+ last_block = block_in_file + nr_pages * blocks_per_page;
+ last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
+ if (last_block > last_block_in_file)
+ last_block = last_block_in_file;
+ page_block = 0;
+
+ /*
+ * Map blocks using the previous result first.
+ */
+ if ((map.m_flags & EXT4_MAP_MAPPED) &&
+ block_in_file > map.m_lblk &&
+ block_in_file < (map.m_lblk + map.m_len)) {
+ unsigned map_offset = block_in_file - map.m_lblk;
+ unsigned last = map.m_len - map_offset;
+
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == last) {
+ /* needed? */
+ map.m_flags &= ~EXT4_MAP_MAPPED;
+ break;
+ }
+ if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map.m_pblk + map_offset +
+ relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+
+ /*
+ * Then do more ext4_map_blocks() calls until we are
+ * done with this page.
+ */
+ while (page_block < blocks_per_page) {
+ if (block_in_file < last_block) {
+ map.m_lblk = block_in_file;
+ map.m_len = last_block - block_in_file;
+
+ if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
+ set_error_page:
+ SetPageError(page);
+ zero_user_segment(page, 0,
+ PAGE_CACHE_SIZE);
+ unlock_page(page);
+ goto next_page;
+ }
+ }
+ if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
+ fully_mapped = 0;
+ if (first_hole == blocks_per_page)
+ first_hole = page_block;
+ page_block++;
+ block_in_file++;
+ continue;
+ }
+ if (first_hole != blocks_per_page)
+ goto confused; /* hole -> non-hole */
+
+ /* Contiguous blocks? */
+ if (page_block && blocks[page_block-1] != map.m_pblk-1)
+ goto confused;
+ for (relative_block = 0; ; relative_block++) {
+ if (relative_block == map.m_len) {
+ /* needed? */
+ map.m_flags &= ~EXT4_MAP_MAPPED;
+ break;
+ } else if (page_block == blocks_per_page)
+ break;
+ blocks[page_block] = map.m_pblk+relative_block;
+ page_block++;
+ block_in_file++;
+ }
+ }
+ if (first_hole != blocks_per_page) {
+ zero_user_segment(page, first_hole << blkbits,
+ PAGE_CACHE_SIZE);
+ if (first_hole == 0) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ goto next_page;
+ }
+ } else if (fully_mapped) {
+ SetPageMappedToDisk(page);
+ }
+ if (fully_mapped && blocks_per_page == 1 &&
+ !PageUptodate(page) && cleancache_get_page(page) == 0) {
+ SetPageUptodate(page);
+ goto confused;
+ }
+
+ /*
+ * This page will go to BIO. Do we need to send this
+ * BIO off first?
+ */
+ if (bio && (last_block_in_bio != blocks[0] - 1)) {
+ submit_and_realloc:
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ if (bio == NULL) {
+ struct ext4_crypto_ctx *ctx = NULL;
+
+ if (ext4_encrypted_inode(inode) &&
+ S_ISREG(inode->i_mode)) {
+ ctx = ext4_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ goto set_error_page;
+ }
+ bio = bio_alloc(GFP_KERNEL,
+ min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+ if (!bio) {
+ if (ctx)
+ ext4_release_crypto_ctx(ctx);
+ goto set_error_page;
+ }
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
+ bio->bi_end_io = mpage_end_io;
+ bio->bi_private = ctx;
+ }
+
+ length = first_hole << blkbits;
+ if (bio_add_page(bio, page, length, 0) < length)
+ goto submit_and_realloc;
+
+ if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
+ (relative_block == map.m_len)) ||
+ (first_hole != blocks_per_page)) {
+ submit_bio(READ, bio);
+ bio = NULL;
+ } else
+ last_block_in_bio = blocks[blocks_per_page - 1];
+ goto next_page;
+ confused:
+ if (bio) {
+ submit_bio(READ, bio);
+ bio = NULL;
+ }
+ if (!PageUptodate(page))
+ block_read_full_page(page, ext4_get_block);
+ else
+ unlock_page(page);
+ next_page:
+ if (pages)
+ page_cache_release(page);
+ }
+ BUG_ON(pages && !list_empty(pages));
+ if (bio)
+ submit_bio(READ, bio);
+ return 0;
+}
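
ext4_mpage_readpages() above batches pages into one bio only while their physical blocks stay contiguous; any discontinuity submits the in-flight bio and starts a new one. A simplified sketch of that rule, assuming block_size == page_size so each page contributes exactly one block (the block numbers are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* First physical block of each page, as ext4_map_blocks()
	 * might report them. */
	uint64_t first_block[] = { 100, 101, 102, 500, 501 };
	uint64_t last_block_in_bio = 0;
	int bio_open = 0, bios = 0;
	size_t i;

	for (i = 0; i < sizeof(first_block) / sizeof(first_block[0]); i++) {
		/* Same test as the patch: a page may join the current
		 * bio only if it continues the run of blocks. */
		if (bio_open && last_block_in_bio != first_block[i] - 1) {
			printf("submit bio #%d\n", ++bios);
			bio_open = 0;
		}
		if (!bio_open)
			bio_open = 1;   /* bio_alloc() in the real code */
		last_block_in_bio = first_block[i];
	}
	if (bio_open)
		printf("submit bio #%d\n", ++bios);
	return 0;
}
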
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d348c7d29d80..821f22dbe825 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
-#include <linux/jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
@@ -323,22 +322,6 @@ static void save_error_info(struct super_block *sb, const char *func,
ext4_commit_super(sb, 1);
}
-/*
- * The del_gendisk() function uninitializes the disk-specific data
- * structures, including the bdi structure, without telling anyone
- * else. Once this happens, any attempt to call mark_buffer_dirty()
- * (for example, by ext4_commit_super), will cause a kernel OOPS.
- * This is a kludge to prevent these oops until we can put in a proper
- * hook in del_gendisk() to inform the VFS and file system layers.
- */
-static int block_device_ejected(struct super_block *sb)
-{
- struct inode *bd_inode = sb->s_bdev->bd_inode;
- struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
-
- return bdi->dev == NULL;
-}
-
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
struct super_block *sb = journal->j_private;
@@ -893,6 +876,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
+#endif
return &ei->vfs_inode;
}
@@ -1120,7 +1106,7 @@ enum {
Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
- Opt_data_err_abort, Opt_data_err_ignore,
+ Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
@@ -1211,6 +1197,7 @@ static const match_table_t tokens = {
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
+ {Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
@@ -1412,6 +1399,7 @@ static const struct mount_opts {
{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
+ {Opt_test_dummy_encryption, 0, MOPT_GTE0},
{Opt_err, 0, 0}
};
@@ -1588,6 +1576,15 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
}
*journal_ioprio =
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
+ } else if (token == Opt_test_dummy_encryption) {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
+ ext4_msg(sb, KERN_WARNING,
+ "Test dummy encryption mode enabled");
+#else
+ ext4_msg(sb, KERN_WARNING,
+ "Test dummy encryption mount option ignored");
+#endif
} else if (m->flags & MOPT_DATAJ) {
if (is_remount) {
if (!sbi->s_journal)
@@ -2685,11 +2682,13 @@ static struct attribute *ext4_attrs[] = {
EXT4_INFO_ATTR(lazy_itable_init);
EXT4_INFO_ATTR(batched_discard);
EXT4_INFO_ATTR(meta_bg_resize);
+EXT4_INFO_ATTR(encryption);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
+ ATTR_LIST(encryption),
NULL,
};
@@ -3448,6 +3447,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ /* Modes of operations for file and directory encryption. */
+ sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
+ sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID;
+#endif
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
@@ -3692,6 +3696,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
}
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) &&
+ es->s_encryption_level) {
+ ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
+ es->s_encryption_level);
+ goto failed_mount;
+ }
+
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
@@ -4054,6 +4065,13 @@ no_journal:
}
}
+ if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) &&
+ !(sb->s_flags & MS_RDONLY) &&
+ !EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) {
+ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
+ ext4_commit_super(sb, 1);
+ }
+
/*
* Get the # of file system overhead blocks from the
* superblock if present.
@@ -4570,7 +4588,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
- if (!sbh || block_device_ejected(sb))
+ if (!sbh)
return error;
if (buffer_write_io_error(sbh)) {
/*
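
The super.c changes above latch the incompat ENCRYPT feature: the first read-write mount with test_dummy_encryption set turns the feature bit on and commits the superblock, and later mounts see it already set. A condensed user-space sketch of that latch; the 0x10000 value for EXT4_FEATURE_INCOMPAT_ENCRYPT and the struct shape are stated here as assumptions.

#include <stdbool.h>
#include <stdio.h>

#define INCOMPAT_ENCRYPT 0x10000 /* assumed feature-bit value */

struct sb_state {
	unsigned int incompat;   /* on-disk incompat feature mask */
	bool rdonly;
	bool test_dummy_encryption;
};

/* Condensed form of the check added to ext4_fill_super() above. */
static void maybe_latch_encrypt(struct sb_state *sb)
{
	if (sb->test_dummy_encryption && !sb->rdonly &&
	    !(sb->incompat & INCOMPAT_ENCRYPT)) {
		sb->incompat |= INCOMPAT_ENCRYPT;
		printf("ext4_commit_super(): ENCRYPT feature set\n");
	}
}

int main(void)
{
	struct sb_state sb = { .rdonly = false,
			       .test_dummy_encryption = true };

	maybe_latch_encrypt(&sb); /* first mount: set bit, commit */
	maybe_latch_encrypt(&sb); /* later mounts: already set, no-op */
	return 0;
}
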
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index ff3711932018..136ca0e911fd 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -18,13 +18,101 @@
*/
#include <linux/fs.h>
-#include <linux/jbd2.h>
#include <linux/namei.h>
#include "ext4.h"
#include "xattr.h"
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
+ struct page *cpage = NULL;
+ char *caddr, *paddr = NULL;
+ struct ext4_str cstr, pstr;
+ struct inode *inode = dentry->d_inode;
+ struct ext4_fname_crypto_ctx *ctx = NULL;
+ struct ext4_encrypted_symlink_data *sd;
+ loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
+ int res;
+ u32 plen, max_size = inode->i_sb->s_blocksize;
+
+ if (!ext4_encrypted_inode(inode))
+ return page_follow_link_light(dentry, nd);
+
+ ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ if (ext4_inode_is_fast_symlink(inode)) {
+ caddr = (char *) EXT4_I(dentry->d_inode)->i_data;
+ max_size = sizeof(EXT4_I(dentry->d_inode)->i_data);
+ } else {
+ cpage = read_mapping_page(inode->i_mapping, 0, NULL);
+ if (IS_ERR(cpage)) {
+ ext4_put_fname_crypto_ctx(&ctx);
+ return cpage;
+ }
+ caddr = kmap(cpage);
+ caddr[size] = 0;
+ }
+
+ /* Symlink is encrypted */
+ sd = (struct ext4_encrypted_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
+ cstr.len = le16_to_cpu(sd->len);
+ if ((cstr.len +
+ sizeof(struct ext4_encrypted_symlink_data) - 1) >
+ max_size) {
+ /* Symlink data on the disk is corrupted */
+ res = -EIO;
+ goto errout;
+ }
+ plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
+ EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
+ paddr = kmalloc(plen + 1, GFP_NOFS);
+ if (!paddr) {
+ res = -ENOMEM;
+ goto errout;
+ }
+ pstr.name = paddr;
+ res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr);
+ if (res < 0)
+ goto errout;
+ /* Null-terminate the name */
+ if (res <= plen)
+ paddr[res] = '\0';
+ nd_set_link(nd, paddr);
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (cpage) {
+ kunmap(cpage);
+ page_cache_release(cpage);
+ }
+ return NULL;
+errout:
+ ext4_put_fname_crypto_ctx(&ctx);
+ if (cpage) {
+ kunmap(cpage);
+ page_cache_release(cpage);
+ }
+ kfree(paddr);
+ return ERR_PTR(res);
+}
+
+static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ struct page *page = cookie;
+
+ if (!page) {
+ kfree(nd_get_link(nd));
+ } else {
+ kunmap(page);
+ page_cache_release(page);
+ }
+}
+#endif
+
+static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
+{
struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
nd_set_link(nd, (char *) ei->i_data);
return NULL;
@@ -32,8 +120,13 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ .follow_link = ext4_follow_link,
+ .put_link = ext4_put_link,
+#else
.follow_link = page_follow_link_light,
.put_link = page_put_link,
+#endif
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
@@ -43,7 +136,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = ext4_follow_link,
+ .follow_link = ext4_follow_fast_link,
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
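
Symmetrically to the write side, ext4_follow_link() above validates the stored record before decrypting: the stated ciphertext length plus the header must fit in the space the symlink body can legally occupy (i_data for fast symlinks, one block otherwise), else -EIO. A user-space sketch of that bound check, using the same assumed record layout as the earlier sketch and a little-endian host for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct encrypted_symlink_record {
	uint16_t len;           /* little-endian on disk */
	char encrypted_path[];
} __attribute__((packed));

/* Mirrors the -EIO check in ext4_follow_link(): reject records whose
 * stated ciphertext length would run past the symlink body. */
static int record_fits(const struct encrypted_symlink_record *sd,
		       size_t max_size)
{
	return (size_t)sd->len + sizeof(*sd) <= max_size;
}

int main(void)
{
	unsigned char buf[64];
	struct encrypted_symlink_record *sd = (void *)buf;

	memset(buf, 0, sizeof(buf));
	sd->len = 32;
	printf("len=32 in 64 bytes: %s\n",
	       record_fits(sd, sizeof(buf)) ? "ok" : "corrupt");
	sd->len = 63;	/* 63 + 2-byte header > 64: corrupted */
	printf("len=63 in 64 bytes: %s\n",
	       record_fits(sd, sizeof(buf)) ? "ok" : "corrupt");
	return 0;
}
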
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e09fc77395c..759842ff8af0 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -55,7 +55,6 @@
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
-#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -639,8 +638,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
free += EXT4_XATTR_LEN(name_len);
}
if (i->value) {
- if (free < EXT4_XATTR_SIZE(i->value_len) ||
- free < EXT4_XATTR_LEN(name_len) +
+ if (free < EXT4_XATTR_LEN(name_len) +
EXT4_XATTR_SIZE(i->value_len))
return -ENOSPC;
}
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 29bedf5589f6..ddc0957760ba 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -23,6 +23,7 @@
#define EXT4_XATTR_INDEX_SECURITY 6
#define EXT4_XATTR_INDEX_SYSTEM 7
#define EXT4_XATTR_INDEX_RICHACL 8
+#define EXT4_XATTR_INDEX_ENCRYPTION 9
struct ext4_xattr_header {
__le32 h_magic; /* magic number for identification */
@@ -98,6 +99,8 @@ extern const struct xattr_handler ext4_xattr_user_handler;
extern const struct xattr_handler ext4_xattr_trusted_handler;
extern const struct xattr_handler ext4_xattr_security_handler;
+#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);