FBE: Add support for hardware based FBE on f2fs and adapt ext4 fs

This is a snapshot of the crypto drivers as of msm-4.14 commit
367c46b1 (Enable hardware based FBE on f2fs and adapt ext4 fs).

Change-Id: Ifb52ed101d6e971c5823037f7895049b830c78c5
Signed-off-by: Zhen Kong <zkong@codeaurora.org>
Author: Zhen Kong <zkong@codeaurora.org>  2019-03-14 10:55:19 -07:00 (via Gerrit)
commit ee7bdc62fd (parent 7f3b0bb0b3)
57 changed files with 2872 additions and 57 deletions

block/bio.c

@ -580,6 +580,14 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
}
EXPORT_SYMBOL(bio_phys_segments);
static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
{
#ifdef CONFIG_PFK
dst->bi_crypt_key = src->bi_crypt_key;
dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
#endif
}
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
@ -609,7 +617,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
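For illustration (not part of the patch): after this change a fast clone shares the inline-crypto context of its source, since bio_clone_crypt_key() copies both fields. A minimal sketch, assuming CONFIG_PFK=y:

struct bio *clone = bio_clone_fast(src, GFP_NOIO, bs);
/* With CONFIG_PFK the clone now shares src's crypto context:
 *   clone->bi_crypt_key   == src->bi_crypt_key
 *   clone->bi_iter.bi_dun == src->bi_iter.bi_dun
 */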

block/blk-core.c

@ -1832,6 +1832,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bio->bi_next = req->bio;
req->bio = bio;
#ifdef CONFIG_PFK
WARN_ON(req->__dun || bio->bi_iter.bi_dun);
#endif
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@ -1981,6 +1984,9 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
else
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
req->write_hint = bio->bi_write_hint;
#ifdef CONFIG_PFK
req->__dun = bio->bi_iter.bi_dun;
#endif
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@ -3123,8 +3129,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
if (!blk_rq_is_passthrough(req))
if (!blk_rq_is_passthrough(req)) {
req->__sector += total_bytes >> 9;
#ifdef CONFIG_PFK
if (req->__dun)
req->__dun += total_bytes >> 12;
#endif
}
/* mixed attributes always follow the first bio */
if (req->rq_flags & RQF_MIXED_MERGE) {
@ -3488,6 +3499,9 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
#ifdef CONFIG_PFK
dst->__dun = blk_rq_dun(src);
#endif
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
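Worked example (illustrative, not part of the patch) for the blk_update_request() hunk above: __sector advances in 512-byte sectors (>> 9) while __dun advances in 4 KiB data units (>> 12), so a partial completion of 8 KiB moves the two cursors by different amounts:

unsigned int total_bytes = 8192;              /* 8 KiB completed   */
sector_t sector = 2048 + (total_bytes >> 9);  /* 2048 + 16 = 2064  */
u64 dun         = 256  + (total_bytes >> 12); /* 256  + 2  = 258   */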

block/blk-merge.c

@ -9,7 +9,7 @@
#include <linux/scatterlist.h>
#include <trace/events/block.h>
#include <linux/pfk.h>
#include "blk.h"
static struct bio *blk_bio_discard_split(struct request_queue *q,
@ -670,6 +670,11 @@ static void blk_account_io_merge(struct request *req)
}
}
static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
{
return (!pfk_allow_merge_bio(bio, nxt));
}
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@ -708,6 +713,9 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->write_hint != next->write_hint)
return NULL;
if (crypto_not_mergeable(req->bio, next->bio))
return NULL;
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@ -838,11 +846,18 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->write_hint != bio->bi_write_hint)
return false;
if (crypto_not_mergeable(rq->bio, bio))
return false;
return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
#ifdef CONFIG_PFK
if (blk_rq_dun(rq) || bio_dun(bio))
return ELEVATOR_NO_MERGE;
#endif
if (req_op(rq) == REQ_OP_DISCARD &&
queue_max_discard_segments(rq->q) > 1)
return ELEVATOR_DISCARD_MERGE;

block/blk.h

@ -54,15 +54,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
lockdep_assert_held(q->queue_lock);
}
static inline void queue_flag_set_unlocked(unsigned int flag,
struct request_queue *q)
{
if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
kref_read(&q->kobj.kref))
lockdep_assert_held(q->queue_lock);
__set_bit(flag, &q->queue_flags);
}
static inline void queue_flag_clear_unlocked(unsigned int flag,
struct request_queue *q)
{

block/elevator.c

@ -422,7 +422,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
enum elv_merge ret;
/*
* Levels of merges:
* nomerges: No merges at all attempted
@ -435,9 +435,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
/*
* First try one-hit cache.
*/
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
if (q->last_merge) {
if (!elv_bio_merge_ok(q->last_merge, bio))
return ELEVATOR_NO_MERGE;
ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;

drivers/crypto/Kconfig

@ -762,4 +762,8 @@ config CRYPTO_DEV_CCREE
source "drivers/crypto/hisilicon/Kconfig"
if ARCH_QCOM
source drivers/crypto/msm/Kconfig
endif
endif # CRYPTO_HW

drivers/crypto/Makefile

@ -21,7 +21,7 @@ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += msm/
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o

drivers/scsi/scsi_lib.c

@ -2244,6 +2244,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
if (!shost->use_clustering)
q->limits.cluster = 0;
if (shost->inlinecrypt_support)
queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
* which is a common minimum for HBAs, and the minimum DMA alignment,

drivers/scsi/ufs/ufs-qcom.c

@ -938,8 +938,9 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
else
return 0;
/* Use request LBA as the DUN value */
/* Use request LBA or given dun as the DUN value */
if (req->bio) {
#ifdef CONFIG_PFK
if (bio_dun(req->bio)) {
/* dun @bio can be split, so we have to adjust offset */
*dun = bio_dun(req->bio);
@ -947,8 +948,11 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
*dun = req->bio->bi_iter.bi_sector;
*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
}
#else
*dun = req->bio->bi_iter.bi_sector;
*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
#endif
}
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
return ret;
@ -2191,6 +2195,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
__func__, err);
goto out_variant_clear;
} else {
hba->host->inlinecrypt_support = 1;
}
host->generic_phy = devm_phy_get(dev, "ufsphy");
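Worked example (illustrative) for the fallback above that derives the DUN from the request LBA; the exact value of UFS_QCOM_ICE_TR_DATA_UNIT_4_KB is an assumption here (a 512B-to-4KB shift, i.e. 3):

u64 dun = 4096;                          /* bio at 512-byte sector 4096 */
dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;  /* assumed shift of 3 -> 512   */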

fs/crypto/Makefile

@ -1,4 +1,7 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
ccflags-y += -Ifs/ext4
ccflags-y += -Ifs/f2fs
fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
fscrypto-$(CONFIG_BLOCK) += bio.o

fs/crypto/bio.c

@ -33,14 +33,17 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
int ret = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
} else if (done) {
if (fscrypt_using_hardware_encryption(page->mapping->host)) {
SetPageUptodate(page);
} else {
int ret = fscrypt_decrypt_page(page->mapping->host,
page, PAGE_SIZE, 0, page->index);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
} else if (done) {
SetPageUptodate(page);
}
}
if (done)
unlock_page(page);

fs/crypto/fscrypt_ice.c (new file, 140 lines)

@ -0,0 +1,140 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "fscrypt_ice.h"
int fscrypt_using_hardware_encryption(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
return S_ISREG(inode->i_mode) && ci &&
ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
}
EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
/*
* Retrieves encryption key from the inode
*/
char *fscrypt_get_ice_encryption_key(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
if (!inode)
return NULL;
ci = inode->i_crypt_info;
if (!ci)
return NULL;
return &(ci->ci_raw_key[0]);
}
/*
* Retrieves encryption salt from the inode
*/
char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
{
struct fscrypt_info *ci = NULL;
if (!inode)
return NULL;
ci = inode->i_crypt_info;
if (!ci)
return NULL;
return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
}
/*
* returns true if the cipher mode in inode is AES XTS
*/
int fscrypt_is_aes_xts_cipher(const struct inode *inode)
{
struct fscrypt_info *ci = inode->i_crypt_info;
if (!ci)
return 0;
return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
}
/*
* returns true if encryption info in both inodes is equal
*/
bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
const struct inode *inode2)
{
char *key1 = NULL;
char *key2 = NULL;
char *salt1 = NULL;
char *salt2 = NULL;
if (!inode1 || !inode2)
return false;
if (inode1 == inode2)
return true;
/*
* both do not belong to ice, so we don't care, they are equal
* for us
*/
if (!fscrypt_should_be_processed_by_ice(inode1) &&
!fscrypt_should_be_processed_by_ice(inode2))
return true;
/* one belongs to ice, the other does not -> not equal */
if (fscrypt_should_be_processed_by_ice(inode1) ^
fscrypt_should_be_processed_by_ice(inode2))
return false;
key1 = fscrypt_get_ice_encryption_key(inode1);
key2 = fscrypt_get_ice_encryption_key(inode2);
salt1 = fscrypt_get_ice_encryption_salt(inode1);
salt2 = fscrypt_get_ice_encryption_salt(inode2);
/* key and salt should not be null by this point */
if (!key1 || !key2 || !salt1 || !salt2 ||
(fscrypt_get_ice_encryption_key_size(inode1) !=
fscrypt_get_ice_encryption_key_size(inode2)) ||
(fscrypt_get_ice_encryption_salt_size(inode1) !=
fscrypt_get_ice_encryption_salt_size(inode2)))
return false;
if ((memcmp(key1, key2,
fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
(memcmp(salt1, salt2,
fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
return true;
return false;
}
void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
{
if (fscrypt_should_be_processed_by_ice(inode))
bio->bi_iter.bi_dun = dun;
}
EXPORT_SYMBOL(fscrypt_set_ice_dun);
/*
* This function will be used for filesystem when deciding to merge bios.
* Basic assumption is, if inline_encryption is set, single bio has to
* guarantee consecutive LBAs as well as ino|pg->index.
*/
bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
{
if (!bio)
return true;
/* if both of them are not encrypted, no further check is needed */
if (!bio_dun(bio) && !bio_encrypted)
return true;
/* ICE allows only consecutive iv_key stream. */
return bio_end_dun(bio) == dun;
}
EXPORT_SYMBOL(fscrypt_mergeable_bio);
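Illustration (not part of the patch) of the continuity rule fscrypt_mergeable_bio() enforces:

/* A bio starting at DUN 100 and covering 16 sectors (8 KiB, i.e. 2 data
 * units, since bio_duns() is bio_sectors() >> 3) ends at
 * bio_end_dun(bio) = 100 + 2 = 102, so:
 */
fscrypt_mergeable_bio(bio, 102, true);  /* true: DUN stream continues  */
fscrypt_mergeable_bio(bio, 103, true);  /* false: would leave a gap    */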

fs/crypto/fscrypt_ice.h (new file, 99 lines)

@ -0,0 +1,99 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _FSCRYPT_ICE_H
#define _FSCRYPT_ICE_H
#include <linux/blkdev.h>
#include "fscrypt_private.h"
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
{
if (!inode->i_sb->s_cop)
return false;
if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
return false;
return fscrypt_using_hardware_encryption(inode);
}
static inline int fscrypt_is_ice_capable(const struct super_block *sb)
{
return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
}
int fscrypt_is_aes_xts_cipher(const struct inode *inode);
char *fscrypt_get_ice_encryption_key(const struct inode *inode);
char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
const struct inode *inode2);
static inline size_t fscrypt_get_ice_encryption_key_size(
const struct inode *inode)
{
return FS_AES_256_XTS_KEY_SIZE / 2;
}
static inline size_t fscrypt_get_ice_encryption_salt_size(
const struct inode *inode)
{
return FS_AES_256_XTS_KEY_SIZE / 2;
}
#else
static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
{
return false;
}
static inline int fscrypt_is_ice_capable(const struct super_block *sb)
{
return false;
}
static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
{
return NULL;
}
static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
{
return NULL;
}
static inline size_t fscrypt_get_ice_encryption_key_size(
const struct inode *inode)
{
return 0;
}
static inline size_t fscrypt_get_ice_encryption_salt_size(
const struct inode *inode)
{
return 0;
}
static inline int fscrypt_is_xts_cipher(const struct inode *inode)
{
return 0;
}
static inline bool fscrypt_is_ice_encryption_info_equal(
const struct inode *inode1,
const struct inode *inode2)
{
return false;
}
static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
{
return 0;
}
#endif
#endif /* _FSCRYPT_ICE_H */
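A minimal sketch (mirroring the helpers above; `ci` stands for an inode's struct fscrypt_info) of the key layout ICE consumes: the 64-byte AES-256-XTS key is split into two 32-byte halves, the first used as the "key" and the second as the "salt":

size_t half = FS_AES_256_XTS_KEY_SIZE / 2;  /* 32 bytes        */
char *key   = &ci->ci_raw_key[0];           /* first XTS half  */
char *salt  = &ci->ci_raw_key[half];        /* second XTS half */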

fs/crypto/fscrypt_private.h

@ -12,11 +12,23 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
#ifndef __FS_HAS_ENCRYPTION
#define __FS_HAS_ENCRYPTION 1
#endif
#include <linux/fscrypt.h>
#include <crypto/hash.h>
#include <linux/pfk.h>
/* Encryption parameters */
#define FS_AES_128_ECB_KEY_SIZE 16
#define FS_AES_128_CBC_KEY_SIZE 16
#define FS_AES_128_CTS_KEY_SIZE 16
#define FS_AES_256_GCM_KEY_SIZE 32
#define FS_AES_256_CBC_KEY_SIZE 32
#define FS_AES_256_CTS_KEY_SIZE 32
#define FS_AES_256_XTS_KEY_SIZE 64
#define FS_KEY_DERIVATION_NONCE_SIZE 16
/**
@ -82,11 +94,13 @@ struct fscrypt_info {
struct fscrypt_master_key *ci_master_key;
/* fields from the fscrypt_context */
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
typedef enum {
@ -112,6 +126,10 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
return true;
if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
return false;
}
@ -168,6 +186,7 @@ struct fscrypt_mode {
int ivsize;
bool logged_impl_name;
bool needs_essiv;
bool inline_encryption;
};
extern void __exit fscrypt_essiv_cleanup(void);

fs/crypto/keyinfo.c

@ -18,6 +18,7 @@
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
#include "fscrypt_ice.h"
static struct crypto_shash *essiv_hash_tfm;
@ -161,11 +162,20 @@ static struct fscrypt_mode available_modes[] = {
.keysize = 32,
.ivsize = 32,
},
[FS_ENCRYPTION_MODE_PRIVATE] = {
.friendly_name = "ice",
.cipher_str = "xts(aes)",
.keysize = 64,
.ivsize = 16,
.inline_encryption = true,
},
};
static struct fscrypt_mode *
select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
{
struct fscrypt_mode *mode = NULL;
if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
fscrypt_warn(inode->i_sb,
"inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)",
@ -174,8 +184,19 @@ select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
return ERR_PTR(-EINVAL);
}
if (S_ISREG(inode->i_mode))
return &available_modes[ci->ci_data_mode];
if (S_ISREG(inode->i_mode)) {
mode = &available_modes[ci->ci_data_mode];
if (IS_ERR(mode)) {
fscrypt_warn(inode->i_sb, "Invalid mode");
return ERR_PTR(-EINVAL);
}
if (mode->inline_encryption &&
!fscrypt_is_ice_capable(inode->i_sb)) {
fscrypt_warn(inode->i_sb, "ICE support not available");
return ERR_PTR(-EINVAL);
}
return mode;
}
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return &available_modes[ci->ci_filename_mode];
@ -220,6 +241,9 @@ static int find_and_derive_key(const struct inode *inode,
memcpy(derived_key, payload->raw, mode->keysize);
err = 0;
}
} else if (mode->inline_encryption) {
memcpy(derived_key, payload->raw, mode->keysize);
err = 0;
} else {
err = derive_key_aes(payload->raw, ctx, derived_key,
mode->keysize);
@ -495,12 +519,21 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (ci->ci_master_key) {
put_master_key(ci->ci_master_key);
} else {
crypto_free_skcipher(ci->ci_ctfm);
crypto_free_cipher(ci->ci_essiv_tfm);
if (ci->ci_ctfm)
crypto_free_skcipher(ci->ci_ctfm);
if (ci->ci_essiv_tfm)
crypto_free_cipher(ci->ci_essiv_tfm);
}
memset(ci->ci_raw_key, 0, sizeof(ci->ci_raw_key));
kmem_cache_free(fscrypt_info_cachep, ci);
}
static int fscrypt_data_encryption_mode(struct inode *inode)
{
return fscrypt_should_be_processed_by_ice(inode) ?
FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
}
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@ -524,7 +557,8 @@ int fscrypt_get_encryption_info(struct inode *inode)
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
ctx.contents_encryption_mode =
fscrypt_data_encryption_mode(inode);
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
} else if (res != sizeof(ctx)) {
@ -569,9 +603,13 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (res)
goto out;
res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
if (res)
goto out;
if (!mode->inline_encryption) {
res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
if (res)
goto out;
} else {
memcpy(crypt_info->ci_raw_key, raw_key, mode->keysize);
}
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
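Summary sketch (condensed from the hunks above, not verbatim code) of the new key path in fscrypt_get_encryption_info(): for the ICE mode the derived key is never handed to a software skcipher, it is kept raw for later programming into the hardware:

if (mode->inline_encryption)    /* FS_ENCRYPTION_MODE_PRIVATE (ICE)  */
        memcpy(crypt_info->ci_raw_key, raw_key, mode->keysize);
else                            /* software crypto: allocate the tfm */
        res = setup_crypto_transform(crypt_info, mode, raw_key, inode);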

fs/direct-io.c

@ -37,6 +37,8 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
#include <linux/fscrypt.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
@ -451,6 +453,23 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}
#ifdef CONFIG_PFK
static bool is_inode_filesystem_type(const struct inode *inode,
const char *fs_type)
{
if (!inode || !fs_type)
return false;
if (!inode->i_sb)
return false;
if (!inode->i_sb->s_type)
return false;
return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
}
#endif
/*
* In the AIO read case we speculatively dirty the pages before starting IO.
* During IO completion, any of these pages which happen to have been written
@ -473,7 +492,17 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_disk;
#ifdef CONFIG_PFK
bio->bi_dio_inode = dio->inode;
/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */
#define PG_DUN_NEW(i, p) \
(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff))
if (is_inode_filesystem_type(dio->inode, "f2fs"))
fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode,
(sdio->logical_offset_in_bio >> PAGE_SHIFT)));
#endif
if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
dio->bio_cookie = BLK_QC_T_NONE;
@ -485,6 +514,18 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
sdio->logical_offset_in_bio = 0;
}
struct inode *dio_bio_get_inode(struct bio *bio)
{
struct inode *inode = NULL;
if (bio == NULL)
return NULL;
#ifdef CONFIG_PFK
inode = bio->bi_dio_inode;
#endif
return inode;
}
/*
* Release any resources in case of a failure
*/

fs/ext4/Kconfig

@ -107,10 +107,16 @@ config EXT4_ENCRYPTION
decrypted pages in the page cache.
config EXT4_FS_ENCRYPTION
bool
default y
bool "Ext4 FS Encryption"
default n
depends on EXT4_ENCRYPTION
config EXT4_FS_ICE_ENCRYPTION
bool "Ext4 Encryption with ICE support"
default n
depends on EXT4_FS_ENCRYPTION
depends on PFK
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS

fs/ext4/inode.c

@ -1219,7 +1219,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
S_ISREG(inode->i_mode) &&
!fscrypt_using_hardware_encryption(inode);
}
}
/*
@ -3765,9 +3766,14 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
get_block_func, ext4_end_io_dio, NULL,
dio_flags);
#if defined(CONFIG_EXT4_FS_ENCRYPTION)
WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode));
#endif
ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
@ -3874,8 +3880,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
#if defined(CONFIG_EXT4_FS_ENCRYPTION)
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
&& !fscrypt_using_hardware_encryption(inode))
return 0;
#endif
@ -4090,7 +4097,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode)) {
ext4_encrypted_inode(inode) &&
!fscrypt_using_hardware_encryption(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);

fs/ext4/page-io.c

@ -482,8 +482,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
page->index, gfp_flags);
if (!fscrypt_using_hardware_encryption(inode))
data_page = fscrypt_encrypt_page(inode,
page, PAGE_SIZE, 0,
page->index, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {

fs/ext4/readpage.c

@ -280,6 +280,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
}
if (bio == NULL) {
struct fscrypt_ctx *ctx = NULL;
unsigned int flags = 0;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
@ -298,8 +299,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
bio_set_op_attrs(bio, REQ_OP_READ,
is_readahead ? REQ_RAHEAD : 0);
if (is_readahead)
flags = flags | REQ_RAHEAD;
bio_set_op_attrs(bio, REQ_OP_READ, flags);
}
length = first_hole << blkbits;

fs/ext4/super.c

@ -1328,6 +1328,11 @@ static bool ext4_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}
static inline bool ext4_is_encrypted(struct inode *inode)
{
return ext4_encrypted_inode(inode);
}
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@ -1335,6 +1340,7 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = EXT4_NAME_LEN,
.is_encrypted = ext4_is_encrypted,
};
#endif

fs/f2fs/data.c

@ -481,6 +481,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
struct inode *inode = fio->page->mapping->host;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@ -493,14 +494,13 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
1, is_read_io(fio->op), fio->type, fio->temp);
if (f2fs_may_encrypt_bio(inode, fio))
fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
@ -516,6 +516,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
struct inode *inode;
bool bio_encrypted;
u64 dun;
f2fs_bug_on(sbi, is_read_io(fio->op));
@ -538,6 +541,9 @@ next:
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
inode = fio->page->mapping->host;
dun = PG_DUN(inode, fio->page);
bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
/* set submitted = true as a return value */
fio->submitted = true;
@ -548,6 +554,11 @@ next:
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
/* ICE support */
if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
@ -559,6 +570,8 @@ alloc_new:
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false,
fio->type, fio->temp);
if (bio_encrypted)
fscrypt_set_ice_dun(inode, io->bio, dun);
io->fio = *fio;
}
@ -600,9 +613,10 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
bio_set_op_attrs(bio, REQ_OP_READ, 0);
if (f2fs_encrypted_file(inode))
if (f2fs_encrypted_file(inode) &&
!fscrypt_using_hardware_encryption(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
@ -627,6 +641,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
if (IS_ERR(bio))
return PTR_ERR(bio);
if (f2fs_may_encrypt_bio(inode, NULL))
fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
@ -1557,6 +1574,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
bool bio_encrypted;
u64 dun;
map.m_pblk = 0;
map.m_lblk = 0;
@ -1639,6 +1658,14 @@ submit_and_realloc:
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
dun = PG_DUN(inode, page);
bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0);
@ -1646,6 +1673,8 @@ submit_and_realloc:
bio = NULL;
goto set_error_page;
}
if (bio_encrypted)
fscrypt_set_ice_dun(inode, bio, dun);
}
/*
@ -1726,6 +1755,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
if (fscrypt_using_hardware_encryption(inode))
return 0;
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {

fs/f2fs/f2fs.h

@ -3590,6 +3590,10 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
if ((f2fs_encrypted_file(inode)) &&
!fscrypt_using_hardware_encryption(inode))
return true;
if (f2fs_post_read_required(inode))
return true;
if (sbi->s_ndevs)
@ -3609,6 +3613,16 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return false;
}
static inline bool f2fs_may_encrypt_bio(struct inode *inode,
struct f2fs_io_info *fio)
{
if (fio && (fio->type != DATA || fio->encrypted_page))
return false;
return (f2fs_encrypted_file(inode) &&
fscrypt_using_hardware_encryption(inode));
}
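Usage note (illustrative; both call sites appear in the fs/f2fs/data.c hunks above): writers pass their fio, readers pass NULL, so the hardware path is taken only for DATA I/O on ICE-encrypted files whose pages were not already software-encrypted:

/* write path: fio describes the I/O */
if (f2fs_may_encrypt_bio(inode, fio))
        fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
/* read path: no fio available */
if (f2fs_may_encrypt_bio(inode, NULL))
        fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));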
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
unsigned int type);

fs/f2fs/super.c

@ -2190,6 +2190,11 @@ static bool f2fs_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}
static inline bool f2fs_is_encrypted(struct inode *inode)
{
return f2fs_encrypted_file(inode);
}
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
@ -2197,6 +2202,7 @@ static const struct fscrypt_operations f2fs_cryptops = {
.dummy_context = f2fs_dummy_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = F2FS_NAME_LEN,
.is_encrypted = f2fs_is_encrypted,
};
#endif

fs/file_table.c

@ -326,6 +326,12 @@ void flush_delayed_fput(void)
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
void flush_delayed_fput_wait(void)
{
delayed_fput(NULL);
flush_delayed_work(&delayed_fput_work);
}
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {

fs/namei.c

@ -2926,6 +2926,11 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
if (error)
return error;
error = security_inode_post_create(dir, dentry, mode);
if (error)
return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@ -3750,6 +3755,11 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
if (error)
return error;
error = security_inode_post_create(dir, dentry, mode);
if (error)
return error;
if (!error)
fsnotify_create(dir, dentry);
return error;

fs/namespace.c

@ -21,6 +21,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
@ -1134,6 +1135,12 @@ static void delayed_mntput(struct work_struct *unused)
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
void flush_delayed_mntput_wait(void)
{
delayed_mntput(NULL);
flush_delayed_work(&delayed_mntput_work);
}
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
@ -1650,6 +1657,7 @@ int ksys_umount(char __user *name, int flags)
struct mount *mnt;
int retval;
int lookup_flags = 0;
bool user_request = !(current->flags & PF_KTHREAD);
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
@ -1675,11 +1683,36 @@ int ksys_umount(char __user *name, int flags)
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
goto dput_and_out;
/* flush delayed_fput to put mnt_count */
if (user_request)
flush_delayed_fput_wait();
retval = do_umount(mnt, flags);
dput_and_out:
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path.dentry);
mntput_no_expire(mnt);
if (!user_request)
goto out;
if (!retval) {
/*
* If the last delayed_fput() is called during do_umount()
* and makes mnt_count zero, we need to guarantee to register
* delayed_mntput by waiting for delayed_fput work again.
*/
flush_delayed_fput_wait();
/* flush delayed_mntput_work to put sb->s_active */
flush_delayed_mntput_wait();
}
if (!retval || (flags & MNT_FORCE)) {
/* filesystem needs to handle unclosed namespaces */
if (mnt->mnt.mnt_sb->s_op->umount_end)
mnt->mnt.mnt_sb->s_op->umount_end(mnt->mnt.mnt_sb,
flags);
}
out:
return retval;
}

include/linux/bio.h

@ -73,6 +73,9 @@
#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
#define bio_dun(bio) ((bio)->bi_iter.bi_dun)
#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */
#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio))
/*
* Return the data direction, READ or WRITE.
@ -170,6 +173,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
#ifdef CONFIG_PFK
if (iter->bi_dun)
iter->bi_dun += bytes >> 12;
#endif
if (bio_no_advance_iter(bio)) {
iter->bi_size -= bytes;
iter->bi_done += bytes;

include/linux/blk_types.h

@ -187,6 +187,13 @@ struct bio {
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
};
#ifdef CONFIG_PFK
/* Encryption key to use (NULL if none) */
const struct blk_encryption_key *bi_crypt_key;
#endif
#ifdef CONFIG_DM_DEFAULT_KEY
int bi_crypt_skip;
#endif
unsigned short bi_vcnt; /* how many bio_vec's */
@ -201,7 +208,9 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
#ifdef CONFIG_PFK
struct inode *bi_dio_inode;
#endif
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
@ -324,6 +333,11 @@ enum req_flag_bits {
__REQ_SORTED = __REQ_RAHEAD, /* elevator knows about this request */
__REQ_URGENT, /* urgent request */
/* Android specific flags */
__REQ_NOENCRYPT, /*
* ok to not encrypt (already encrypted at fs
* level)
*/
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
@ -348,6 +362,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)

include/linux/blkdev.h

@ -161,6 +161,7 @@ struct request {
unsigned int __data_len; /* total data len */
int tag;
sector_t __sector; /* sector cursor */
u64 __dun; /* dun for UFS */
struct bio *bio;
struct bio *biotail;
@ -699,6 +700,7 @@ struct request_queue {
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_INLINECRYPT 30 /* inline encryption support */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@ -731,6 +733,8 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_inlinecrypt(q) \
test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@ -878,6 +882,15 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
static inline void queue_flag_set_unlocked(unsigned int flag,
struct request_queue *q)
{
if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
kref_read(&q->kobj.kref))
lockdep_assert_held(q->queue_lock);
__set_bit(flag, &q->queue_flags);
}
/*
* q->prep_rq_fn return values
*/
@ -1043,6 +1056,11 @@ static inline sector_t blk_rq_pos(const struct request *rq)
return rq->__sector;
}
static inline sector_t blk_rq_dun(const struct request *rq)
{
return rq->__dun;
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;

include/linux/bvec.h

@ -44,6 +44,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
u64 bi_dun; /* DUN setting for bio */
};
/*

include/linux/file.h

@ -87,6 +87,7 @@ extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
extern void flush_delayed_fput(void);
extern void flush_delayed_fput_wait(void);
extern void __fput_sync(struct file *);
#endif /* __LINUX_FILE_H */

View File

@ -1878,6 +1878,7 @@ struct super_operations {
void *(*clone_mnt_data) (void *);
void (*copy_mnt_data) (void *, void *);
void (*umount_begin) (struct super_block *);
void (*umount_end)(struct super_block *sb, int flags);
int (*show_options)(struct seq_file *, struct dentry *);
int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
@ -3096,6 +3097,8 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
struct inode *dio_bio_get_inode(struct bio *bio);
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);

include/linux/fscrypt.h

@ -19,6 +19,11 @@
#define FS_CRYPTO_BLOCK_SIZE 16
struct fscrypt_ctx;
/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */
#define PG_DUN(i, p) \
(((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
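Worked example (illustrative): PG_DUN packs the inode number into the upper 32 bits and the page index into the lower 32 bits, so inode 42, page index 3 gives:

u64 dun = ((u64)42 << 32) | 3;   /* 0x0000002A00000003 */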
struct fscrypt_info;
struct fscrypt_str {

include/linux/fscrypt_notsupp.h

@ -174,6 +174,21 @@ static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return -EOPNOTSUPP;
}
/* fscrypt_ice.c */
static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
{
return 0;
}
static inline void fscrypt_set_ice_dun(const struct inode *inode,
struct bio *bio, u64 dun) {}
static inline bool fscrypt_mergeable_bio(struct bio *bio,
sector_t iv_block, bool bio_encrypted)
{
return true;
}
/* hooks.c */
static inline int fscrypt_file_open(struct inode *inode, struct file *filp)

include/linux/fscrypt_supp.h

@ -30,6 +30,7 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
bool (*is_encrypted)(struct inode *inode);
};
struct fscrypt_ctx {
@ -182,6 +183,12 @@ extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
/* fscrypt_ice.c */
extern int fscrypt_using_hardware_encryption(const struct inode *inode);
extern void fscrypt_set_ice_dun(const struct inode *inode,
struct bio *bio, u64 dun);
extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);

include/linux/lsm_hooks.h

@ -1516,6 +1516,8 @@ union security_list_options {
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@ -1830,6 +1832,7 @@ struct security_hook_heads {
struct hlist_head inode_free_security;
struct hlist_head inode_init_security;
struct hlist_head inode_create;
struct hlist_head inode_post_create;
struct hlist_head inode_link;
struct hlist_head inode_unlink;
struct hlist_head inode_symlink;

include/linux/security.h

@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
struct linux_binprm;
struct cred;
@ -283,6 +284,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_post_create(struct inode *dir, struct dentry *dentry,
umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@ -671,6 +674,13 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
static inline int security_inode_post_create(struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
return 0;
}
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)

include/scsi/scsi_host.h

@ -648,6 +648,9 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
/* Inline encryption support? */
unsigned inlinecrypt_support:1;
unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
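Sketch of the capability chain this flag starts; all three steps are visible elsewhere in this patch:

/* ufs_qcom_init():          hba->host->inlinecrypt_support = 1;
 * __scsi_init_queue():      queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
 * fscrypt_is_ice_capable(): blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
 */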

include/uapi/linux/fs.h

@ -283,6 +283,7 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_ADIANTUM 9
#define FS_ENCRYPTION_MODE_PRIVATE 127
struct fscrypt_policy {
__u8 version;

security/Kconfig

@ -6,6 +6,10 @@ menu "Security options"
source security/keys/Kconfig
if ARCH_QCOM
source security/pfe/Kconfig
endif
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n

security/Makefile

@ -10,6 +10,7 @@ subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@ -26,6 +27,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
obj-$(CONFIG_ARCH_QCOM) += pfe/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity

security/pfe/Kconfig (new file, 42 lines)

@ -0,0 +1,42 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Qualcomm Technologies, Inc Per File Encryption security device drivers"
depends on ARCH_QCOM
config PFT
bool "Per-File-Tagger driver"
depends on SECURITY
default n
help
This driver is used for tagging enterprise files.
It is part of the Per-File-Encryption (PFE) feature.
The driver tags files when they are created by
a registered application.
Tagged files are encrypted using the dm-req-crypt driver.
config PFK
bool "Per-File-Key driver"
depends on SECURITY
depends on SECURITY_SELINUX
default n
help
This driver is used for storing eCryptfs information
in the file's inode.
This is part of the eCryptfs hardware enhanced solution
provided by Qualcomm Technologies, Inc.
The information is used when the file is later encrypted
using ICE or the dm crypto engine.
config PFK_WRAPPED_KEY_SUPPORTED
bool "Per-File-Key driver with wrapped key support"
depends on SECURITY
depends on SECURITY_SELINUX
depends on QSEECOM
depends on PFK
default n
help
Adds wrapped key support in PFK driver. Instead of setting
the key directly in ICE, it unwraps the key and sets the key
in ICE.
It ensures the key is protected within a secure environment
and only the wrapped key is present in the kernel.
endmenu

security/pfe/Makefile (new file, 7 lines)

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
ccflags-y += -Ifs/crypto
ccflags-y += -Idrivers/misc
obj-$(CONFIG_PFT) += pft.o
obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o

security/pfe/pfk.c (new file, 547 lines)

@ -0,0 +1,547 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
/*
* Per-File-Key (PFK).
*
* This driver is responsible for overall management of various
* Per File Encryption variants that work on top of or as part of different
* file systems.
*
* The driver has the following purposes:
* 1) Define priorities between PFEs if more than one is enabled
* 2) Extract key information from the inode
* 3) Load and manage various keys in the ICE HW engine
* 4) Be invoked from the various layers in FS/BLOCK/STORAGE DRIVER
* that need to make decisions on HW encryption management of the data.
* Some examples:
* BLOCK LAYER: when it decides whether 2 chunks can be merged into one
* encryption / decryption request sent to the HW
*
* UFS DRIVER: when it needs to configure the ICE HW with a particular
* key slot to be used for encryption / decryption
*
* PFE variants can differ on particular way of storing the cryptographic info
* inside inode, actions to be taken upon file operations, etc., but the common
* properties are described above
*
*/
#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/bio.h>
#include <linux/security.h>
#include <crypto/algapi.h>
#include <crypto/ice.h>
#include <linux/pfk.h>
#include "pfk_kc.h"
#include "objsec.h"
#include "pfk_ice.h"
#include "pfk_ext4.h"
#include "pfk_f2fs.h"
#include "pfk_internal.h"
static bool pfk_ready;
/* might be replaced by a table when more than one cipher is supported */
#define PFK_SUPPORTED_KEY_SIZE 32
#define PFK_SUPPORTED_SALT_SIZE 32
/* Various PFE types and function tables to support each one of them */
enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
typedef int (*pfk_parse_inode_type)(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe);
typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2);
static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
&pfk_ext4_parse_inode, /* EXT4_CRYPT_PFE */
&pfk_f2fs_parse_inode, /* F2FS_CRYPT_PFE */
};
static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
&pfk_ext4_allow_merge_bio, /* EXT4_CRYPT_PFE */
&pfk_f2fs_allow_merge_bio, /* F2FS_CRYPT_PFE */
};
static void __exit pfk_exit(void)
{
pfk_ready = false;
pfk_ext4_deinit();
pfk_f2fs_deinit();
pfk_kc_deinit();
}
static int __init pfk_init(void)
{
int ret = 0;
ret = pfk_ext4_init();
if (ret != 0)
goto fail;
ret = pfk_f2fs_init();
if (ret != 0)
goto fail;
ret = pfk_kc_init();
if (ret != 0) {
pr_err("could init pfk key cache, error %d\n", ret);
pfk_ext4_deinit();
pfk_f2fs_deinit();
goto fail;
}
pfk_ready = true;
pr_debug("Driver initialized successfully\n");
return 0;
fail:
pr_err("Failed to init driver\n");
return -ENODEV;
}
/*
* If more than one type is supported simultaneously, this function will also
* set the priority between them
*/
static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
{
if (!inode)
return INVALID_PFE;
if (pfk_is_ext4_type(inode))
return EXT4_CRYPT_PFE;
if (pfk_is_f2fs_type(inode))
return F2FS_CRYPT_PFE;
return INVALID_PFE;
}
/**
* inode_to_filename() - get the filename from inode pointer.
* @inode: inode pointer
*
* it is used for debug prints.
*
* Return: filename string or "unknown".
*/
char *inode_to_filename(const struct inode *inode)
{
struct dentry *dentry = NULL;
char *filename = NULL;
if (!inode)
return "NULL";
if (hlist_empty(&inode->i_dentry))
return "unknown";
dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
filename = dentry->d_iname;
return filename;
}
/**
* pfk_is_ready() - driver is initialized and ready.
*
* Return: true if the driver is ready.
*/
static inline bool pfk_is_ready(void)
{
return pfk_ready;
}
/**
* pfk_bio_get_inode() - get the inode from a bio.
* @bio: Pointer to BIO structure.
*
* Walk the bio struct links to get the inode.
* Please note, that in general bio may consist of several pages from
* several files, but in our case we always assume that all pages come
* from the same file, since our logic ensures it. That is why we only
* walk through the first page to look for inode.
*
* Return: pointer to the inode struct if successful, or NULL otherwise.
*
*/
static struct inode *pfk_bio_get_inode(const struct bio *bio)
{
if (!bio)
return NULL;
if (!bio_has_data((struct bio *)bio))
return NULL;
if (!bio->bi_io_vec)
return NULL;
if (!bio->bi_io_vec->bv_page)
return NULL;
if (PageAnon(bio->bi_io_vec->bv_page)) {
struct inode *inode;
/* Using direct-io (O_DIRECT) without page cache */
inode = dio_bio_get_inode((struct bio *)bio);
pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
return inode;
}
if (!page_mapping(bio->bi_io_vec->bv_page))
return NULL;
return page_mapping(bio->bi_io_vec->bv_page)->host;
}
/**
* pfk_key_size_to_key_type() - translate key size to key size enum
* @key_size: key size in bytes
* @key_size_type: pointer to store the output enum (can be null)
*
* return 0 in case of success, error otherwise (i.e. unsupported key size)
*/
int pfk_key_size_to_key_type(size_t key_size,
enum ice_crpto_key_size *key_size_type)
{
/*
* currently only 32 bit key size is supported
* in the future, table with supported key sizes might
* be introduced
*/
if (key_size != PFK_SUPPORTED_KEY_SIZE) {
pr_err("not supported key size %zu\n", key_size);
return -EINVAL;
}
if (key_size_type)
*key_size_type = ICE_CRYPTO_KEY_SIZE_256;
return 0;
}
/*
* Retrieves filesystem type from inode's superblock
*/
bool pfe_is_inode_filesystem_type(const struct inode *inode,
const char *fs_type)
{
if (!inode || !fs_type)
return false;
if (!inode->i_sb)
return false;
if (!inode->i_sb->s_type)
return false;
return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
}
/**
* pfk_get_key_for_bio() - get the encryption key to be used for a bio
*
* @bio: pointer to the BIO
* @key_info: pointer to the key information which will be filled in
* @algo_mode: optional pointer to the algorithm identifier which will be set
* @is_pfe: will be set to false if the BIO should be left unencrypted
*
* Return: 0 if a key is being used, otherwise a -errno value
*/
static int pfk_get_key_for_bio(const struct bio *bio,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo_mode,
bool *is_pfe, unsigned int *data_unit)
{
const struct inode *inode;
enum pfe_type which_pfe;
const struct blk_encryption_key *key;
char *s_type = NULL;
inode = pfk_bio_get_inode(bio);
which_pfe = pfk_get_pfe_type(inode);
s_type = (char *)pfk_kc_get_storage_type();
/*
* Update dun based on storage type.
* 512 byte dun - For ext4 emmc
* 4K dun - For ext4 ufs, f2fs ufs and f2fs emmc
*/
if (data_unit) {
if (!bio_dun(bio) && !memcmp(s_type, "sdcc", strlen("sdcc")))
*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
else
*data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
}
if (which_pfe != INVALID_PFE) {
/* Encrypted file; override ->bi_crypt_key */
pr_debug("parsing inode %lu with PFE type %d\n",
inode->i_ino, which_pfe);
return (*(pfk_parse_inode_ftable[which_pfe]))
(bio, inode, key_info, algo_mode, is_pfe);
}
/*
* bio is not for an encrypted file. Use ->bi_crypt_key if it was set.
* Otherwise, don't encrypt/decrypt the bio.
*/
key = bio->bi_crypt_key;
if (!key) {
*is_pfe = false;
return -EINVAL;
}
/* Note: the "salt" is really just the second half of the XTS key. */
BUILD_BUG_ON(sizeof(key->raw) !=
PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
key_info->key = &key->raw[0];
key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
if (algo_mode)
*algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
return 0;
}
/**
* pfk_load_key_start() - loads PFE encryption key to the ICE
* Can also be invoked from non
* PFE context, in this case it
* is not relevant and is_pfe
* flag is set to false
*
* @bio: Pointer to the BIO structure
* @ice_setting: Pointer to ice setting structure that will be filled with
* ice configuration values, including the index to which the key was loaded
* @is_pfe: will be false if inode is not relevant to PFE, in such a case
* it should be treated as non PFE by the block layer
*
* Returns the index where the key is stored in encryption hw and additional
* information that will be used later for configuration of the encryption hw.
*
* Must be followed by pfk_load_key_end when key is no longer used by ice
*
*/
int pfk_load_key_start(const struct bio *bio,
struct ice_crypto_setting *ice_setting, bool *is_pfe,
bool async)
{
int ret = 0;
struct pfk_key_info key_info = {NULL, NULL, 0, 0};
enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
enum ice_crpto_key_size key_size_type = 0;
unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
u32 key_index = 0;
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
if (!ice_setting) {
pr_err("ice setting is NULL\n");
return -EINVAL;
}
ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe,
&data_unit);
if (ret != 0)
return ret;
ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
if (ret != 0)
return ret;
ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
key_info.salt, key_info.salt_size, &key_index, async,
data_unit);
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("start: could not load key into pfk key cache, error %d\n",
ret);
return ret;
}
ice_setting->key_size = key_size_type;
ice_setting->algo_mode = algo_mode;
/* hardcoded for now */
ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
ice_setting->key_index = key_index;
pr_debug("loaded key for file %s key_index %d\n",
inode_to_filename(pfk_bio_get_inode(bio)), key_index);
return 0;
}
/**
* pfk_load_key_end() - marks the PFE key as no longer used by ICE
* Can also be invoked from non
* PFE context, in this case it is not
* relevant and is_pfe flag is
* set to false
*
* @bio: Pointer to the BIO structure
* @is_pfe: Pointer to is_pfe flag, which will be true if function was invoked
* from PFE context
*/
int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
int ret = 0;
struct pfk_key_info key_info = {NULL, NULL, 0, 0};
if (!is_pfe) {
pr_err("is_pfe is NULL\n");
return -EINVAL;
}
/* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_is_ready())
return -ENODEV;
ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL);
if (ret != 0)
return ret;
pfk_kc_load_key_end(key_info.key, key_info.key_size,
key_info.salt, key_info.salt_size);
pr_debug("finished using key for file %s\n",
inode_to_filename(pfk_bio_get_inode(bio)));
return 0;
}
/**
* pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
* @bio1: Pointer to first BIO structure.
* @bio2: Pointer to second BIO structure.
*
* Prevent merging of BIOs from encrypted and non-encrypted
* files, or files encrypted with different key.
* Also prevent non encrypted and encrypted data from the same file
* to be merged (ecryptfs header if stored inside file should be non
* encrypted)
* This API is called by the file system block layer.
*
* Return: true if the BIOs allowed to be merged, false
* otherwise.
*/
bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
{
const struct blk_encryption_key *key1;
const struct blk_encryption_key *key2;
const struct inode *inode1;
const struct inode *inode2;
enum pfe_type which_pfe1;
enum pfe_type which_pfe2;
if (!pfk_is_ready())
return false;
if (!bio1 || !bio2)
return false;
if (bio1 == bio2)
return true;
key1 = bio1->bi_crypt_key;
key2 = bio2->bi_crypt_key;
inode1 = pfk_bio_get_inode(bio1);
inode2 = pfk_bio_get_inode(bio2);
which_pfe1 = pfk_get_pfe_type(inode1);
which_pfe2 = pfk_get_pfe_type(inode2);
/*
* If one bio is for an encrypted file and the other is for a different
* type of encrypted file or for blocks that are not part of an
* encrypted file, do not merge.
*/
if (which_pfe1 != which_pfe2)
return false;
if (which_pfe1 != INVALID_PFE) {
/* Both bios are for the same type of encrypted file. */
return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
inode1, inode2);
}
/*
* Neither bio is for an encrypted file. Merge only if the default keys
* are the same (or both are NULL).
*/
return key1 == key2 ||
(key1 && key2 &&
!crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
}
int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
int ret = -EINVAL;
if (!key || !salt)
return ret;
ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size);
if (ret)
pr_err("Clear key error: ret value %d\n", ret);
return ret;
}
/**
* Flush the key table on storage core reset. During core reset, key
* configuration is lost in ICE. We need to flush the cache so that the
* keys will be reconfigured for every subsequent transaction
*/
void pfk_clear_on_reset(void)
{
if (!pfk_is_ready())
return;
pfk_kc_clear_on_reset();
}
module_init(pfk_init);
module_exit(pfk_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key driver");

security/pfe/pfk_ext4.c (new file, 177 lines)

@ -0,0 +1,177 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
/*
* Per-File-Key (PFK) - EXT4
*
* This driver is used for working with the EXT4 crypt extension.
*
* The key information is stored in the inode by EXT4 when a file is first
* opened, and is later accessed by the Block Device Driver to actually
* load the key into the encryption hw.
*
* PFK exposes APIs for loading and removing keys from the encryption hw,
* and also an API to determine whether 2 adjacent blocks can be aggregated
* by the Block Layer into one request to the encryption hw.
*
*/
#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include "fscrypt_ice.h"
#include "pfk_ext4.h"
static bool pfk_ext4_ready;
/*
* pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
*/
void pfk_ext4_deinit(void)
{
pfk_ext4_ready = false;
}
/*
* pfk_ext4_init() - Init function, should be invoked by upper PFK layer
*/
int __init pfk_ext4_init(void)
{
pfk_ext4_ready = true;
pr_info("PFK EXT4 inited successfully\n");
return 0;
}
/**
* pfk_ext4_is_ready() - driver is initialized and ready.
*
* Return: true if the driver is ready.
*/
static inline bool pfk_ext4_is_ready(void)
{
return pfk_ext4_ready;
}
/**
* pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
* @inode: inode pointer
*/
bool pfk_is_ext4_type(const struct inode *inode)
{
if (!pfe_is_inode_filesystem_type(inode, "ext4"))
return false;
return fscrypt_should_be_processed_by_ice(inode);
}
/**
* pfk_ext4_parse_cipher() - parse cipher from inode to enum
* @inode: inode
* @algo: pointer to store the output enum (can be null)
*
* Return 0 in case of success, error otherwise (i.e. unsupported cipher)
*/
static int pfk_ext4_parse_cipher(const struct inode *inode,
enum ice_cryto_algo_mode *algo)
{
/*
* Currently only the AES XTS algorithm is supported.
* In the future, a table of supported ciphers might
* be introduced.
*/
if (!inode)
return -EINVAL;
if (!fscrypt_is_aes_xts_cipher(inode)) {
pr_err("ext4 alghoritm is not supported by pfk\n");
return -EINVAL;
}
if (algo)
*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
return 0;
}
int pfk_ext4_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe)
{
int ret = 0;
if (!is_pfe)
return -EINVAL;
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_ext4_is_ready())
return -ENODEV;
if (!inode)
return -EINVAL;
if (!key_info)
return -EINVAL;
key_info->key = fscrypt_get_ice_encryption_key(inode);
if (!key_info->key) {
pr_err("could not parse key from ext4\n");
return -EINVAL;
}
key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
if (!key_info->key_size) {
pr_err("could not parse key size from ext4\n");
return -EINVAL;
}
key_info->salt = fscrypt_get_ice_encryption_salt(inode);
if (!key_info->salt) {
pr_err("could not parse salt from ext4\n");
return -EINVAL;
}
key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
if (!key_info->salt_size) {
pr_err("could not parse salt size from ext4\n");
return -EINVAL;
}
ret = pfk_ext4_parse_cipher(inode, algo);
if (ret != 0) {
pr_err("not supported cipher\n");
return ret;
}
return 0;
}
bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2)
{
/* if there is no ext4 pfk, don't disallow merging blocks */
if (!pfk_ext4_is_ready())
return true;
if (!inode1 || !inode2)
return false;
return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
}

30
security/pfe/pfk_ext4.h Normal file
View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _PFK_EXT4_H_
#define _PFK_EXT4_H_
#include <linux/types.h>
#include <linux/fs.h>
#include <crypto/ice.h>
#include "pfk_internal.h"
bool pfk_is_ext4_type(const struct inode *inode);
int pfk_ext4_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe);
bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2);
int __init pfk_ext4_init(void);
void pfk_ext4_deinit(void);
#endif /* _PFK_EXT4_H_ */

188
security/pfe/pfk_f2fs.c Normal file
View File

@ -0,0 +1,188 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
/*
* Per-File-Key (PFK) - f2fs
*
* This driver is used for working with the EXT4/F2FS crypt extension.
*
* The key information is stored in the inode by EXT4/F2FS when the file
* is first opened and is later accessed by the Block Device Driver to
* actually load the key into the encryption hw.
*
* PFK exposes APIs for loading and removing keys from the encryption hw,
* and also an API to determine whether 2 adjacent blocks can be
* aggregated by the Block Layer into one request to the encryption hw.
*
*/
#define pr_fmt(fmt) "pfk_f2fs [%s]: " fmt, __func__
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include "fscrypt_ice.h"
#include "pfk_f2fs.h"
static bool pfk_f2fs_ready;
/*
* pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
*/
void pfk_f2fs_deinit(void)
{
pfk_f2fs_ready = false;
}
/*
* pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
*/
int __init pfk_f2fs_init(void)
{
pfk_f2fs_ready = true;
pr_info("PFK F2FS inited successfully\n");
return 0;
}
/**
* pfk_f2fs_is_ready() - driver is initialized and ready.
*
* Return: true if the driver is ready.
*/
static inline bool pfk_f2fs_is_ready(void)
{
return pfk_f2fs_ready;
}
/**
* pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
* @inode: inode pointer
*/
bool pfk_is_f2fs_type(const struct inode *inode)
{
if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
return false;
return fscrypt_should_be_processed_by_ice(inode);
}
/**
* pfk_f2fs_parse_cipher() - parse cipher from inode to enum
* @inode: inode
* @algo: pointer to store the output enum (can be null)
*
* Return 0 in case of success, error otherwise (i.e. unsupported cipher)
*/
static int pfk_f2fs_parse_cipher(const struct inode *inode,
enum ice_cryto_algo_mode *algo)
{
/*
* Currently only the AES XTS algorithm is supported.
* In the future, a table of supported ciphers might
* be introduced.
*/
if (!inode)
return -EINVAL;
if (!fscrypt_is_aes_xts_cipher(inode)) {
pr_err("f2fs alghoritm is not supported by pfk\n");
return -EINVAL;
}
if (algo)
*algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
return 0;
}
int pfk_f2fs_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe)
{
int ret = 0;
if (!is_pfe)
return -EINVAL;
/*
* only a few errors below can indicate that
* this function was not invoked within PFE context,
* otherwise we will consider it PFE
*/
*is_pfe = true;
if (!pfk_f2fs_is_ready())
return -ENODEV;
if (!inode)
return -EINVAL;
if (!key_info)
return -EINVAL;
key_info->key = fscrypt_get_ice_encryption_key(inode);
if (!key_info->key) {
pr_err("could not parse key from f2fs\n");
return -EINVAL;
}
key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
if (!key_info->key_size) {
pr_err("could not parse key size from f2fs\n");
return -EINVAL;
}
key_info->salt = fscrypt_get_ice_encryption_salt(inode);
if (!key_info->salt) {
pr_err("could not parse salt from f2fs\n");
return -EINVAL;
}
key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
if (!key_info->salt_size) {
pr_err("could not parse salt size from f2fs\n");
return -EINVAL;
}
ret = pfk_f2fs_parse_cipher(inode, algo);
if (ret != 0) {
pr_err("not supported cipher\n");
return ret;
}
return 0;
}
bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2)
{
bool mergeable;
/* if there is no f2fs pfk, don't disallow merging blocks */
if (!pfk_f2fs_is_ready())
return true;
if (!inode1 || !inode2)
return false;
mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
if (!mergeable)
return false;
/* ICE allows only a consecutive IV (DUN) stream within one request. */
if (!bio_dun(bio1) && !bio_dun(bio2))
return true;
else if (!bio_dun(bio1) || !bio_dun(bio2))
return false;
return bio_end_dun(bio1) == bio_dun(bio2);
}
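/*
 * Illustrative sketch (not part of this file): the contiguity rule above
 * restated as a standalone predicate. DUNs advance one per 4K data unit,
 * so a bio ending at DUN N can only merge with a bio starting at DUN N.
 */
static inline bool dun_contiguous(u64 dun1, u32 bytes1, u64 dun2)
{
	/* end DUN = first DUN + number of 4K data units spanned */
	return dun1 + (bytes1 >> 12) == dun2;
}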

30
security/pfe/pfk_f2fs.h Normal file
View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _PFK_F2FS_H_
#define _PFK_F2FS_H_
#include <linux/types.h>
#include <linux/fs.h>
#include <crypto/ice.h>
#include "pfk_internal.h"
bool pfk_is_f2fs_type(const struct inode *inode);
int pfk_f2fs_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
bool *is_pfe);
bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
const struct inode *inode2);
int __init pfk_f2fs_init(void);
void pfk_f2fs_deinit(void);
#endif /* _PFK_F2FS_H_ */

189
security/pfe/pfk_ice.c Normal file
View File

@ -0,0 +1,189 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/async.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/device-mapper.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/qseecomi.h>
#include <crypto/ice.h>
#include "pfk_ice.h"
/**********************************/
/** global definitions **/
/**********************************/
#define TZ_ES_INVALIDATE_ICE_KEY 0x3
#define TZ_ES_CONFIG_SET_ICE_KEY 0x4
/* indexes 0 and 1 are reserved for FDE */
#define MIN_ICE_KEY_INDEX 2
#define MAX_ICE_KEY_INDEX 31
#define TZ_ES_CONFIG_SET_ICE_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
TZ_ES_CONFIG_SET_ICE_KEY)
#define TZ_ES_INVALIDATE_ICE_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1( \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
#define CONTEXT_SIZE 0x1000
#define ICE_BUFFER_SIZE 64
static uint8_t ice_buffer[ICE_BUFFER_SIZE];
enum {
ICE_CIPHER_MODE_XTS_128 = 0,
ICE_CIPHER_MODE_CBC_128 = 1,
ICE_CIPHER_MODE_XTS_256 = 3,
ICE_CIPHER_MODE_CBC_256 = 4
};
static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
unsigned int data_unit)
{
struct scm_desc desc = {0};
int ret = 0;
uint32_t smc_id = 0;
char *tzbuf = (char *)ice_buffer;
uint32_t size = ICE_BUFFER_SIZE / 2;
memset(tzbuf, 0, ICE_BUFFER_SIZE);
memcpy(ice_buffer, key, size);
memcpy(ice_buffer+size, salt, size);
dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
desc.args[0] = index;
desc.args[1] = virt_to_phys(tzbuf);
desc.args[2] = ICE_BUFFER_SIZE;
desc.args[3] = ICE_CIPHER_MODE_XTS_256;
desc.args[4] = data_unit;
ret = scm_call2_noretry(smc_id, &desc);
if (ret)
pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
return ret;
}
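/*
 * Layout of the 64-byte TZ buffer built by set_key() above: bytes 0..31
 * hold the raw key and bytes 32..63 hold the salt, packed back to back
 * and cache-flushed before being handed to the SCM call by physical
 * address.
 */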
static int clear_key(uint32_t index)
{
struct scm_desc desc = {0};
int ret = 0;
uint32_t smc_id = 0;
smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
desc.args[0] = index;
ret = scm_call2_noretry(smc_id, &desc);
if (ret)
pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
return ret;
}
int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
char *storage_type, unsigned int data_unit)
{
int ret = 0, ret1 = 0;
char *s_type = storage_type;
if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
pr_err("%s Invalid index %d\n", __func__, index);
return -EINVAL;
}
if (!key || !salt) {
pr_err("%s Invalid key/salt\n", __func__);
return -EINVAL;
}
if (s_type == NULL) {
pr_err("%s Invalid Storage type\n", __func__);
return -EINVAL;
}
ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
if (ret) {
pr_err("%s: could not enable clocks: %d\n", __func__, ret);
goto out;
}
ret = set_key(index, key, salt, data_unit);
if (ret) {
pr_err("%s: Set Key Error: %d\n", __func__, ret);
if (ret == -EBUSY) {
if (qcom_ice_setup_ice_hw((const char *)s_type, false))
pr_err("%s: clock disable failed\n", __func__);
goto out;
}
/* Try to invalidate the key to keep ICE in proper state */
ret1 = clear_key(index);
if (ret1)
pr_err("%s: Invalidate key error: %d\n", __func__, ret);
}
ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
if (ret1)
pr_err("%s: Error %d disabling clocks\n", __func__, ret1);
out:
return ret;
}
int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
{
int ret = 0;
if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
pr_err("%s Invalid index %d\n", __func__, index);
return -EINVAL;
}
if (storage_type == NULL) {
pr_err("%s Invalid Storage type\n", __func__);
return -EINVAL;
}
ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
if (ret) {
pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
return ret;
}
ret = clear_key(index);
if (ret)
pr_err("%s: Invalidate key error: %d\n", __func__, ret);
if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
pr_err("%s: could not disable clocks\n", __func__);
return ret;
}
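/*
 * Both entry points above follow the same bracket: enable the ICE clocks
 * via qcom_ice_setup_ice_hw(..., true), perform the SCM call, then always
 * attempt to disable the clocks again, logging (but not propagating) a
 * disable failure.
 */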

25
security/pfe/pfk_ice.h Normal file
View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef PFK_ICE_H_
#define PFK_ICE_H_
/*
* PFK ICE
*
* ICE keys configuration through scm calls.
*
*/
#include <linux/types.h>
int pfk_ice_init(void);
int pfk_ice_deinit(void);
int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
char *storage_type, unsigned int data_unit);
int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
#endif /* PFK_ICE_H_ */

27
security/pfe/pfk_internal.h Normal file
View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _PFK_INTERNAL_H_
#define _PFK_INTERNAL_H_
#include <linux/types.h>
#include <crypto/ice.h>
struct pfk_key_info {
const unsigned char *key;
const unsigned char *salt;
size_t key_size;
size_t salt_size;
};
int pfk_key_size_to_key_type(size_t key_size,
enum ice_crpto_key_size *key_size_type);
bool pfe_is_inode_filesystem_type(const struct inode *inode,
const char *fs_type);
char *inode_to_filename(const struct inode *inode);
#endif /* _PFK_INTERNAL_H_ */

912
security/pfe/pfk_kc.c Normal file
View File

@ -0,0 +1,912 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
/*
* PFK Key Cache
*
* Key Cache used internally in PFK.
* The purpose of the cache is to save access time to QSEE when loading keys.
* Currently the cache is the same size as the total number of keys that can
* be loaded to ICE. Since this number is relatively small, the algorithms for
* cache eviction are simple, linear and based on the last-usage timestamp, i.e.
* the entry that will be evicted is the one with the oldest timestamp.
* Empty entries always have the oldest timestamp.
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/ice.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/sched/signal.h>
#include "pfk_kc.h"
#include "pfk_ice.h"
/** the first available index in ice engine */
#define PFK_KC_STARTING_INDEX 2
/** currently the only supported key and salt sizes */
#define PFK_KC_KEY_SIZE 32
#define PFK_KC_SALT_SIZE 32
/** Table size */
#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
/** The maximum key and salt size */
#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
#define PFK_UFS "ufs"
static DEFINE_SPINLOCK(kc_lock);
static unsigned long flags;
static bool kc_ready;
static char *s_type = "sdcc";
/**
* enum pfk_kc_entry_state - state of the entry inside kc table
*
* @FREE: entry is free
* @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
* and cannot be used by others. SCM call
* to load key to ICE is pending to be performed
* @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
* cannot be used by others. SCM call to load the
* key to ICE was successfully executed and key is
* now loaded
* @INACTIVE_INVALIDATING: entry is being invalidated during file close
* and cannot be used by others until invalidation
* is complete
* @INACTIVE: entry's key is already loaded, but is not
* currently being used. It can be re-used for
* optimization and to avoid SCM call cost or
* it can be taken by another key if there are
* no FREE entries
* @SCM_ERROR: error occurred while scm call was performed to
* load the key to ICE
*/
enum pfk_kc_entry_state {
FREE,
ACTIVE_ICE_PRELOAD,
ACTIVE_ICE_LOADED,
INACTIVE_INVALIDATING,
INACTIVE,
SCM_ERROR
};
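/*
 * Typical entry lifecycle (illustrative): FREE -> ACTIVE_ICE_PRELOAD ->
 * ACTIVE_ICE_LOADED -> INACTIVE -> INACTIVE_INVALIDATING -> FREE, with
 * SCM_ERROR entered when the load-time SCM call fails.
 */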
struct kc_entry {
unsigned char key[PFK_MAX_KEY_SIZE];
size_t key_size;
unsigned char salt[PFK_MAX_SALT_SIZE];
size_t salt_size;
u64 time_stamp;
u32 key_index;
struct task_struct *thread_pending;
enum pfk_kc_entry_state state;
/* ref count for the number of requests in the HW queue for this key */
int loaded_ref_cnt;
int scm_error;
};
static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
/**
* kc_is_ready() - driver is initialized and ready.
*
* Return: true if the key cache is ready.
*/
static inline bool kc_is_ready(void)
{
return kc_ready;
}
static inline void kc_spin_lock(void)
{
spin_lock_irqsave(&kc_lock, flags);
}
static inline void kc_spin_unlock(void)
{
spin_unlock_irqrestore(&kc_lock, flags);
}
/**
* pfk_kc_get_storage_type() - return the hardware storage type.
*
* Return: storage type queried during bootup.
*/
const char *pfk_kc_get_storage_type(void)
{
return s_type;
}
/**
* kc_entry_is_available() - checks whether the entry is available
*
* Return true if it is, false otherwise or if invalid
* Should be invoked under spinlock
*/
static bool kc_entry_is_available(const struct kc_entry *entry)
{
if (!entry)
return false;
return (entry->state == FREE || entry->state == INACTIVE);
}
/**
* kc_entry_wait_till_available() - waits till entry is available
*
* Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
* by signal
*
* Should be invoked under spinlock
*/
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
int res = 0;
while (!kc_entry_is_available(entry)) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
res = -ERESTARTSYS;
break;
}
/* assuming only one thread can try to invalidate
* the same entry
*/
entry->thread_pending = current;
kc_spin_unlock();
schedule();
kc_spin_lock();
}
set_current_state(TASK_RUNNING);
return res;
}
/**
* kc_entry_start_invalidating() - moves entry to state
* INACTIVE_INVALIDATING
* If entry is in use, waits till
* it gets available
* @entry: pointer to entry
*
* Return 0 in case of success, otherwise error
* Should be invoked under spinlock
*/
static int kc_entry_start_invalidating(struct kc_entry *entry)
{
int res;
res = kc_entry_wait_till_available(entry);
if (res)
return res;
entry->state = INACTIVE_INVALIDATING;
return 0;
}
/**
* kc_entry_finish_invalidating() - moves entry to state FREE
*
* @entry: pointer to entry
*
* Should be invoked under spinlock
*/
static void kc_entry_finish_invalidating(struct kc_entry *entry)
{
if (!entry)
return;
if (entry->state != INACTIVE_INVALIDATING)
return;
entry->state = FREE;
}
/**
* kc_min_entry() - compare two entries to find the one with the minimal time
* @a: pointer to the first entry. If NULL the other entry will be returned
* @b: pointer to the second entry
*
* Return the entry whose timestamp is minimal, or b if a is NULL
*/
static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
struct kc_entry *b)
{
if (!a)
return b;
if (time_before64(b->time_stamp, a->time_stamp))
return b;
return a;
}
/**
* kc_entry_at_index() - return entry at specific index
* @index: index of entry to be accessed
*
* Return entry
* Should be invoked under spinlock
*/
static struct kc_entry *kc_entry_at_index(int index)
{
return &(kc_table[index]);
}
/**
* kc_find_key_at_index() - find kc entry starting at specific index
* @key: key to look for
* @key_size: the key size
* @salt: salt to look for
* @salt_size: the salt size
* @starting_index: index to start the search with; if an entry is found,
* updated with the index of that entry
*
* Return entry or NULL in case of error
* Should be invoked under spinlock
*/
static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
size_t key_size, const unsigned char *salt, size_t salt_size,
int *starting_index)
{
struct kc_entry *entry = NULL;
int i = 0;
for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
if (salt != NULL) {
if (entry->salt_size != salt_size)
continue;
if (memcmp(entry->salt, salt, salt_size) != 0)
continue;
}
if (entry->key_size != key_size)
continue;
if (memcmp(entry->key, key, key_size) == 0) {
*starting_index = i;
return entry;
}
}
return NULL;
}
/**
* kc_find_key() - find kc entry
* @key: key to look for
* @key_size: the key size
* @salt: salt to look for
* @salt_size: the salt size
*
* Return entry or NULL in case of error
* Should be invoked under spinlock
*/
static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
int index = 0;
return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
}
/**
* kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
* that is not locked
*
* Returns entry with minimal timestamp. Empty entries have timestamp
* of 0, therefore they are returned first.
* If all the entries are locked, will return NULL
* Should be invoked under spin lock
*/
static struct kc_entry *kc_find_oldest_entry_non_locked(void)
{
struct kc_entry *curr_min_entry = NULL;
struct kc_entry *entry = NULL;
int i = 0;
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
if (entry->state == FREE)
return entry;
if (entry->state == INACTIVE)
curr_min_entry = kc_min_entry(curr_min_entry, entry);
}
return curr_min_entry;
}
/**
* kc_update_timestamp() - updates timestamp of entry to current
*
* @entry: entry to update
*
*/
static void kc_update_timestamp(struct kc_entry *entry)
{
if (!entry)
return;
entry->time_stamp = get_jiffies_64();
}
/**
* kc_clear_entry() - clear the key from entry and mark entry not in use
*
* @entry: pointer to entry
*
* Should be invoked under spinlock
*/
static void kc_clear_entry(struct kc_entry *entry)
{
if (!entry)
return;
memset(entry->key, 0, entry->key_size);
memset(entry->salt, 0, entry->salt_size);
entry->key_size = 0;
entry->salt_size = 0;
entry->time_stamp = 0;
entry->scm_error = 0;
entry->state = FREE;
entry->loaded_ref_cnt = 0;
entry->thread_pending = NULL;
}
/**
* kc_update_entry() - replaces the key in given entry and
* loads the new key to ICE
*
* @entry: entry to replace key in
* @key: key
* @key_size: key_size
* @salt: salt
* @salt_size: salt_size
* @data_unit: dun size
*
* The previous key is securely released and wiped, the new one is loaded
* to ICE.
* Should be invoked under spinlock
* Caller to validate that key/salt_size matches the size in struct kc_entry
*/
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
size_t key_size, const unsigned char *salt, size_t salt_size,
unsigned int data_unit)
{
int ret;
kc_clear_entry(entry);
memcpy(entry->key, key, key_size);
entry->key_size = key_size;
memcpy(entry->salt, salt, salt_size);
entry->salt_size = salt_size;
/* Mark entry as no longer free before releasing the lock */
entry->state = ACTIVE_ICE_PRELOAD;
kc_spin_unlock();
ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
entry->salt, s_type, data_unit);
kc_spin_lock();
return ret;
}
/**
* pfk_kc_init() - init function
*
* Return 0 in case of success, error otherwise
*/
int pfk_kc_init(void)
{
int i = 0;
struct kc_entry *entry = NULL;
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
entry->key_index = PFK_KC_STARTING_INDEX + i;
}
kc_ready = true;
kc_spin_unlock();
return 0;
}
/**
* pfk_kc_deinit() - deinit function
*
* Return 0 in case of success, error otherwise
*/
int pfk_kc_deinit(void)
{
int res = pfk_kc_clear();
kc_ready = false;
return res;
}
/**
* pfk_kc_load_key_start() - retrieve the key from cache or add it if
* it's not there and return the ICE hw key index in @key_index.
* @key: pointer to the key
* @key_size: the size of the key
* @salt: pointer to the salt
* @salt_size: the size of the salt
* @key_index: the pointer to key_index where the output will be stored
* @async: whether scm calls are allowed in the caller context
* @data_unit: the data unit size, passed through to the ICE configuration
*
* If the key is present in the cache, then the key_index will be retrieved
* from the cache.
* If it is not present, the oldest entry from the kc table will be evicted,
* and the key will be loaded to ICE via QSEE at the index of the evicted
* entry and stored in the cache.
* The entry that is going to be used is marked as being used; it will be
* marked as not being used when ICE finishes using it and
* pfk_kc_load_key_end() is invoked.
* As QSEE calls can only be done from a non-atomic context, @async set to
* 'false' specifies that it is ok to make the calls in the current
* context. Otherwise, when @async is set, -EAGAIN will be returned and the
* caller should retry the call from a different context.
*
* Return 0 in case of success, error otherwise
*/
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
bool async, unsigned int data_unit)
{
int ret = 0;
struct kc_entry *entry = NULL;
bool entry_exists = false;
if (!kc_is_ready())
return -ENODEV;
if (!key || !salt || !key_index) {
pr_err("%s key/salt/key_index NULL\n", __func__);
return -EINVAL;
}
if (key_size != PFK_KC_KEY_SIZE) {
pr_err("unsupported key size %zu\n", key_size);
return -EINVAL;
}
if (salt_size != PFK_KC_SALT_SIZE) {
pr_err("unsupported salt size %zu\n", salt_size);
return -EINVAL;
}
kc_spin_lock();
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
if (async) {
pr_debug("%s task will populate entry\n", __func__);
kc_spin_unlock();
return -EAGAIN;
}
entry = kc_find_oldest_entry_non_locked();
if (!entry) {
/* could not find a single non locked entry,
* return EBUSY to upper layers so that the
* request will be rescheduled
*/
kc_spin_unlock();
return -EBUSY;
}
} else {
entry_exists = true;
}
pr_debug("entry with index %d is in state %d\n",
entry->key_index, entry->state);
switch (entry->state) {
case (INACTIVE):
if (entry_exists) {
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
if (!strcmp(s_type, (char *)PFK_UFS)) {
if (async)
entry->loaded_ref_cnt++;
} else {
entry->loaded_ref_cnt++;
}
break;
}
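/* fall through - an evicted INACTIVE entry gets a new key loaded below */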
case (FREE):
ret = kc_update_entry(entry, key, key_size, salt, salt_size,
data_unit);
if (ret) {
entry->state = SCM_ERROR;
entry->scm_error = ret;
pr_err("%s: key load error (%d)\n", __func__, ret);
} else {
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
/*
* In case of UFS only increase ref cnt for async calls,
* sync calls from within work thread do not pass
* requests further to HW
*/
if (!strcmp(s_type, (char *)PFK_UFS)) {
if (async)
entry->loaded_ref_cnt++;
} else {
entry->loaded_ref_cnt++;
}
}
break;
case (ACTIVE_ICE_PRELOAD):
case (INACTIVE_INVALIDATING):
ret = -EAGAIN;
break;
case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry);
if (!strcmp(s_type, (char *)PFK_UFS)) {
if (async)
entry->loaded_ref_cnt++;
} else {
entry->loaded_ref_cnt++;
}
break;
case (SCM_ERROR):
ret = entry->scm_error;
kc_clear_entry(entry);
entry->state = FREE;
break;
default:
pr_err("invalid state %d for entry with key index %d\n",
entry->state, entry->key_index);
ret = -EINVAL;
}
*key_index = entry->key_index;
kc_spin_unlock();
return ret;
}
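/*
 * Illustrative caller pattern for the async contract described above
 * (example_load_key() is hypothetical, not part of this driver): an
 * atomic caller passes async=true and, on -EAGAIN, re-issues the call
 * from a context where SCM calls are allowed.
 */
static int example_load_key(const unsigned char *key,
		const unsigned char *salt, u32 *ice_index,
		unsigned int data_unit)
{
	int ret = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
			PFK_KC_SALT_SIZE, ice_index, true, data_unit);
	if (ret == -EAGAIN)
		/* e.g. from a worker thread, where SCM calls are allowed */
		ret = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE, salt,
				PFK_KC_SALT_SIZE, ice_index, false, data_unit);
	return ret;
}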
/**
* pfk_kc_load_key_end() - finish the process of key loading that was
* started by pfk_kc_load_key_start(), by marking the entry as not being
* in use
* @key: pointer to the key
* @key_size: the size of the key
* @salt: pointer to the salt
* @salt_size: the size of the salt
*
*/
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
struct task_struct *tmp_pending = NULL;
int ref_cnt = 0;
if (!kc_is_ready())
return;
if (!key || !salt)
return;
if (key_size != PFK_KC_KEY_SIZE)
return;
if (salt_size != PFK_KC_SALT_SIZE)
return;
kc_spin_lock();
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
kc_spin_unlock();
pr_err("internal error, there should an entry to unlock\n");
return;
}
ref_cnt = --entry->loaded_ref_cnt;
if (ref_cnt < 0)
pr_err("internal error, ref count should never be negative\n");
if (!ref_cnt) {
entry->state = INACTIVE;
/*
* wake-up invalidation if it's waiting
* for the entry to be released
*/
if (entry->thread_pending) {
tmp_pending = entry->thread_pending;
entry->thread_pending = NULL;
kc_spin_unlock();
wake_up_process(tmp_pending);
return;
}
}
kc_spin_unlock();
}
/**
* pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE engine
* @key: pointer to the key
* @key_size: the size of the key
* @salt: pointer to the salt
* @salt_size: the size of the salt
*
* Return 0 in case of success, error otherwise (also in case of a
* non-existing key)
*/
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
int res = 0;
if (!kc_is_ready())
return -ENODEV;
if (!key)
return -EINVAL;
if (!salt)
return -EINVAL;
if (key_size != PFK_KC_KEY_SIZE)
return -EINVAL;
if (salt_size != PFK_KC_SALT_SIZE)
return -EINVAL;
kc_spin_lock();
entry = kc_find_key(key, key_size, salt, salt_size);
if (!entry) {
pr_debug("%s: key does not exist\n", __func__);
kc_spin_unlock();
return -EINVAL;
}
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
return res;
}
kc_clear_entry(entry);
kc_spin_unlock();
qti_pfk_ice_invalidate_key(entry->key_index, s_type);
kc_spin_lock();
kc_entry_finish_invalidating(entry);
kc_spin_unlock();
return 0;
}
/**
* pfk_kc_remove_key() - remove the key from cache and from ICE engine
* when no salt is available. Only the key part is matched; if several
* entries match, all will be removed
*
* @key: pointer to the key
* @key_size: the size of the key
*
* Return 0 in case of success, error otherwise (also for a non-existing key)
*/
int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
{
struct kc_entry *entry = NULL;
int index = 0;
int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
int temp_indexes_size = 0;
int i = 0;
int res = 0;
if (!kc_is_ready())
return -ENODEV;
if (!key)
return -EINVAL;
if (key_size != PFK_KC_KEY_SIZE)
return -EINVAL;
memset(temp_indexes, -1, sizeof(temp_indexes));
kc_spin_lock();
entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
if (!entry) {
pr_err("%s: key does not exist\n", __func__);
kc_spin_unlock();
return -EINVAL;
}
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
return res;
}
temp_indexes[temp_indexes_size++] = index;
kc_clear_entry(entry);
/* let's clean additional entries with the same key if there are any */
do {
index++;
entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
if (!entry)
break;
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
goto out;
}
temp_indexes[temp_indexes_size++] = index;
kc_clear_entry(entry);
} while (true);
kc_spin_unlock();
temp_indexes_size--;
for (i = temp_indexes_size; i >= 0 ; i--)
qti_pfk_ice_invalidate_key(
kc_entry_at_index(temp_indexes[i])->key_index,
s_type);
/* fall through */
res = 0;
out:
kc_spin_lock();
for (i = temp_indexes_size; i >= 0 ; i--)
kc_entry_finish_invalidating(
kc_entry_at_index(temp_indexes[i]));
kc_spin_unlock();
return res;
}
/**
* pfk_kc_clear() - clear the table and remove all keys from ICE
*
* Return 0 on success, error otherwise
*
*/
int pfk_kc_clear(void)
{
struct kc_entry *entry = NULL;
int i = 0;
int res = 0;
if (!kc_is_ready())
return -ENODEV;
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
res = kc_entry_start_invalidating(entry);
if (res != 0) {
kc_spin_unlock();
goto out;
}
kc_clear_entry(entry);
}
kc_spin_unlock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
s_type);
/* fall through */
res = 0;
out:
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
kc_entry_finish_invalidating(kc_entry_at_index(i));
kc_spin_unlock();
return res;
}
/**
* pfk_kc_clear_on_reset() - clear the key table on storage core reset.
* The assumption is that at this point there are no pending transactions.
* Also, there is no need to invalidate the keys in ICE, since the reset
* has already wiped them.
*
*/
void pfk_kc_clear_on_reset(void)
{
struct kc_entry *entry = NULL;
int i = 0;
if (!kc_is_ready())
return;
kc_spin_lock();
for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
entry = kc_entry_at_index(i);
kc_clear_entry(entry);
}
kc_spin_unlock();
}
static int pfk_kc_find_storage_type(char **device)
{
char boot[20] = {'\0'};
char *match = (char *)strnstr(saved_command_line,
"androidboot.bootdevice=",
strlen(saved_command_line));
if (match) {
memcpy(boot, (match + strlen("androidboot.bootdevice=")),
sizeof(boot) - 1);
if (strnstr(boot, PFK_UFS, strlen(boot)))
*device = PFK_UFS;
return 0;
}
return -EINVAL;
}
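/*
 * For example, a UFS device typically boots with
 * "androidboot.bootdevice=...ufshc..." on the command line, so the parser
 * above switches s_type to "ufs"; otherwise the default "sdcc" is kept.
 */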
static int __init pfk_kc_pre_init(void)
{
return pfk_kc_find_storage_type(&s_type);
}
static void __exit pfk_kc_exit(void)
{
s_type = NULL;
}
module_init(pfk_kc_pre_init);
module_exit(pfk_kc_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key-KC driver");

27
security/pfe/pfk_kc.h Normal file
View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef PFK_KC_H_
#define PFK_KC_H_
#include <linux/types.h>
int pfk_kc_init(void);
int pfk_kc_deinit(void);
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
bool async, unsigned int data_unit);
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
int pfk_kc_clear(void);
void pfk_kc_clear_on_reset(void);
const char *pfk_kc_get_storage_type(void);
extern char *saved_command_line;
#endif /* PFK_KC_H_ */

View File

@ -623,6 +623,14 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode
}
EXPORT_SYMBOL_GPL(security_inode_create);
int security_inode_post_create(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_post_create, 0, dir, dentry, mode);
}
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{

View File

@ -26,8 +26,7 @@
#include <linux/in.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include "flask.h"
#include "avc.h"
#include "security.h"
struct task_security_struct {
u32 osid; /* SID prior to last execve */
@ -64,6 +63,8 @@ struct inode_security_struct {
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
u32 tag; /* Per-File-Encryption tag */
void *pfk_data; /* Per-File-Key data from ecryptfs */
spinlock_t lock;
};

View File

@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */