// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
* ima_policy.c
* - initialize default measure policy rules
*/
#include <linux/init.h>
#include <linux/kernel_read_file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "ima.h"
/* flags definitions */
#define IMA_FUNC	0x0001
#define IMA_MASK	0x0002
#define IMA_FSMAGIC	0x0004
#define IMA_UID		0x0008
#define IMA_FOWNER	0x0010
/*
 * IMA_FSUUID/IMA_INMASK/IMA_EUID are referenced by the rule tables and
 * ima_match_rules() below but were missing from this list; restored with
 * their upstream values.
 */
#define IMA_FSUUID	0x0020
#define IMA_INMASK	0x0040
#define IMA_EUID	0x0080
#define IMA_PCR		0x0100
#define IMA_FSNAME	0x0200
#define IMA_KEYRINGS	0x0400
#define IMA_LABEL	0x0800
#define IMA_VALIDATE_ALGOS	0x1000
#define IMA_GID		0x2000
#define IMA_EGID	0x4000
#define IMA_FGROUP	0x8000
#define UNKNOWN 0
#define MEASURE 0x0001 /* same as IMA_MEASURE */
#define DONT_MEASURE 0x0002
#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
#define DONT_APPRAISE 0x0008
#define HASH 0x0100
#define DONT_HASH 0x0200
#define INVALID_PCR(a) (((a) < 0) || \
(a) >= (sizeof_field(struct ima_iint_cache, measured_pcrs) * 8))
static int temp_ima_appraise;
atomic_t ima_setxattr_allowed_hash_algorithms;
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};
enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
/*
 * Parsed "|"-separated option list (keyrings=, label=). All item strings
 * live in a single buffer owned by items[0]; only that element may be
 * passed to kfree() (see ima_alloc_rule_opt_list()/ima_free_rule_opt_list()).
 */
struct ima_rule_opt_list {
	size_t count;			/* number of entries in items[] */
	char *items[] __counted_by(count);
};
/*
* These comparators are needed nowhere outside of ima so just define them here.
* This pattern should hopefully never be needed outside of ima.
*/
static inline bool vfsuid_gt_kuid(vfsuid_t vfsuid, kuid_t kuid)
{
return __vfsuid_val(vfsuid) > __kuid_val(kuid);
}
static inline bool vfsgid_gt_kgid(vfsgid_t vfsgid, kgid_t kgid)
{
return __vfsgid_val(vfsgid) > __kgid_val(kgid);
}
static inline bool vfsuid_lt_kuid(vfsuid_t vfsuid, kuid_t kuid)
{
return __vfsuid_val(vfsuid) < __kuid_val(kuid);
}
static inline bool vfsgid_lt_kgid(vfsgid_t vfsgid, kgid_t kgid)
{
return __vfsgid_val(vfsgid) < __kgid_val(kgid);
}
unsigned int flags;
enum ima_hooks func;
int mask;
unsigned long fsmagic;
kuid_t fowner;
bool (*uid_op)(kuid_t cred_uid, kuid_t rule_uid); /* Handlers for operators */
bool (*fowner_op)(vfsuid_t vfsuid, kuid_t rule_uid); /* vfsuid_eq_kuid(), vfsuid_gt_kuid(), vfsuid_lt_kuid() */
bool (*fgroup_op)(vfsgid_t vfsgid, kgid_t rule_gid); /* vfsgid_eq_kgid(), vfsgid_gt_kgid(), vfsgid_lt_kgid() */
unsigned int allowed_algos; /* bitfield of allowed hash algorithms */
struct {
void *rule; /* LSM file metadata specific */
char *args_p; /* audit value */
char *fsname;
struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
struct ima_rule_opt_list *label; /* Measure data grouped under this label */
struct ima_template_desc *template;
/*
* sanity check in case the kernels gains more hash algorithms that can
* fit in an unsigned int
*/
static_assert(
8 * sizeof(unsigned int) >= HASH_ALGO__LAST,
"The bitfield allowed_algos in ima_rule_entry is too small to contain all the supported hash algorithms, consider using a bigger type");
/*
* Without LSM specific knowledge, the default policy can only be
* written in terms of .action, .func, .mask, .fsmagic, .uid, .gid,
* .fowner, and .fgroup
*/
/*
* The minimum rule set to allow for full TCB coverage. Measures all files
* opened or mmap for exec and everything read by root. Dangerous because
* normal users can easily run the machine out of memory simply building
* and running executables.
*/
/*
 * Pseudo/volatile filesystems whose contents are kernel-generated and
 * not worth measuring. The array was missing its closing "};" — restored.
 */
static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
	{.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .func = FILE_CHECK,
	 .flags = IMA_FSMAGIC | IMA_FUNC},
	{.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
	 .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC,
	 .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_MEASURE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC}
};
/* Builtin "ima_tcb" (original TCB) measurement policy rules. */
static struct ima_rule_entry original_measurement_rules[] __ro_after_init = {
/* measure everything mmap'ed or exec'ed */
{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
/* measure all files opened for read by root (exact uid 0 match) */
{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
.uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
};
/*
 * Builtin "ima_policy=tcb" measurement rules: like the original TCB
 * policy, but matches MAY_READ as a bit within the mask (IMA_INMASK)
 * and also considers the effective uid (IMA_EUID). The array was missing
 * its closing "};" — restored.
 */
static struct ima_rule_entry default_measurement_rules[] __ro_after_init = {
	{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
	 .flags = IMA_FUNC | IMA_MASK},
	{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
	 .flags = IMA_FUNC | IMA_MASK},
	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
	 .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
	 .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
	 .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
	 .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
	{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
	{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
	{.action = MEASURE, .func = POLICY_CHECK, .flags = IMA_FUNC},
};
/*
 * Builtin "ima_appraise_tcb" rules: skip pseudo filesystems, appraise
 * everything owned by root. The array was missing its closing "};" —
 * restored.
 */
static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
	{.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
	{.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC},
#ifdef CONFIG_IMA_WRITE_POLICY
	{.action = APPRAISE, .func = POLICY_CHECK,
	 .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
#endif
#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
	{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &vfsuid_eq_kuid,
	 .flags = IMA_FOWNER},
#else
	/* force signature */
	{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &vfsuid_eq_kuid,
	 .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
#endif
};
/*
 * Build-time appraisal rules: require signatures for each hook selected
 * via the CONFIG_IMA_APPRAISE_REQUIRE_*_SIGS options.
 */
static struct ima_rule_entry build_appraise_rules[] __ro_after_init = {
#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS
{.action = APPRAISE, .func = MODULE_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
#endif
#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
{.action = APPRAISE, .func = FIRMWARE_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
#endif
#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS
{.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
#endif
#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS
{.action = APPRAISE, .func = POLICY_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
#endif
};
/*
 * Builtin "secure_boot" rules: unconditionally require signatures on
 * modules, firmware, the kexec kernel image and the IMA policy itself.
 */
static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
{.action = APPRAISE, .func = MODULE_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
{.action = APPRAISE, .func = FIRMWARE_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
{.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
{.action = APPRAISE, .func = POLICY_CHECK,
.flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
};
/* Builtin "critical_data" rule: measure kernel critical data. */
static struct ima_rule_entry critical_data_rules[] __ro_after_init = {
{.action = MEASURE, .func = CRITICAL_DATA, .flags = IMA_FUNC},
};
/* An array of architecture specific rules */
static struct ima_rule_entry *arch_policy_entry __ro_after_init;
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
static LIST_HEAD(ima_temp_rules);
static struct list_head __rcu *ima_rules = (struct list_head __rcu *)(&ima_default_rules);
/*
 * "ima_tcb" boot parameter: select the original TCB measurement policy
 * unless a policy was already chosen. The function body's braces and
 * trailing "return 1;" were missing — restored.
 */
static int __init default_measure_policy_setup(char *str)
{
	if (ima_policy)
		return 1;

	ima_policy = ORIGINAL_TCB;
	return 1;
}
__setup("ima_tcb", default_measure_policy_setup);
static bool ima_use_appraise_tcb __initdata;
static bool ima_use_secure_boot __initdata;
static bool ima_use_critical_data __initdata;
static bool ima_fail_unverifiable_sigs __ro_after_init;
/*
 * "ima_policy=" boot parameter: parse a list of builtin policy names
 * separated by ' ', '|' or '\n'. The "appraise_tcb" and "secure_boot"
 * branches were missing their flag assignments (two back-to-back else-if
 * statements with no bodies, which would have silently bound the
 * following branch) — restored to set the corresponding __initdata flags.
 */
static int __init policy_setup(char *str)
{
	char *p;

	while ((p = strsep(&str, " |\n")) != NULL) {
		if (*p == ' ')
			continue;
		if ((strcmp(p, "tcb") == 0) && !ima_policy)
			ima_policy = DEFAULT_TCB;
		else if (strcmp(p, "appraise_tcb") == 0)
			ima_use_appraise_tcb = true;
		else if (strcmp(p, "secure_boot") == 0)
			ima_use_secure_boot = true;
		else if (strcmp(p, "critical_data") == 0)
			ima_use_critical_data = true;
		else if (strcmp(p, "fail_securely") == 0)
			ima_fail_unverifiable_sigs = true;
		else
			pr_err("policy \"%s\" not found", p);
	}

	return 1;
}
__setup("ima_policy=", policy_setup);
/*
 * "ima_appraise_tcb" boot parameter: enable the builtin appraise-tcb
 * policy. The handler returned without setting ima_use_appraise_tcb
 * (declared above but otherwise never written), making the parameter a
 * no-op — restored the assignment.
 */
static int __init default_appraise_policy_setup(char *str)
{
	ima_use_appraise_tcb = true;
	return 1;
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
/*
 * ima_alloc_rule_opt_list - parse a "|"-separated option string
 * @src: substring to parse (e.g. the value of keyrings= or label=)
 *
 * Duplicates @src, splits it in place on '|' and returns an option list
 * whose items[] entries point into the single duplicated buffer (owned
 * by items[0]). Returns ERR_PTR(-EINVAL) on an empty list or empty item,
 * ERR_PTR(-ENOMEM) on allocation failure.
 *
 * (This span also contained a run of scraper-artifact line numbers in
 * the middle of the function body; removed.)
 */
static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
{
	struct ima_rule_opt_list *opt_list;
	size_t count = 0;
	char *src_copy;
	char *cur, *next;
	size_t i;

	src_copy = match_strdup(src);
	if (!src_copy)
		return ERR_PTR(-ENOMEM);

	next = src_copy;
	while ((cur = strsep(&next, "|"))) {
		/* Don't accept an empty list item */
		if (!(*cur)) {
			kfree(src_copy);
			return ERR_PTR(-EINVAL);
		}

		count++;
	}

	/* Don't accept an empty list */
	if (!count) {
		kfree(src_copy);
		return ERR_PTR(-EINVAL);
	}

	opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
	if (!opt_list) {
		kfree(src_copy);
		return ERR_PTR(-ENOMEM);
	}
	opt_list->count = count;

	/*
	 * strsep() has already replaced all instances of '|' with '\0',
	 * leaving a byte sequence of NUL-terminated strings. Reference each
	 * string with the array of items.
	 *
	 * IMPORTANT: Ownership of the allocated buffer is transferred from
	 * src_copy to the first element in the items array. To free the
	 * buffer, kfree() must only be called on the first element of the
	 * array.
	 */
	for (i = 0, cur = src_copy; i < count; i++) {
		opt_list->items[i] = cur;
		cur = strchr(cur, '\0') + 1;
	}

	return opt_list;
}
/*
 * Release an option list created by ima_alloc_rule_opt_list().
 *
 * Every item string lives in one shared buffer whose ownership sits with
 * items[0], so that is the only element handed to kfree() before the
 * list structure itself is released. A NULL list is accepted as a no-op.
 */
static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
{
	if (!opt_list)
		return;

	if (opt_list->count != 0) {
		kfree(opt_list->items[0]);
		opt_list->count = 0;
	}

	kfree(opt_list);
}
static void ima_lsm_free_rule(struct ima_rule_entry *entry)
{
int i;
for (i = 0; i < MAX_LSM_RULES; i++) {
ima_filter_rule_free(entry->lsm[i].rule);
kfree(entry->lsm[i].args_p);
}
}
/*
 * ima_free_rule - release a dynamically allocated policy rule
 * @entry: rule to free, may be NULL
 *
 * The visible excerpt stopped after ima_lsm_free_rule() with no closing
 * brace: the label option list and @entry itself were never freed
 * (memory leak). Restored the missing frees and the function end.
 */
static void ima_free_rule(struct ima_rule_entry *entry)
{
	if (!entry)
		return;

	/*
	 * entry->template->fields may be allocated in ima_parse_rule() but that
	 * reference is owned by the corresponding ima_template_desc element in
	 * the defined_templates list and cannot be freed here
	 */
	kfree(entry->fsname);
	ima_free_rule_opt_list(entry->keyrings);
	ima_free_rule_opt_list(entry->label);
	ima_lsm_free_rule(entry);
	kfree(entry);
}
/*
 * ima_lsm_copy_rule - duplicate a rule, reinitializing its LSM references
 * @entry: rule to copy
 * @gfp: allocation flags
 *
 * Returns a shallow copy of @entry with freshly initialized LSM audit
 * rules, or NULL on allocation failure. The loop index "i" was used but
 * never declared in this excerpt — declaration restored.
 */
static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
						gfp_t gfp)
{
	struct ima_rule_entry *nentry;
	int i;

	/*
	 * Immutable elements are copied over as pointers and data; only
	 * lsm rules can change
	 */
	nentry = kmemdup(entry, sizeof(*nentry), gfp);
	if (!nentry)
		return NULL;

	memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));

	for (i = 0; i < MAX_LSM_RULES; i++) {
		if (!entry->lsm[i].args_p)
			continue;

		nentry->lsm[i].type = entry->lsm[i].type;
		nentry->lsm[i].args_p = entry->lsm[i].args_p;

		ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
				     nentry->lsm[i].args_p,
				     &nentry->lsm[i].rule,
				     gfp);
		if (!nentry->lsm[i].rule)
			pr_warn("rule for LSM \'%s\' is undefined\n",
				nentry->lsm[i].args_p);
	}
	return nentry;
}
/*
 * ima_lsm_update_rule - replace a rule with a reinitialized copy, in place
 * @entry: rule whose LSM references went stale
 *
 * Swaps @entry for a fresh copy on the RCU-protected rule list, waits for
 * readers, then frees only the old entry's LSM references and the entry
 * itself. The excerpt ended mid-function (missing "int i;", the
 * kfree(entry), "return 0;" and the closing brace, followed by scraper
 * junk) — restored.
 */
static int ima_lsm_update_rule(struct ima_rule_entry *entry)
{
	struct ima_rule_entry *nentry;
	int i;

	nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
	if (!nentry)
		return -ENOMEM;

	list_replace_rcu(&entry->list, &nentry->list);
	synchronize_rcu();

	/*
	 * ima_lsm_copy_rule() shallow copied all references, except for the
	 * LSM references, from entry to nentry so we only want to free the LSM
	 * references and the entry itself. All other memory references will now
	 * be owned by nentry.
	 */
	for (i = 0; i < MAX_LSM_RULES; i++)
		ima_filter_rule_free(entry->lsm[i].rule);
	kfree(entry);

	return 0;
}
static bool ima_rule_contains_lsm_cond(struct ima_rule_entry *entry)
{
int i;
for (i = 0; i < MAX_LSM_RULES; i++)
if (entry->lsm[i].args_p)
return true;
return false;
}
/*
* The LSM policy can be reloaded, leaving the IMA LSM based rules referring
* to the old, stale LSM policy. Update the IMA LSM based rules to reflect
* the reloaded LSM policy.
*/
/*
 * ima_lsm_update_rules - reinitialize all custom rules with LSM conditions
 *
 * Walks the custom policy list and replaces every rule carrying LSM
 * conditions via ima_lsm_update_rule(). The excerpt ended inside the
 * error branch (missing "return;" and two closing braces) — restored:
 * on the first update failure the walk is abandoned.
 */
static void ima_lsm_update_rules(void)
{
	struct ima_rule_entry *entry, *e;
	int result;

	list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
		if (!ima_rule_contains_lsm_cond(entry))
			continue;

		result = ima_lsm_update_rule(entry);
		if (result) {
			pr_err("lsm rule update error %d\n", result);
			return;
		}
	}
}
/*
 * Notifier callback invoked on LSM events: on an LSM policy reload,
 * rebuild the IMA rules that carry LSM conditions; ignore anything else.
 */
int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
			  void *lsm_data)
{
	if (event == LSM_POLICY_CHANGE) {
		ima_lsm_update_rules();
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
/**
* ima_match_rule_data - determine whether func_data matches the policy rule
* @rule: a pointer to a rule
* @func_data: data to match against the measure rule data
* @cred: a pointer to a credentials structure for user validation
*
* Returns true if func_data matches one in the rule, false otherwise.
*/
static bool ima_match_rule_data(struct ima_rule_entry *rule,
const char *func_data,
const struct cred *cred)
const struct ima_rule_opt_list *opt_list = NULL;
bool matched = false;
if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
return false;
switch (rule->func) {
case KEY_CHECK:
if (!rule->keyrings)
return true;
opt_list = rule->keyrings;
break;
if (!rule->label)
return true;
opt_list = rule->label;
break;
default:
return false;
}
if (!func_data)
for (i = 0; i < opt_list->count; i++) {
if (!strcmp(opt_list->items[i], func_data)) {
matched = true;
break;
}
}
return matched;
}
/**
* ima_match_rules - determine whether an inode matches the policy rule.
* @rule: a pointer to a rule
* @idmap: idmap of the mount the inode was found from
* @inode: a pointer to an inode
* @cred: a pointer to a credentials structure for user validation
* @prop: LSM properties of the task to be validated
* @func: LIM hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
* @func_data: func specific data, may be NULL
*
* Returns true on rule match, false on failure.
*/
/*
 * NOTE(review): this copy of ima_match_rules() is visibly truncated —
 * missing at least: the "struct mnt_idmap *idmap" parameter (idmap is
 * used below), the opening brace, the "retry:" label targeted by the
 * goto, several "return false" statements after the IMA_FUNC/IMA_MASK/
 * IMA_FSUUID/IMA_UID/IMA_EUID checks, the IMA_FOWNER/IMA_FGROUP operand
 * lines, the LSM for-loop header with the "i" and "rc" declarations, and
 * the trailing "return result;" plus closing brace. Restore from
 * upstream ima_policy.c before building; code left byte-identical here.
 */
static bool ima_match_rules(struct ima_rule_entry *rule,
struct inode *inode, const struct cred *cred,
struct lsm_prop *prop, enum ima_hooks func, int mask,
const char *func_data)
bool result = false;
/* may be swapped for a GFP_ATOMIC copy if the LSM reference is stale */
struct ima_rule_entry *lsm_rule = rule;
bool rule_reinitialized = false;
/* hook must match, except for the POST_SETATTR re-check */
if ((rule->flags & IMA_FUNC) &&
(rule->func != func && func != POST_SETATTR))
switch (func) {
case KEY_CHECK:
case CRITICAL_DATA:
/* these hooks additionally match on func-specific data */
return ((rule->func == func) &&
ima_match_rule_data(rule, func_data, cred));
default:
break;
}
if ((rule->flags & IMA_MASK) &&
(rule->mask != mask && func != POST_SETATTR))
/* IMA_INMASK: any overlapping permission bit matches */
if ((rule->flags & IMA_INMASK) &&
(!(rule->mask & mask) && func != POST_SETATTR))
return false;
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
if ((rule->flags & IMA_FSNAME)
&& strcmp(rule->fsname, inode->i_sb->s_type->name))
return false;
if ((rule->flags & IMA_FSUUID) &&
!uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
/* CAP_SETUID holders are matched on euid, suid and real uid */
if (rule->flags & IMA_EUID) {
if (has_capability_noaudit(current, CAP_SETUID)) {
if (!rule->uid_op(cred->euid, rule->uid)
&& !rule->uid_op(cred->suid, rule->uid)
&& !rule->uid_op(cred->uid, rule->uid))
} else if (!rule->uid_op(cred->euid, rule->uid))
if ((rule->flags & IMA_GID) && !rule->gid_op(cred->gid, rule->gid))
return false;
/* CAP_SETGID holders are matched on egid, sgid and real gid */
if (rule->flags & IMA_EGID) {
if (has_capability_noaudit(current, CAP_SETGID)) {
if (!rule->gid_op(cred->egid, rule->gid)
&& !rule->gid_op(cred->sgid, rule->gid)
&& !rule->gid_op(cred->gid, rule->gid))
return false;
} else if (!rule->gid_op(cred->egid, rule->gid))
return false;
}
if ((rule->flags & IMA_FOWNER) &&
!rule->fowner_op(i_uid_into_vfsuid(idmap, inode),
!rule->fgroup_op(i_gid_into_vfsgid(idmap, inode),
struct lsm_prop inode_prop = { };
/* configured-but-uninitialized LSM rules can never match */
if (!lsm_rule->lsm[i].rule) {
if (!lsm_rule->lsm[i].args_p)
continue;
else
return false;
}
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
case LSM_OBJ_TYPE:
/* object conditions compare against the inode's LSM label */
security_inode_getlsmprop(inode, &inode_prop);
rc = ima_filter_rule_match(&inode_prop,
lsm_rule->lsm[i].type,
lsm_rule->lsm[i].rule);
break;
case LSM_SUBJ_USER:
case LSM_SUBJ_ROLE:
case LSM_SUBJ_TYPE:
/* subject conditions compare against the task's LSM label */
rc = ima_filter_rule_match(prop, lsm_rule->lsm[i].type,
lsm_rule->lsm[i].rule);
/* stale LSM reference: reinitialize the rule once and retry */
if (rc == -ESTALE && !rule_reinitialized) {
lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
if (lsm_rule) {
rule_reinitialized = true;
goto retry;
}
}
if (!rc) {
result = false;
goto out;
}
}
result = true;
out:
/* the reinitialized copy is local to this call; free it on exit */
if (rule_reinitialized) {
for (i = 0; i < MAX_LSM_RULES; i++)
ima_filter_rule_free(lsm_rule->lsm[i].rule);
kfree(lsm_rule);
/*
* In addition to knowing that we need to appraise the file in general,
* we need to differentiate between calling hooks, for hook specific rules.
*/
/*
 * NOTE(review): truncated excerpt — the opening brace, the
 * "switch (func)" statement, the MMAP_CHECK and FILE_CHECK case labels
 * and the closing brace are missing; restore from upstream. Code left
 * byte-identical.
 *
 * Maps the calling hook to its hook-specific appraisal sub-action so
 * appraisal status can be tracked per hook.
 */
static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
if (!(rule->flags & IMA_FUNC))
return IMA_FILE_APPRAISE;
return IMA_MMAP_APPRAISE;
case BPRM_CHECK:
return IMA_BPRM_APPRAISE;
case CREDS_CHECK:
return IMA_CREDS_APPRAISE;
case POST_SETATTR:
case MODULE_CHECK ... MAX_CHECK - 1:
default:
return IMA_READ_APPRAISE;
/**
* ima_match_policy - decision based on LSM and other conditions
* @idmap: idmap of the mount the inode was found from
* @inode: pointer to an inode for which the policy decision is being made
* @cred: pointer to a credentials structure for which the policy decision is
* being made
* @prop: LSM properties of the task to be validated
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
* @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
* @pcr: set the pcr to extend
* @template_desc: the template that should be used for this rule
* @func_data: func specific data, may be NULL
* @allowed_algos: allowlist of hash algorithms for the IMA xattr
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
*
* Since the IMA policy may be updated multiple times we need to lock the
* list when walking it. Reads are many orders of magnitude more numerous
* than writes so ima_match_policy() is classical RCU candidate.
*/
/*
 * NOTE(review): truncated excerpt — the opening brace, the
 * rcu_read_lock()/rcu_read_unlock() pair, the "continue;" after a failed
 * ima_match_rules(), the early "break" once actmask is exhausted, and the
 * trailing "return action;" with the closing brace are missing. Three
 * scraper-artifact lines are embedded inside the ima_match_rules() call
 * below and are preserved byte-identical here. Restore from upstream.
 */
int ima_match_policy(struct mnt_idmap *idmap, struct inode *inode,
const struct cred *cred, struct lsm_prop *prop,
enum ima_hooks func, int mask, int flags, int *pcr,
struct ima_template_desc **template_desc,
const char *func_data, unsigned int *allowed_algos)
struct ima_rule_entry *entry;
/* actmask carries the DO and DONT bit for each requested action */
int action = 0, actmask = flags | (flags << 1);
struct list_head *ima_rules_tmp;
if (template_desc && !*template_desc)
*template_desc = ima_template_desc_current();
ima_rules_tmp = rcu_dereference(ima_rules);
list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
if (!(entry->action & actmask))
continue;
if (!ima_match_rules(entry, idmap, inode, cred, prop,

Linus Torvalds
committed
func, mask, func_data))
action |= entry->flags & IMA_NONACTION_FLAGS;
action |= entry->action & IMA_DO_MASK;
if (entry->action & IMA_APPRAISE) {
action |= get_subaction(entry, func);
if (ima_fail_unverifiable_sigs)
action |= IMA_FAIL_UNVERIFIABLE_SIGS;
if (allowed_algos &&
entry->flags & IMA_VALIDATE_ALGOS)
*allowed_algos = entry->allowed_algos;
}
/* a DO action clears both bits; a DONT action clears its DO bit */
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
else
actmask &= ~(entry->action | entry->action >> 1);
if ((pcr) && (entry->flags & IMA_PCR))
*pcr = entry->pcr;
if (template_desc && entry->template)
*template_desc = entry->template;
/**
* ima_update_policy_flags() - Update global IMA variables
*
* Update ima_policy_flag and ima_setxattr_allowed_hash_algorithms
* based on the currently loaded policy.
*
* With ima_policy_flag, the decision to short circuit out of a function
* or not call the function in the first place can be made earlier.
*
* With ima_setxattr_allowed_hash_algorithms, the policy can restrict the
* set of hash algorithms accepted when updating the security.ima xattr of
* a file.
*
* Context: called after a policy update and at system initialization.
*/
/*
 * NOTE(review): truncated excerpt — the opening brace, the
 * "struct ima_rule_entry *entry;" declaration, the
 * rcu_read_lock()/rcu_read_unlock() pair, the loop's closing brace, the
 * "if (!ima_appraise)" guard around clearing IMA_APPRAISE, and the
 * function's closing brace are missing; restore from upstream. Code left
 * byte-identical.
 */
void ima_update_policy_flags(void)
int new_policy_flag = 0;
struct list_head *ima_rules_tmp;
ima_rules_tmp = rcu_dereference(ima_rules);
list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
/*
 * SETXATTR_CHECK rules do not implement a full policy check
 * because rule checking would probably have an important
 * performance impact on setxattr(). As a consequence, only one
 * SETXATTR_CHECK can be active at a given time.
 * Because we want to preserve that property, we set out to use
 * atomic_cmpxchg. Either:
 * - the atomic was non-zero: a setxattr hash policy is
 *   already enforced, we do nothing
 * - the atomic was zero: no setxattr policy was set, enable
 *   the setxattr hash policy
 */
if (entry->func == SETXATTR_CHECK) {
atomic_cmpxchg(&ima_setxattr_allowed_hash_algorithms,
0, entry->allowed_algos);
/* SETXATTR_CHECK doesn't impact ima_policy_flag */
continue;
}
new_policy_flag |= entry->action;
ima_appraise |= (build_ima_appraise | temp_ima_appraise);
new_policy_flag &= ~IMA_APPRAISE;
ima_policy_flag = new_policy_flag;
/*
 * Map an appraisal hook to its IMA_APPRAISE_* flag bit; hooks with no
 * dedicated flag yield 0. The excerpt fell off the end of this non-void
 * function (undefined behavior) — the final "return 0;" and closing
 * brace were restored.
 */
static int ima_appraise_flag(enum ima_hooks func)
{
	if (func == MODULE_CHECK)
		return IMA_APPRAISE_MODULES;
	else if (func == FIRMWARE_CHECK)
		return IMA_APPRAISE_FIRMWARE;
	else if (func == POLICY_CHECK)
		return IMA_APPRAISE_POLICY;
	else if (func == KEXEC_KERNEL_CHECK)
		return IMA_APPRAISE_KEXEC;
	return 0;
}
/*
 * add_rules - append builtin rules to the default and/or custom lists
 * @entries: rule array to add
 * @count: number of rules in @entries
 * @policy_rule: which list(s) to add to (IMA_DEFAULT_POLICY and/or
 *	IMA_CUSTOM_POLICY)
 *
 * Default-policy additions link the static entries directly; custom
 * policy additions duplicate each entry (a failed kmemdup() skips that
 * rule). APPRAISE rules also accumulate the per-hook appraise flags.
 * The excerpt was missing the function's closing brace (followed by a
 * run of scraper-artifact line numbers) — restored/removed.
 */
static void add_rules(struct ima_rule_entry *entries, int count,
		      enum policy_rule_list policy_rule)
{
	int i = 0;

	for (i = 0; i < count; i++) {
		struct ima_rule_entry *entry;

		if (policy_rule & IMA_DEFAULT_POLICY)
			list_add_tail(&entries[i].list, &ima_default_rules);

		if (policy_rule & IMA_CUSTOM_POLICY) {
			entry = kmemdup(&entries[i], sizeof(*entry),
					GFP_KERNEL);
			if (!entry)
				continue;

			list_add_tail(&entry->list, &ima_policy_rules);
		}
		if (entries[i].action == APPRAISE) {
			if (entries != build_appraise_rules)
				temp_ima_appraise |=
					ima_appraise_flag(entries[i].func);
			else
				build_ima_appraise |=
					ima_appraise_flag(entries[i].func);
		}
	}
}
static int ima_parse_rule(char *rule, struct ima_rule_entry *entry);
/*
 * Convert the architecture-provided policy strings (a NULL-terminated
 * array from arch_get_ima_policy()) into parsed ima_rule_entry slots in
 * arch_policy_entry. Unparseable strings are warned about and skipped.
 *
 * Returns the number of successfully parsed rules (0 when the arch
 * supplies no policy or the table allocation fails).
 */
static int __init ima_init_arch_policy(void)
{
	const char * const *arch_rules;
	const char * const *rule_str;
	int nr_strings = 0;
	int nr_parsed = 0;

	arch_rules = arch_get_ima_policy();
	if (!arch_rules)
		return 0;

	/* Count the incoming rule strings. */
	for (rule_str = arch_rules; *rule_str != NULL; rule_str++)
		nr_strings++;

	/* One extra zeroed slot terminates the table. */
	arch_policy_entry = kcalloc(nr_strings + 1,
				    sizeof(*arch_policy_entry), GFP_KERNEL);
	if (!arch_policy_entry)
		return 0;

	/* Parse each string into struct ima_rule_entry form. */
	for (rule_str = arch_rules; *rule_str != NULL; rule_str++) {
		char buf[255];

		/* ima_parse_rule() modifies its input, so parse a copy. */
		strscpy(buf, *rule_str, sizeof(buf));
		INIT_LIST_HEAD(&arch_policy_entry[nr_parsed].list);
		if (ima_parse_rule(buf, &arch_policy_entry[nr_parsed])) {
			pr_warn("Skipping unknown architecture policy rule: %s\n",
				buf);
			memset(&arch_policy_entry[nr_parsed], 0,
			       sizeof(*arch_policy_entry));
			continue;
		}
		nr_parsed++;
	}
	return nr_parsed;
}
/**
* ima_init_policy - initialize the default measure rules.
*
* ima_rules points to either the ima_default_rules or the new ima_policy_rules.
*/
void __init ima_init_policy(void)
int build_appraise_entries, arch_entries;
/* if !ima_policy, we load NO default rules */
if (ima_policy)
add_rules(dont_measure_rules, ARRAY_SIZE(dont_measure_rules),
IMA_DEFAULT_POLICY);
switch (ima_policy) {
case ORIGINAL_TCB:
add_rules(original_measurement_rules,
ARRAY_SIZE(original_measurement_rules),
IMA_DEFAULT_POLICY);
add_rules(default_measurement_rules,
ARRAY_SIZE(default_measurement_rules),
IMA_DEFAULT_POLICY);
/*
* Based on runtime secure boot flags, insert arch specific measurement
* and appraise rules requiring file signatures for both the initial
* and custom policies, prior to other appraise rules.
* (Highest priority)
*/
arch_entries = ima_init_arch_policy();
if (!arch_entries)
pr_info("No architecture policies found\n");
else
add_rules(arch_policy_entry, arch_entries,
IMA_DEFAULT_POLICY | IMA_CUSTOM_POLICY);
* Insert the builtin "secure_boot" policy rules requiring file
* signatures, prior to other appraise rules.
if (ima_use_secure_boot)
add_rules(secure_boot_rules, ARRAY_SIZE(secure_boot_rules),
IMA_DEFAULT_POLICY);
/*
* Insert the build time appraise rules requiring file signatures
* for both the initial and custom policies, prior to other appraise
* rules. As the secure boot rules includes all of the build time
* rules, include either one or the other set of rules, but not both.
build_appraise_entries = ARRAY_SIZE(build_appraise_rules);
if (build_appraise_entries) {
if (ima_use_secure_boot)
add_rules(build_appraise_rules, build_appraise_entries,
IMA_CUSTOM_POLICY);
else