Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig | 47
-rw-r--r--  security/Kconfig.hardening | 153
-rw-r--r--  security/Makefile | 2
-rw-r--r--  security/apparmor/Kconfig | 3
-rw-r--r--  security/apparmor/Makefile | 6
-rw-r--r--  security/apparmor/af_unix.c | 799
-rw-r--r--  security/apparmor/apparmorfs.c | 53
-rw-r--r--  security/apparmor/audit.c | 2
-rw-r--r--  security/apparmor/capability.c | 61
-rw-r--r--  security/apparmor/crypto.c | 85
-rw-r--r--  security/apparmor/domain.c | 203
-rw-r--r--  security/apparmor/file.c | 94
-rw-r--r--  security/apparmor/include/af_unix.h | 55
-rw-r--r--  security/apparmor/include/apparmor.h | 4
-rw-r--r--  security/apparmor/include/audit.h | 5
-rw-r--r--  security/apparmor/include/capability.h | 1
-rw-r--r--  security/apparmor/include/cred.h | 31
-rw-r--r--  security/apparmor/include/file.h | 11
-rw-r--r--  security/apparmor/include/ipc.h | 3
-rw-r--r--  security/apparmor/include/label.h | 51
-rw-r--r--  security/apparmor/include/lib.h | 46
-rw-r--r--  security/apparmor/include/match.h | 10
-rw-r--r--  security/apparmor/include/net.h | 38
-rw-r--r--  security/apparmor/include/path.h | 1
-rw-r--r--  security/apparmor/include/perms.h | 8
-rw-r--r--  security/apparmor/include/policy.h | 59
-rw-r--r--  security/apparmor/include/secid.h | 7
-rw-r--r--  security/apparmor/include/sig_names.h | 6
-rw-r--r--  security/apparmor/include/signal.h | 19
-rw-r--r--  security/apparmor/ipc.c | 13
-rw-r--r--  security/apparmor/label.c | 37
-rw-r--r--  security/apparmor/lib.c | 114
-rw-r--r--  security/apparmor/lsm.c | 470
-rw-r--r--  security/apparmor/match.c | 23
-rw-r--r--  security/apparmor/mount.c | 12
-rw-r--r--  security/apparmor/net.c | 189
-rw-r--r--  security/apparmor/policy.c | 93
-rw-r--r--  security/apparmor/policy_compat.c | 6
-rw-r--r--  security/apparmor/policy_ns.c | 2
-rw-r--r--  security/apparmor/policy_unpack.c | 67
-rw-r--r--  security/apparmor/policy_unpack_test.c | 8
-rw-r--r--  security/apparmor/procattr.c | 6
-rw-r--r--  security/apparmor/resource.c | 11
-rw-r--r--  security/apparmor/secid.c | 34
-rw-r--r--  security/apparmor/task.c | 11
-rw-r--r--  security/bpf/hooks.c | 1
-rw-r--r--  security/commoncap.c | 111
-rw-r--r--  security/inode.c | 64
-rw-r--r--  security/integrity/evm/evm_crypto.c | 2
-rw-r--r--  security/integrity/evm/evm_main.c | 2
-rw-r--r--  security/integrity/evm/evm_secfs.c | 15
-rw-r--r--  security/integrity/ima/Kconfig | 11
-rw-r--r--  security/integrity/ima/ima.h | 13
-rw-r--r--  security/integrity/ima/ima_appraise.c | 27
-rw-r--r--  security/integrity/ima/ima_fs.c | 137
-rw-r--r--  security/integrity/ima/ima_init.c | 2
-rw-r--r--  security/integrity/ima/ima_kexec.c | 199
-rw-r--r--  security/integrity/ima/ima_main.c | 90
-rw-r--r--  security/integrity/ima/ima_policy.c | 5
-rw-r--r--  security/integrity/ima/ima_queue.c | 49
-rw-r--r--  security/integrity/platform_certs/load_powerpc.c | 5
-rw-r--r--  security/ipe/Kconfig | 1
-rw-r--r--  security/ipe/audit.c | 52
-rw-r--r--  security/ipe/fs.c | 57
-rw-r--r--  security/ipe/policy.c | 17
-rw-r--r--  security/ipe/policy_fs.c | 40
-rw-r--r--  security/keys/Kconfig | 2
-rw-r--r--  security/keys/gc.c | 4
-rw-r--r--  security/keys/key.c | 3
-rw-r--r--  security/keys/sysctl.c | 2
-rw-r--r--  security/keys/trusted-keys/trusted_dcp.c | 22
-rw-r--r--  security/landlock/.kunitconfig | 2
-rw-r--r--  security/landlock/Makefile | 5
-rw-r--r--  security/landlock/access.h | 100
-rw-r--r--  security/landlock/audit.c | 522
-rw-r--r--  security/landlock/audit.h | 76
-rw-r--r--  security/landlock/cred.c | 28
-rw-r--r--  security/landlock/cred.h | 92
-rw-r--r--  security/landlock/domain.c | 264
-rw-r--r--  security/landlock/domain.h | 174
-rw-r--r--  security/landlock/errata.h | 99
-rw-r--r--  security/landlock/errata/abi-4.h | 15
-rw-r--r--  security/landlock/errata/abi-6.h | 19
-rw-r--r--  security/landlock/fs.c | 426
-rw-r--r--  security/landlock/fs.h | 41
-rw-r--r--  security/landlock/id.c | 295
-rw-r--r--  security/landlock/id.h | 25
-rw-r--r--  security/landlock/limits.h | 7
-rw-r--r--  security/landlock/net.c | 81
-rw-r--r--  security/landlock/ruleset.c | 56
-rw-r--r--  security/landlock/ruleset.h | 98
-rw-r--r--  security/landlock/setup.c | 40
-rw-r--r--  security/landlock/setup.h | 3
-rw-r--r--  security/landlock/syscalls.c | 143
-rw-r--r--  security/landlock/task.c | 257
-rw-r--r--  security/loadpin/Kconfig | 2
-rw-r--r--  security/lockdown/lockdown.c | 2
-rw-r--r--  security/lsm_audit.c | 59
-rw-r--r--  security/min_addr.c | 11
-rw-r--r--  security/safesetid/securityfs.c | 3
-rw-r--r--  security/security.c | 177
-rw-r--r--  security/selinux/Makefile | 7
-rw-r--r--  security/selinux/avc.c | 63
-rw-r--r--  security/selinux/hooks.c | 448
-rw-r--r--  security/selinux/ibpkey.c | 13
-rw-r--r--  security/selinux/include/avc.h | 9
-rw-r--r--  security/selinux/include/classmap.h | 12
-rw-r--r--  security/selinux/include/conditional.h | 2
-rw-r--r--  security/selinux/include/netnode.h | 2
-rw-r--r--  security/selinux/include/objsec.h | 24
-rw-r--r--  security/selinux/include/policycap.h | 2
-rw-r--r--  security/selinux/include/policycap_names.h | 2
-rw-r--r--  security/selinux/include/security.h | 22
-rw-r--r--  security/selinux/netif.c | 6
-rw-r--r--  security/selinux/netnode.c | 15
-rw-r--r--  security/selinux/netport.c | 14
-rw-r--r--  security/selinux/nlmsgtab.c | 1
-rw-r--r--  security/selinux/selinuxfs.c | 11
-rw-r--r--  security/selinux/ss/avtab.c | 19
-rw-r--r--  security/selinux/ss/avtab.h | 13
-rw-r--r--  security/selinux/ss/conditional.c | 24
-rw-r--r--  security/selinux/ss/conditional.h | 6
-rw-r--r--  security/selinux/ss/context.c | 2
-rw-r--r--  security/selinux/ss/context.h | 14
-rw-r--r--  security/selinux/ss/ebitmap.c | 12
-rw-r--r--  security/selinux/ss/ebitmap.h | 7
-rw-r--r--  security/selinux/ss/hashtab.c | 3
-rw-r--r--  security/selinux/ss/mls.c | 6
-rw-r--r--  security/selinux/ss/mls_types.h | 2
-rw-r--r--  security/selinux/ss/policydb.c | 154
-rw-r--r--  security/selinux/ss/policydb.h | 24
-rw-r--r--  security/selinux/ss/services.c | 139
-rw-r--r--  security/selinux/ss/sidtab.c | 6
-rw-r--r--  security/selinux/xfrm.c | 2
-rw-r--r--  security/smack/smack.h | 21
-rw-r--r--  security/smack/smack_access.c | 16
-rw-r--r--  security/smack/smack_lsm.c | 119
-rw-r--r--  security/smack/smackfs.c | 67
-rw-r--r--  security/tomoyo/common.c | 117
-rw-r--r--  security/tomoyo/domain.c | 13
-rw-r--r--  security/tomoyo/securityfs_if.c | 6
-rw-r--r--  security/tomoyo/tomoyo.c | 5
-rw-r--r--  security/yama/yama_lsm.c | 15
143 files changed, 6465 insertions, 2014 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 28e685f53bd1..4816fc74f81e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -51,6 +51,27 @@ config PROC_MEM_NO_FORCE
endchoice
+config MSEAL_SYSTEM_MAPPINGS
+ bool "mseal system mappings"
+ depends on 64BIT
+ depends on ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ depends on !CHECKPOINT_RESTORE
+ help
+ Apply mseal on system mappings.
+ The system mappings include vdso, vvar, vvar_vclock,
+ vectors (arm compat-mode), sigpage (arm compat-mode), uprobes.
+
+ A 64-bit kernel is required for the memory sealing feature.
+ No specific hardware features from the CPU are needed.
+
+ WARNING: This feature breaks programs which rely on relocating
+ or unmapping system mappings. Known broken software at the time
+ of writing includes CHECKPOINT_RESTORE, UML, gVisor, rr. Therefore
+ this config can't be enabled universally.
+
+ For complete descriptions of memory sealing, please see
+ Documentation/userspace-api/mseal.rst
+
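
For illustration (not part of the patch): a minimal userspace sketch of the effect described above, assuming a kernel built with this option on an architecture that exposes a vdso. Once the system mappings are sealed, attempts to unmap or move them fail with EPERM, which is exactly what breaks the relocation tricks used by the software listed in the warning.

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/mman.h>

int main(void)
{
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
        long page = sysconf(_SC_PAGESIZE);

        if (!vdso)
                return 1;       /* no vdso exposed in this configuration */

        /* on a sealed mapping this fails with EPERM instead of unmapping */
        if (munmap((void *)vdso, page) == -1)
                printf("munmap(vdso): %s\n", strerror(errno));
        else
                printf("vdso unmapped: system mappings are not sealed\n");
        return 0;
}
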
config SECURITY
bool "Enable different security models"
depends on SYSFS
@@ -64,6 +85,11 @@ config SECURITY
If you are unsure how to answer this question, answer N.
+config HAS_SECURITY_AUDIT
+ def_bool y
+ depends on AUDIT
+ depends on SECURITY
+
config SECURITYFS
bool "Enable the securityfs filesystem"
help
@@ -159,27 +185,6 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
-config HARDENED_USERCOPY
- bool "Harden memory copies between kernel and userspace"
- imply STRICT_DEVMEM
- help
- This option checks for obviously wrong memory regions when
- copying memory to/from the kernel (via copy_to_user() and
- copy_from_user() functions) by rejecting memory ranges that
- are larger than the specified heap object, span multiple
- separately allocated pages, are not on the process stack,
- or are part of the kernel text. This prevents entire classes
- of heap overflow exploits and similar kernel memory exposures.
-
-config FORTIFY_SOURCE
- bool "Harden common str/mem functions against buffer overflows"
- depends on ARCH_HAS_FORTIFY_SOURCE
- # https://github.com/llvm/llvm-project/issues/53645
- depends on !CC_IS_CLANG || !X86_32
- help
- Detect overflows of buffers in common string and memory functions
- where the compiler can determine and validate the buffer sizes.
-
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c9d5ca3d8d08..b9a5bc3430aa 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -1,22 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Kernel hardening options"
-config GCC_PLUGIN_STRUCTLEAK
- bool
- help
- While the kernel is built with warnings enabled for any missed
- stack variable initializations, this warning is silenced for
- anything passed by reference to another function, under the
- occasionally misguided assumption that the function will do
- the initialization. As this regularly leads to exploitable
- flaws, this plugin is available to identify and zero-initialize
- such variables, depending on the chosen level of coverage.
-
- This plugin was originally ported from grsecurity/PaX. More
- information at:
- * https://grsecurity.net/
- * https://pax.grsecurity.net/
-
menu "Memory initialization"
config CC_HAS_AUTO_VAR_INIT_PATTERN
@@ -36,7 +20,6 @@ config CC_HAS_AUTO_VAR_INIT_ZERO
choice
prompt "Initialize kernel stack variables at function entry"
- default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS
default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN
default INIT_STACK_ALL_ZERO if CC_HAS_AUTO_VAR_INIT_ZERO
default INIT_STACK_NONE
@@ -60,55 +43,6 @@ choice
classes of uninitialized stack variable exploits
and information exposures.
- config GCC_PLUGIN_STRUCTLEAK_USER
- bool "zero-init structs marked for userspace (weak)"
- # Plugin can be removed once the kernel only supports GCC 12+
- depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
- select GCC_PLUGIN_STRUCTLEAK
- help
- Zero-initialize any structures on the stack containing
- a __user attribute. This can prevent some classes of
- uninitialized stack variable exploits and information
- exposures, like CVE-2013-2141:
- https://git.kernel.org/linus/b9e146d8eb3b9eca
-
- config GCC_PLUGIN_STRUCTLEAK_BYREF
- bool "zero-init structs passed by reference (strong)"
- # Plugin can be removed once the kernel only supports GCC 12+
- depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
- depends on !(KASAN && KASAN_STACK)
- select GCC_PLUGIN_STRUCTLEAK
- help
- Zero-initialize any structures on the stack that may
- be passed by reference and had not already been
- explicitly initialized. This can prevent most classes
- of uninitialized stack variable exploits and information
- exposures, like CVE-2017-1000410:
- https://git.kernel.org/linus/06e7e776ca4d3654
-
- As a side-effect, this keeps a lot of variables on the
- stack that can otherwise be optimized out, so combining
- this with CONFIG_KASAN_STACK can lead to a stack overflow
- and is disallowed.
-
- config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
- bool "zero-init everything passed by reference (very strong)"
- # Plugin can be removed once the kernel only supports GCC 12+
- depends on GCC_PLUGINS && !CC_HAS_AUTO_VAR_INIT_ZERO
- depends on !(KASAN && KASAN_STACK)
- select GCC_PLUGIN_STRUCTLEAK
- help
- Zero-initialize any stack variables that may be passed
- by reference and had not already been explicitly
- initialized. This is intended to eliminate all classes
- of uninitialized stack variable exploits and information
- exposures.
-
- As a side-effect, this keeps a lot of variables on the
- stack that can otherwise be optimized out, so combining
- this with CONFIG_KASAN_STACK can lead to a stack overflow
- and is disallowed.
-
config INIT_STACK_ALL_PATTERN
bool "pattern-init everything (strongest)"
depends on CC_HAS_AUTO_VAR_INIT_PATTERN
@@ -127,6 +61,7 @@ choice
repeating for all types and padding except float and double
which use 0xFF repeating (-NaN). Clang on 32-bit uses 0xFF
repeating for all types and padding.
+ GCC uses 0xFE repeating for all types, and zero for padding.
config INIT_STACK_ALL_ZERO
bool "zero-init everything (strongest and safest)"
@@ -147,20 +82,13 @@ choice
endchoice
-config GCC_PLUGIN_STRUCTLEAK_VERBOSE
- bool "Report forcefully initialized variables"
- depends on GCC_PLUGIN_STRUCTLEAK
- depends on !COMPILE_TEST # too noisy
- help
- This option will cause a warning to be printed each time the
- structleak plugin finds a variable it thinks needs to be
- initialized. Since not all existing initializers are detected
- by the plugin, this can produce false positive warnings.
+config CC_HAS_SANCOV_STACK_DEPTH_CALLBACK
+ def_bool $(cc-option,-fsanitize-coverage-stack-depth-callback-min=1)
-config GCC_PLUGIN_STACKLEAK
+config KSTACK_ERASE
bool "Poison kernel stack before returning from syscalls"
- depends on GCC_PLUGINS
- depends on HAVE_ARCH_STACKLEAK
+ depends on HAVE_ARCH_KSTACK_ERASE
+ depends on GCC_PLUGINS || CC_HAS_SANCOV_STACK_DEPTH_CALLBACK
help
This option makes the kernel erase the kernel stack before
returning from system calls. This has the effect of leaving
@@ -178,6 +106,10 @@ config GCC_PLUGIN_STACKLEAK
are advised to test this feature on your expected workload before
deploying it.
+config GCC_PLUGIN_STACKLEAK
+ def_bool KSTACK_ERASE
+ depends on GCC_PLUGINS
+ help
This plugin was ported from grsecurity/PaX. More information at:
* https://grsecurity.net/
* https://pax.grsecurity.net/
@@ -192,37 +124,37 @@ config GCC_PLUGIN_STACKLEAK_VERBOSE
instrumented. This is useful for comparing coverage between
builds.
-config STACKLEAK_TRACK_MIN_SIZE
- int "Minimum stack frame size of functions tracked by STACKLEAK"
+config KSTACK_ERASE_TRACK_MIN_SIZE
+ int "Minimum stack frame size of functions tracked by KSTACK_ERASE"
default 100
range 0 4096
- depends on GCC_PLUGIN_STACKLEAK
+ depends on KSTACK_ERASE
help
- The STACKLEAK gcc plugin instruments the kernel code for tracking
+ The KSTACK_ERASE option instruments the kernel code for tracking
the lowest border of the kernel stack (and for some other purposes).
- It inserts the stackleak_track_stack() call for the functions with
- a stack frame size greater than or equal to this parameter.
+ It inserts the __sanitizer_cov_stack_depth() call for the functions
+ with a stack frame size greater than or equal to this parameter.
If unsure, leave the default value 100.
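
To make the help text concrete, here is a simplified sketch of what the inserted tracking call conceptually does; it is not the kernel implementation, and track_stack_depth/lowest_stack are illustrative names only:

#include <stdint.h>

/* per-task watermark: lowest stack address seen since the last erase,
 * reset towards the top of the stack when the task enters the kernel */
static uintptr_t lowest_stack;

/* the compiler emits a call like this at the start of every function
 * whose stack frame is at least KSTACK_ERASE_TRACK_MIN_SIZE bytes */
void track_stack_depth(void)
{
        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);

        if (sp < lowest_stack)
                lowest_stack = sp;      /* stacks grow down: deeper use */
}

The erase pass at syscall exit uses this watermark to know how much of the stack was actually used and therefore needs to be poisoned.
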
-config STACKLEAK_METRICS
- bool "Show STACKLEAK metrics in the /proc file system"
- depends on GCC_PLUGIN_STACKLEAK
+config KSTACK_ERASE_METRICS
+ bool "Show KSTACK_ERASE metrics in the /proc file system"
+ depends on KSTACK_ERASE
depends on PROC_FS
help
- If this is set, STACKLEAK metrics for every task are available in
- the /proc file system. In particular, /proc/<pid>/stack_depth
+ If this is set, KSTACK_ERASE metrics for every task are available
+ in the /proc file system. In particular, /proc/<pid>/stack_depth
shows the maximum kernel stack consumption for the current and
previous syscalls. Although this information is not precise, it
- can be useful for estimating the STACKLEAK performance impact for
- your workloads.
+ can be useful for estimating the KSTACK_ERASE performance impact
+ for your workloads.
-config STACKLEAK_RUNTIME_DISABLE
+config KSTACK_ERASE_RUNTIME_DISABLE
bool "Allow runtime disabling of kernel stack erasing"
- depends on GCC_PLUGIN_STACKLEAK
+ depends on KSTACK_ERASE
help
This option provides 'stack_erasing' sysctl, which can be used in
runtime to control kernel stack erasing for kernels built with
- CONFIG_GCC_PLUGIN_STACKLEAK.
+ CONFIG_KSTACK_ERASE.
config INIT_ON_ALLOC_DEFAULT_ON
bool "Enable heap memory zeroing on allocation by default"
@@ -279,6 +211,39 @@ config ZERO_CALL_USED_REGS
endmenu
+menu "Bounds checking"
+
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ # https://github.com/llvm/llvm-project/issues/53645
+ depends on !X86_32 || !CC_IS_CLANG || CLANG_VERSION >= 160000
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ imply STRICT_DEVMEM
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This prevents entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_DEFAULT_ON
+ bool "Harden memory copies by default"
+ depends on HARDENED_USERCOPY
+ default HARDENED_USERCOPY
+ help
+ This has the effect of setting "hardened_usercopy=on" on the kernel
+ command line. This can be disabled with "hardened_usercopy=off".
+
+endmenu
+
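
As background for the two options moved into this menu (a sketch, not taken from the patch): FORTIFY_SOURCE hinges on the compiler reporting the destination size through __builtin_object_size(). The names below (fortified_copy, struct pkt) are hypothetical:

#include <stdio.h>
#include <string.h>

struct pkt {
        char hdr[8];
        char body[56];
};

static void fortified_copy(char *dst, const char *src, size_t n, size_t dst_size)
{
        /* (size_t)-1 means the compiler could not determine the size */
        if (dst_size != (size_t)-1 && n > dst_size) {
                fprintf(stderr, "refusing %zu-byte copy into %zu-byte buffer\n",
                        n, dst_size);
                return;         /* the kernel would WARN and refuse instead */
        }
        memcpy(dst, src, n);
}

void fill(struct pkt *p, const char *src)
{
        /* the compiler knows p->hdr is 8 bytes, so the 16-byte copy is refused */
        fortified_copy(p->hdr, src, 16, __builtin_object_size(p->hdr, 1));
}

int main(void)
{
        struct pkt p;

        fill(&p, "0123456789abcdef");
        return 0;
}

HARDENED_USERCOPY applies an analogous run-time bounds check to the copy_to_user()/copy_from_user() paths described in its help text.
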
menu "Hardening of kernel data structures"
config LIST_HARDENED
diff --git a/security/Makefile b/security/Makefile
index cc0982214b84..22ff4c8bd8ce 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_SECURITY) += security.o
obj-$(CONFIG_SECURITYFS) += inode.o
obj-$(CONFIG_SECURITY_SELINUX) += selinux/
obj-$(CONFIG_SECURITY_SMACK) += smack/
-obj-$(CONFIG_SECURITY) += lsm_audit.o
+obj-$(CONFIG_HAS_SECURITY_AUDIT) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index 64cc3044a42c..1e3bd44643da 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -59,8 +59,7 @@ config SECURITY_APPARMOR_INTROSPECT_POLICY
config SECURITY_APPARMOR_HASH
bool "Enable introspection of sha256 hashes for loaded profiles"
depends on SECURITY_APPARMOR_INTROSPECT_POLICY
- select CRYPTO
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
default y
help
This option selects whether introspection of loaded policy
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index b9c5879dd599..12fb419714c0 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
apparmor-y := apparmorfs.o audit.o capability.o task.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
resource.o secid.o file.o policy_ns.o label.o mount.o net.o \
- policy_compat.o
+ policy_compat.o af_unix.o
apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
obj-$(CONFIG_SECURITY_APPARMOR_KUNIT_TEST) += apparmor_policy_unpack_test.o
@@ -28,7 +28,7 @@ clean-files := capability_names.h rlim_names.h net_names.h
# to
# #define AA_SFS_AF_MASK "local inet"
quiet_cmd_make-af = GEN $@
-cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
+cmd_make-af = echo "static const char *const address_family_names[] = {" > $@ ;\
sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
echo "};" >> $@ ;\
@@ -43,7 +43,7 @@ cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
# to
# [1] = "stream",
quiet_cmd_make-sock = GEN $@
-cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
+cmd_make-sock = echo "static const char *const sock_type_names[] = {" >> $@ ;\
sed $^ >>$@ -r -n \
-e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
echo "};" >> $@
diff --git a/security/apparmor/af_unix.c b/security/apparmor/af_unix.c
new file mode 100644
index 000000000000..9129766d1e9c
--- /dev/null
+++ b/security/apparmor/af_unix.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor af_unix fine grained mediation
+ *
+ * Copyright 2023 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/fs.h>
+#include <net/tcp_states.h>
+
+#include "include/audit.h"
+#include "include/af_unix.h"
+#include "include/apparmor.h"
+#include "include/file.h"
+#include "include/label.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/cred.h"
+
+
+static inline struct sock *aa_unix_sk(struct unix_sock *u)
+{
+ return &u->sk;
+}
+
+static int unix_fs_perm(const char *op, u32 mask, const struct cred *subj_cred,
+ struct aa_label *label, struct path *path)
+{
+ AA_BUG(!label);
+ AA_BUG(!path);
+
+ if (unconfined(label) || !label_mediates(label, AA_CLASS_FILE))
+ return 0;
+
+ mask &= NET_FS_PERMS;
+ /* if !u->path.dentry socket is being shutdown - implicit delegation
+ * until obj delegation is supported
+ */
+ if (path->dentry) {
+ /* the sunpath may not be valid for this ns so use the path */
+ struct inode *inode = path->dentry->d_inode;
+ vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(path->mnt), inode);
+ struct path_cond cond = {
+ .uid = vfsuid_into_kuid(vfsuid),
+ .mode = inode->i_mode,
+ };
+
+ return aa_path_perm(op, subj_cred, label, path,
+ PATH_SOCK_COND, mask, &cond);
+ } /* else implicitly delegated */
+
+ return 0;
+}
+
+/* match_addr special constants */
+#define ABSTRACT_ADDR "\x00" /* abstract socket addr */
+#define ANONYMOUS_ADDR "\x01" /* anonymous endpoint, no addr */
+#define DISCONNECTED_ADDR "\x02" /* addr is another namespace */
+#define SHUTDOWN_ADDR "\x03" /* path addr is shutdown and cleared */
+#define FS_ADDR "/" /* path addr in fs */
+
+static aa_state_t match_addr(struct aa_dfa *dfa, aa_state_t state,
+ struct sockaddr_un *addr, int addrlen)
+{
+ if (addr)
+ /* include leading \0 */
+ state = aa_dfa_match_len(dfa, state, addr->sun_path,
+ unix_addr_len(addrlen));
+ else
+ state = aa_dfa_match_len(dfa, state, ANONYMOUS_ADDR, 1);
+ /* todo: could change to out of band for cleaner separation */
+ state = aa_dfa_null_transition(dfa, state);
+
+ return state;
+}
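
For reference (not from the patch), the address forms that the special prefixes above distinguish can be recognized from a plain sockaddr_un. The path length here is assumed to be addrlen minus the family header, mirroring what unix_addr_len() presumably computes:

#include <stddef.h>
#include <sys/un.h>

const char *classify(const struct sockaddr_un *addr, int addrlen)
{
        int path_len = addr ?
                addrlen - (int)offsetof(struct sockaddr_un, sun_path) : 0;

        if (path_len <= 0)
                return "anonymous";     /* no name: matched as ANONYMOUS_ADDR */
        if (addr->sun_path[0] == '\0')
                return "abstract";      /* leading NUL is fed to the DFA */
        return "filesystem";            /* mediated through path rules (FS_ADDR) */
}
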
+
+static aa_state_t match_to_local(struct aa_policydb *policy,
+ aa_state_t state, u32 request,
+ int type, int protocol,
+ struct sockaddr_un *addr, int addrlen,
+ struct aa_perms **p,
+ const char **info)
+{
+ state = aa_match_to_prot(policy, state, request, PF_UNIX, type,
+ protocol, NULL, info);
+ if (state) {
+ state = match_addr(policy->dfa, state, addr, addrlen);
+ if (state) {
+ /* todo: local label matching */
+ state = aa_dfa_null_transition(policy->dfa, state);
+ if (!state)
+ *info = "failed local label match";
+ } else {
+ *info = "failed local address match";
+ }
+ }
+
+ return state;
+}
+
+struct sockaddr_un *aa_sunaddr(const struct unix_sock *u, int *addrlen)
+{
+ struct unix_address *addr;
+
+ /* memory barrier is sufficient see note in net/unix/af_unix.c */
+ addr = smp_load_acquire(&u->addr);
+ if (addr) {
+ *addrlen = addr->len;
+ return addr->name;
+ }
+ *addrlen = 0;
+ return NULL;
+}
+
+static aa_state_t match_to_sk(struct aa_policydb *policy,
+ aa_state_t state, u32 request,
+ struct unix_sock *u, struct aa_perms **p,
+ const char **info)
+{
+ int addrlen;
+ struct sockaddr_un *addr = aa_sunaddr(u, &addrlen);
+
+ return match_to_local(policy, state, request, u->sk.sk_type,
+ u->sk.sk_protocol, addr, addrlen, p, info);
+}
+
+#define CMD_ADDR 1
+#define CMD_LISTEN 2
+#define CMD_OPT 4
+
+static aa_state_t match_to_cmd(struct aa_policydb *policy, aa_state_t state,
+ u32 request, struct unix_sock *u,
+ char cmd, struct aa_perms **p,
+ const char **info)
+{
+ AA_BUG(!p);
+
+ state = match_to_sk(policy, state, request, u, p, info);
+ if (state && !*p) {
+ state = aa_dfa_match_len(policy->dfa, state, &cmd, 1);
+ if (!state)
+ *info = "failed cmd selection match";
+ }
+
+ return state;
+}
+
+static aa_state_t match_to_peer(struct aa_policydb *policy, aa_state_t state,
+ u32 request, struct unix_sock *u,
+ struct sockaddr_un *peer_addr, int peer_addrlen,
+ struct aa_perms **p, const char **info)
+{
+ AA_BUG(!p);
+
+ state = match_to_cmd(policy, state, request, u, CMD_ADDR, p, info);
+ if (state && !*p) {
+ state = match_addr(policy->dfa, state, peer_addr, peer_addrlen);
+ if (!state)
+ *info = "failed peer address match";
+ }
+
+ return state;
+}
+
+static aa_state_t match_label(struct aa_profile *profile,
+ struct aa_ruleset *rule, aa_state_t state,
+ u32 request, struct aa_profile *peer,
+ struct aa_perms *p,
+ struct apparmor_audit_data *ad)
+{
+ AA_BUG(!profile);
+ AA_BUG(!peer);
+
+ ad->peer = &peer->label;
+
+ if (state && !p) {
+ state = aa_dfa_match(rule->policy->dfa, state,
+ peer->base.hname);
+ if (!state)
+ ad->info = "failed peer label match";
+
+ }
+
+ return aa_do_perms(profile, rule->policy, state, request, p, ad);
+}
+
+
+/* unix sock creation comes before we know if the socket will be an fs
+ * socket
+ * v6 - semantics are handled by mapping in profile load
+ * v7 - semantics require sock create for tasks creating an fs socket.
+ * v8 - same as v7
+ */
+static int profile_create_perm(struct aa_profile *profile, int family,
+ int type, int protocol,
+ struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ state = aa_match_to_prot(rules->policy, state, AA_MAY_CREATE,
+ PF_UNIX, type, protocol, NULL,
+ &ad->info);
+
+ return aa_do_perms(profile, rules->policy, state, AA_MAY_CREATE,
+ NULL, ad);
+ }
+
+ return aa_profile_af_perm(profile, ad, AA_MAY_CREATE, family, type,
+ protocol);
+}
+
+static int profile_sk_perm(struct aa_profile *profile,
+ struct apparmor_audit_data *ad,
+ u32 request, struct sock *sk, struct path *path)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(!sk);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ if (is_unix_fs(sk))
+ return unix_fs_perm(ad->op, request, ad->subj_cred,
+ &profile->label,
+ &unix_sk(sk)->path);
+
+ state = match_to_sk(rules->policy, state, request, unix_sk(sk),
+ &p, &ad->info);
+
+ return aa_do_perms(profile, rules->policy, state, request, p,
+ ad);
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, request, sk);
+}
+
+static int profile_bind_perm(struct aa_profile *profile, struct sock *sk,
+ struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(!sk);
+ AA_BUG(!ad);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ if (is_unix_addr_fs(ad->net.addr, ad->net.addrlen))
+ /* under v7-9 fs hook handles bind */
+ return 0;
+ /* bind for abstract socket */
+ state = match_to_local(rules->policy, state, AA_MAY_BIND,
+ sk->sk_type, sk->sk_protocol,
+ unix_addr(ad->net.addr),
+ ad->net.addrlen,
+ &p, &ad->info);
+
+ return aa_do_perms(profile, rules->policy, state, AA_MAY_BIND,
+ p, ad);
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, AA_MAY_BIND, sk);
+}
+
+static int profile_listen_perm(struct aa_profile *profile, struct sock *sk,
+ int backlog, struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(!sk);
+ AA_BUG(!ad);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ __be16 b = cpu_to_be16(backlog);
+
+ if (is_unix_fs(sk))
+ return unix_fs_perm(ad->op, AA_MAY_LISTEN,
+ ad->subj_cred, &profile->label,
+ &unix_sk(sk)->path);
+
+ state = match_to_cmd(rules->policy, state, AA_MAY_LISTEN,
+ unix_sk(sk), CMD_LISTEN, &p, &ad->info);
+ if (state && !p) {
+ state = aa_dfa_match_len(rules->policy->dfa, state,
+ (char *) &b, 2);
+ if (!state)
+ ad->info = "failed listen backlog match";
+ }
+ return aa_do_perms(profile, rules->policy, state, AA_MAY_LISTEN,
+ p, ad);
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, AA_MAY_LISTEN, sk);
+}
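
A quick worked example of the two-byte match above (not from the patch): the backlog is folded into the DFA input big-endian, high byte first, so a backlog of 128 is matched as the bytes 00 80. htons() stands in for cpu_to_be16():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t b = htons(128);
        const unsigned char *p = (const unsigned char *)&b;

        printf("%02x %02x\n", p[0], p[1]);      /* prints "00 80" */
        return 0;
}
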
+
+static int profile_accept_perm(struct aa_profile *profile,
+ struct sock *sk,
+ struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(!sk);
+ AA_BUG(!ad);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ if (is_unix_fs(sk))
+ return unix_fs_perm(ad->op, AA_MAY_ACCEPT,
+ ad->subj_cred, &profile->label,
+ &unix_sk(sk)->path);
+
+ state = match_to_sk(rules->policy, state, AA_MAY_ACCEPT,
+ unix_sk(sk), &p, &ad->info);
+
+ return aa_do_perms(profile, rules->policy, state, AA_MAY_ACCEPT,
+ p, ad);
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, AA_MAY_ACCEPT, sk);
+}
+
+static int profile_opt_perm(struct aa_profile *profile, u32 request,
+ struct sock *sk, int optname,
+ struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(!sk);
+ AA_BUG(!ad);
+ AA_BUG(profile_unconfined(profile));
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ __be16 b = cpu_to_be16(optname);
+ if (is_unix_fs(sk))
+ return unix_fs_perm(ad->op, request,
+ ad->subj_cred, &profile->label,
+ &unix_sk(sk)->path);
+
+ state = match_to_cmd(rules->policy, state, request, unix_sk(sk),
+ CMD_OPT, &p, &ad->info);
+ if (state && !p) {
+ state = aa_dfa_match_len(rules->policy->dfa, state,
+ (char *) &b, 2);
+ if (!state)
+ ad->info = "failed sockopt match";
+ }
+ return aa_do_perms(profile, rules->policy, state, request, p,
+ ad);
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, request, sk);
+}
+
+/* null peer_label is allowed, in which case the peer_sk label is used */
+static int profile_peer_perm(struct aa_profile *profile, u32 request,
+ struct sock *sk, struct path *path,
+ struct sockaddr_un *peer_addr,
+ int peer_addrlen, struct path *peer_path,
+ struct aa_label *peer_label,
+ struct apparmor_audit_data *ad)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
+ aa_state_t state;
+
+ AA_BUG(!profile);
+ AA_BUG(profile_unconfined(profile));
+ AA_BUG(!sk);
+ AA_BUG(!peer_label);
+ AA_BUG(!ad);
+
+ state = RULE_MEDIATES_v9NET(rules);
+ if (state) {
+ struct aa_profile *peerp;
+
+ if (peer_path)
+ return unix_fs_perm(ad->op, request, ad->subj_cred,
+ &profile->label, peer_path);
+ else if (path)
+ return unix_fs_perm(ad->op, request, ad->subj_cred,
+ &profile->label, path);
+ state = match_to_peer(rules->policy, state, request,
+ unix_sk(sk),
+ peer_addr, peer_addrlen, &p, &ad->info);
+
+ return fn_for_each_in_ns(peer_label, peerp,
+ match_label(profile, rules, state, request,
+ peerp, p, ad));
+ }
+
+ return aa_profile_af_sk_perm(profile, ad, request, sk);
+}
+
+/* -------------------------------- */
+
+int aa_unix_create_perm(struct aa_label *label, int family, int type,
+ int protocol)
+{
+ if (!unconfined(label)) {
+ struct aa_profile *profile;
+ DEFINE_AUDIT_NET(ad, OP_CREATE, current_cred(), NULL, family,
+ type, protocol);
+
+ return fn_for_each_confined(label, profile,
+ profile_create_perm(profile, family, type,
+ protocol, &ad));
+ }
+
+ return 0;
+}
+
+static int aa_unix_label_sk_perm(const struct cred *subj_cred,
+ struct aa_label *label,
+ const char *op, u32 request, struct sock *sk,
+ struct path *path)
+{
+ if (!unconfined(label)) {
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(ad, op, subj_cred, sk);
+
+ return fn_for_each_confined(label, profile,
+ profile_sk_perm(profile, &ad, request, sk,
+ path));
+ }
+ return 0;
+}
+
+/* revalidation, get/set attr, shutdown */
+int aa_unix_sock_perm(const char *op, u32 request, struct socket *sock)
+{
+ struct aa_label *label;
+ int error;
+
+ label = begin_current_label_crit_section();
+ error = aa_unix_label_sk_perm(current_cred(), label, op,
+ request, sock->sk,
+ is_unix_fs(sock->sk) ? &unix_sk(sock->sk)->path : NULL);
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int valid_addr(struct sockaddr *addr, int addr_len)
+{
+ struct sockaddr_un *sunaddr = unix_addr(addr);
+
+ /* addr_len == offsetof(struct sockaddr_un, sun_path) is autobind */
+ if (addr_len < offsetof(struct sockaddr_un, sun_path) ||
+ addr_len > sizeof(*sunaddr))
+ return -EINVAL;
+ return 0;
+}
+
+int aa_unix_bind_perm(struct socket *sock, struct sockaddr *addr,
+ int addrlen)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error = 0;
+
+ error = valid_addr(addr, addrlen);
+ if (error)
+ return error;
+
+ label = begin_current_label_crit_section();
+ /* fs bind is handled by mknod */
+ if (!unconfined(label)) {
+ DEFINE_AUDIT_SK(ad, OP_BIND, current_cred(), sock->sk);
+
+ ad.net.addr = unix_addr(addr);
+ ad.net.addrlen = addrlen;
+
+ error = fn_for_each_confined(label, profile,
+ profile_bind_perm(profile, sock->sk, &ad));
+ }
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+/*
+ * unix connections are covered by the
+ * - unix_stream_connect (stream) and unix_may_send hooks (dgram)
+ * - fs connect is handled by open
+ * This is just here to document this is not needed for af_unix
+ *
+int aa_unix_connect_perm(struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+*/
+
+int aa_unix_listen_perm(struct socket *sock, int backlog)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error = 0;
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ DEFINE_AUDIT_SK(ad, OP_LISTEN, current_cred(), sock->sk);
+
+ error = fn_for_each_confined(label, profile,
+ profile_listen_perm(profile, sock->sk,
+ backlog, &ad));
+ }
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+
+/* ability of sock to connect, not peer address binding */
+int aa_unix_accept_perm(struct socket *sock, struct socket *newsock)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error = 0;
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ DEFINE_AUDIT_SK(ad, OP_ACCEPT, current_cred(), sock->sk);
+
+ error = fn_for_each_confined(label, profile,
+ profile_accept_perm(profile, sock->sk, &ad));
+ }
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+
+/*
+ * dgram handled by unix_may_sendmsg, right to send on stream done at connect
+ * could do per msg unix_stream here, but connect + socket transfer is
+ * sufficient. This is just here to document this is not needed for af_unix
+ *
+ * sendmsg, recvmsg
+int aa_unix_msg_perm(const char *op, u32 request, struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ return 0;
+}
+*/
+
+int aa_unix_opt_perm(const char *op, u32 request, struct socket *sock,
+ int level, int optname)
+{
+ struct aa_profile *profile;
+ struct aa_label *label;
+ int error = 0;
+
+ label = begin_current_label_crit_section();
+ if (!unconfined(label)) {
+ DEFINE_AUDIT_SK(ad, op, current_cred(), sock->sk);
+
+ error = fn_for_each_confined(label, profile,
+ profile_opt_perm(profile, request, sock->sk,
+ optname, &ad));
+ }
+ end_current_label_crit_section(label);
+
+ return error;
+}
+
+static int unix_peer_perm(const struct cred *subj_cred,
+ struct aa_label *label, const char *op, u32 request,
+ struct sock *sk, struct path *path,
+ struct sockaddr_un *peer_addr, int peer_addrlen,
+ struct path *peer_path, struct aa_label *peer_label)
+{
+ struct aa_profile *profile;
+ DEFINE_AUDIT_SK(ad, op, subj_cred, sk);
+
+ ad.net.peer.addr = peer_addr;
+ ad.net.peer.addrlen = peer_addrlen;
+
+ return fn_for_each_confined(label, profile,
+ profile_peer_perm(profile, request, sk, path,
+ peer_addr, peer_addrlen, peer_path,
+ peer_label, &ad));
+}
+
+/**
+ *
+ * Requires: lock held on both @sk and @peer_sk
+ * called by unix_stream_connect, unix_may_send
+ */
+int aa_unix_peer_perm(const struct cred *subj_cred,
+ struct aa_label *label, const char *op, u32 request,
+ struct sock *sk, struct sock *peer_sk,
+ struct aa_label *peer_label)
+{
+ struct unix_sock *peeru = unix_sk(peer_sk);
+ struct unix_sock *u = unix_sk(sk);
+ int plen;
+ struct sockaddr_un *paddr = aa_sunaddr(unix_sk(peer_sk), &plen);
+
+ AA_BUG(!label);
+ AA_BUG(!sk);
+ AA_BUG(!peer_sk);
+ AA_BUG(!peer_label);
+
+ return unix_peer_perm(subj_cred, label, op, request, sk,
+ is_unix_fs(sk) ? &u->path : NULL,
+ paddr, plen,
+ is_unix_fs(peer_sk) ? &peeru->path : NULL,
+ peer_label);
+}
+
+/* sk_plabel for comparison only */
+static void update_sk_ctx(struct sock *sk, struct aa_label *label,
+ struct aa_label *plabel)
+{
+ struct aa_label *l, *old;
+ struct aa_sk_ctx *ctx = aa_sock(sk);
+ bool update_sk;
+
+ rcu_read_lock();
+ update_sk = (plabel &&
+ (plabel != rcu_access_pointer(ctx->peer_lastupdate) ||
+ !aa_label_is_subset(plabel, rcu_dereference(ctx->peer)))) ||
+ !__aa_subj_label_is_cached(label, rcu_dereference(ctx->label));
+ rcu_read_unlock();
+ if (!update_sk)
+ return;
+
+ spin_lock(&unix_sk(sk)->lock);
+ old = rcu_dereference_protected(ctx->label,
+ lockdep_is_held(&unix_sk(sk)->lock));
+ l = aa_label_merge(old, label, GFP_ATOMIC);
+ if (l) {
+ if (l != old) {
+ rcu_assign_pointer(ctx->label, l);
+ aa_put_label(old);
+ } else
+ aa_put_label(l);
+ }
+ if (plabel && rcu_access_pointer(ctx->peer_lastupdate) != plabel) {
+ old = rcu_dereference_protected(ctx->peer, lockdep_is_held(&unix_sk(sk)->lock));
+
+ if (old == plabel) {
+ rcu_assign_pointer(ctx->peer_lastupdate, plabel);
+ } else if (aa_label_is_subset(plabel, old)) {
+ rcu_assign_pointer(ctx->peer_lastupdate, plabel);
+ rcu_assign_pointer(ctx->peer, aa_get_label(plabel));
+ aa_put_label(old);
+ } /* else race or a subset - don't update */
+ }
+ spin_unlock(&unix_sk(sk)->lock);
+}
+
+static void update_peer_ctx(struct sock *sk, struct aa_sk_ctx *ctx,
+ struct aa_label *label)
+{
+ struct aa_label *l, *old;
+
+ spin_lock(&unix_sk(sk)->lock);
+ old = rcu_dereference_protected(ctx->peer,
+ lockdep_is_held(&unix_sk(sk)->lock));
+ l = aa_label_merge(old, label, GFP_ATOMIC);
+ if (l) {
+ if (l != old) {
+ rcu_assign_pointer(ctx->peer, l);
+ aa_put_label(old);
+ } else
+ aa_put_label(l);
+ }
+ spin_unlock(&unix_sk(sk)->lock);
+}
+
+/* This fn is only checked if something has changed in the security
+ * boundaries. Otherwise cached info off file is sufficient
+ */
+int aa_unix_file_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request, struct file *file)
+{
+ struct socket *sock = (struct socket *) file->private_data;
+ struct sockaddr_un *addr, *peer_addr;
+ int addrlen, peer_addrlen;
+ struct aa_label *plabel = NULL;
+ struct sock *peer_sk = NULL;
+ u32 sk_req = request & ~NET_PEER_MASK;
+ struct path path;
+ bool is_sk_fs;
+ int error = 0;
+
+ AA_BUG(!label);
+ AA_BUG(!sock);
+ AA_BUG(!sock->sk);
+ AA_BUG(sock->sk->sk_family != PF_UNIX);
+
+ /* investigate only using lock via unix_peer_get()
+ * addr only needs the memory barrier, but need to investigate
+ * path
+ */
+ unix_state_lock(sock->sk);
+ peer_sk = unix_peer(sock->sk);
+ if (peer_sk)
+ sock_hold(peer_sk);
+
+ is_sk_fs = is_unix_fs(sock->sk);
+ addr = aa_sunaddr(unix_sk(sock->sk), &addrlen);
+ path = unix_sk(sock->sk)->path;
+ unix_state_unlock(sock->sk);
+
+ if (is_sk_fs && peer_sk)
+ sk_req = request;
+ if (sk_req) {
+ error = aa_unix_label_sk_perm(subj_cred, label, op,
+ sk_req, sock->sk,
+ is_sk_fs ? &path : NULL);
+ }
+ if (!peer_sk)
+ goto out;
+
+ peer_addr = aa_sunaddr(unix_sk(peer_sk), &peer_addrlen);
+
+ struct path peer_path;
+
+ peer_path = unix_sk(peer_sk)->path;
+ if (!is_sk_fs && is_unix_fs(peer_sk)) {
+ last_error(error,
+ unix_fs_perm(op, request, subj_cred, label,
+ is_unix_fs(peer_sk) ? &peer_path : NULL));
+ } else if (!is_sk_fs) {
+ struct aa_label *plabel;
+ struct aa_sk_ctx *pctx = aa_sock(peer_sk);
+
+ rcu_read_lock();
+ plabel = aa_get_label_rcu(&pctx->label);
+ rcu_read_unlock();
+ /* no fs check of aa_unix_peer_perm because conditions above
+ * ensure they will never be done
+ */
+ last_error(error,
+ xcheck(unix_peer_perm(subj_cred, label, op,
+ MAY_READ | MAY_WRITE, sock->sk,
+ is_sk_fs ? &path : NULL,
+ peer_addr, peer_addrlen,
+ is_unix_fs(peer_sk) ?
+ &peer_path : NULL,
+ plabel),
+ unix_peer_perm(file->f_cred, plabel, op,
+ MAY_READ | MAY_WRITE, peer_sk,
+ is_unix_fs(peer_sk) ?
+ &peer_path : NULL,
+ addr, addrlen,
+ is_sk_fs ? &path : NULL,
+ label)));
+ if (!error && !__aa_subj_label_is_cached(plabel, label))
+ update_peer_ctx(peer_sk, pctx, label);
+ }
+ sock_put(peer_sk);
+
+out:
+
+ /* update peer cache to latest successful perm check */
+ if (error == 0)
+ update_sk_ctx(sock->sk, label, plabel);
+ aa_put_label(plabel);
+
+ return error;
+}
+
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 2c0185ebc900..391a586d0557 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -43,7 +43,7 @@
* The interface is split into two main components based on their function
* a securityfs component:
* used for static files that are always available, and which allows
- * userspace to specificy the location of the security filesystem.
+ * userspace to specify the location of the security filesystem.
*
* fns and data are prefixed with
* aa_sfs_
@@ -204,7 +204,7 @@ static struct file_system_type aafs_ops = {
/**
* __aafs_setup_d_inode - basic inode setup for apparmorfs
* @dir: parent directory for the dentry
- * @dentry: dentry we are seting the inode up for
+ * @dentry: dentry we are setting the inode up for
* @mode: permissions the file should have
* @data: data to store on inode.i_private, available in open()
* @link: if symlink, symlink target string
@@ -283,7 +283,7 @@ static struct dentry *aafs_create(const char *name, umode_t mode,
dir = d_inode(parent);
inode_lock(dir);
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
goto fail_lock;
@@ -612,8 +612,7 @@ static const struct file_operations aa_fs_ns_revision_fops = {
static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
const char *match_str, size_t match_len)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_perms tmp = { };
aa_state_t state = DFA_NOMATCH;
@@ -626,11 +625,20 @@ static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
if (state) {
struct path_cond cond = { };
- tmp = *(aa_lookup_fperms(rules->file, state, &cond));
+ tmp = *(aa_lookup_condperms(current_fsuid(),
+ rules->file, state, &cond));
}
} else if (rules->policy->dfa) {
if (!RULE_MEDIATES(rules, *match_str))
return; /* no change to current perms */
+ /* old user space does not correctly detect dbus mediation
+ * support so we may get dbus policy and requests when
+ * the abi doesn't support it. This can cause mediation
+ * regressions, so explicitly test for this situation.
+ */
+ if (*match_str == AA_CLASS_DBUS &&
+ !RULE_MEDIATES_v9NET(rules))
+ return; /* no change to current perms */
state = aa_dfa_match_len(rules->policy->dfa,
rules->policy->start[0],
match_str, match_len);
@@ -997,7 +1005,7 @@ static int aa_sfs_seq_show(struct seq_file *seq, void *v)
switch (fs_file->v_type) {
case AA_SFS_TYPE_BOOLEAN:
- seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
+ seq_printf(seq, "%s\n", str_yes_no(fs_file->v.boolean));
break;
case AA_SFS_TYPE_STRING:
seq_printf(seq, "%s\n", fs_file->v.string);
@@ -1006,7 +1014,7 @@ static int aa_sfs_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%#08lx\n", fs_file->v.u64);
break;
default:
- /* Ignore unpritable entry types. */
+ /* Ignore unprintable entry types. */
break;
}
@@ -1152,7 +1160,7 @@ static int seq_ns_stacked_show(struct seq_file *seq, void *v)
struct aa_label *label;
label = begin_current_label_crit_section();
- seq_printf(seq, "%s\n", label->size > 1 ? "yes" : "no");
+ seq_printf(seq, "%s\n", str_yes_no(label->size > 1));
end_current_label_crit_section(label);
return 0;
@@ -1175,7 +1183,7 @@ static int seq_ns_nsstacked_show(struct seq_file *seq, void *v)
}
}
- seq_printf(seq, "%s\n", count > 1 ? "yes" : "no");
+ seq_printf(seq, "%s\n", str_yes_no(count > 1));
end_current_label_crit_section(label);
return 0;
@@ -1795,8 +1803,8 @@ fail2:
return error;
}
-static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct aa_ns *ns, *parent;
/* TODO: improve permission check */
@@ -1808,7 +1816,7 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
- return error;
+ return ERR_PTR(error);
parent = aa_get_ns(dir->i_private);
AA_BUG(d_inode(ns_subns_dir(parent)) != dir);
@@ -1843,7 +1851,7 @@ out:
mutex_unlock(&parent->lock);
aa_put_ns(parent);
- return error;
+ return ERR_PTR(error);
}
static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
@@ -2244,7 +2252,7 @@ static void *p_next(struct seq_file *f, void *p, loff_t *pos)
/**
* p_stop - stop depth first traversal
* @f: seq_file we are filling
- * @p: the last profile writen
+ * @p: the last profile written
*
* Release all locking done by p_start/p_next on namespace tree
*/
@@ -2332,6 +2340,7 @@ static struct aa_sfs_entry aa_sfs_entry_attach[] = {
static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("change_hat", 1),
AA_SFS_FILE_BOOLEAN("change_hatv", 1),
+ AA_SFS_FILE_BOOLEAN("unconfined_allowed_children", 1),
AA_SFS_FILE_BOOLEAN("change_onexec", 1),
AA_SFS_FILE_BOOLEAN("change_profile", 1),
AA_SFS_FILE_BOOLEAN("stack", 1),
@@ -2340,6 +2349,7 @@ static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("computed_longest_left", 1),
AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach),
AA_SFS_FILE_BOOLEAN("disconnected.path", 1),
+ AA_SFS_FILE_BOOLEAN("kill.signal", 1),
AA_SFS_FILE_STRING("version", "1.2"),
{ }
};
@@ -2364,7 +2374,7 @@ static struct aa_sfs_entry aa_sfs_entry_policy[] = {
AA_SFS_FILE_BOOLEAN("set_load", 1),
/* number of out of band transitions supported */
AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED),
- AA_SFS_FILE_U64("permstable32_version", 1),
+ AA_SFS_FILE_U64("permstable32_version", 3),
AA_SFS_FILE_STRING("permstable32", PERMS32STR),
AA_SFS_FILE_U64("state32", 1),
AA_SFS_DIR("unconfined_restrictions", aa_sfs_entry_unconfined),
@@ -2384,6 +2394,11 @@ static struct aa_sfs_entry aa_sfs_entry_ns[] = {
{ }
};
+static struct aa_sfs_entry aa_sfs_entry_dbus[] = {
+ AA_SFS_FILE_STRING("mask", "acquire send receive"),
+ { }
+};
+
static struct aa_sfs_entry aa_sfs_entry_query_label[] = {
AA_SFS_FILE_STRING("perms", "allow deny audit quiet"),
AA_SFS_FILE_BOOLEAN("data", 1),
@@ -2406,6 +2421,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("domain", aa_sfs_entry_domain),
AA_SFS_DIR("file", aa_sfs_entry_file),
AA_SFS_DIR("network_v8", aa_sfs_entry_network),
+ AA_SFS_DIR("network_v9", aa_sfs_entry_networkv9),
AA_SFS_DIR("mount", aa_sfs_entry_mount),
AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
@@ -2413,6 +2429,7 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("caps", aa_sfs_entry_caps),
AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace),
AA_SFS_DIR("signal", aa_sfs_entry_signal),
+ AA_SFS_DIR("dbus", aa_sfs_entry_dbus),
AA_SFS_DIR("query", aa_sfs_entry_query),
AA_SFS_DIR("io_uring", aa_sfs_entry_io_uring),
{ }
@@ -2551,7 +2568,7 @@ static int aa_mk_null_file(struct dentry *parent)
return error;
inode_lock(d_inode(parent));
- dentry = lookup_one_len(NULL_FILE_NAME, parent, strlen(NULL_FILE_NAME));
+ dentry = lookup_noperm(&QSTR(NULL_FILE_NAME), parent);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
goto out;
@@ -2612,7 +2629,7 @@ static int policy_readlink(struct dentry *dentry, char __user *buffer,
res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME,
d_inode(dentry)->i_ino);
if (res > 0 && res < sizeof(name))
- res = readlink_copy(buffer, buflen, name);
+ res = readlink_copy(buffer, buflen, name, strlen(name));
else
res = -ENOENT;
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 73087d76f649..ac89602aa2d9 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -192,7 +192,7 @@ int aa_audit(int type, struct aa_profile *profile,
aa_audit_msg(type, ad, cb);
if (ad->type == AUDIT_APPARMOR_KILL)
- (void)send_sig_info(SIGKILL, NULL,
+ (void)send_sig_info(profile->signal, NULL,
ad->common.type == LSM_AUDIT_DATA_TASK &&
ad->common.u.tsk ? ad->common.u.tsk : current);
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 7ca489ee1054..b9ea6bc45c1a 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -27,6 +27,7 @@
struct aa_sfs_entry aa_sfs_entry_caps[] = {
AA_SFS_FILE_STRING("mask", AA_SFS_CAPS_MASK),
+ AA_SFS_FILE_BOOLEAN("extended", 1),
{ }
};
@@ -68,8 +69,7 @@ static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile
{
const u64 AUDIT_CACHE_TIMEOUT_NS = 1000*1000*1000; /* 1 second */
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
@@ -121,10 +121,32 @@ static int audit_caps(struct apparmor_audit_data *ad, struct aa_profile *profile
static int profile_capable(struct aa_profile *profile, int cap,
unsigned int opts, struct apparmor_audit_data *ad)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
+ aa_state_t state;
int error;
+ state = RULE_MEDIATES(rules, ad->class);
+ if (state) {
+ struct aa_perms perms = { };
+ u32 request;
+
+ /* caps broken into 256 x 32 bit permission chunks */
+ state = aa_dfa_next(rules->policy->dfa, state, cap >> 5);
+ request = 1 << (cap & 0x1f);
+ perms = *aa_lookup_perms(rules->policy, state);
+ aa_apply_modes_to_perms(profile, &perms);
+
+ if (opts & CAP_OPT_NOAUDIT) {
+ if (perms.complain & request)
+ ad->info = "optional: no audit";
+ else
+ ad = NULL;
+ }
+ return aa_check_perms(profile, &perms, request, ad,
+ audit_cb);
+ }
+
+ /* fallback to old caps mediation that doesn't support conditionals */
if (cap_raised(rules->caps.allow, cap) &&
!cap_raised(rules->caps.denied, cap))
error = 0;
@@ -168,3 +190,34 @@ int aa_capable(const struct cred *subj_cred, struct aa_label *label,
return error;
}
+
+kernel_cap_t aa_profile_capget(struct aa_profile *profile)
+{
+ struct aa_ruleset *rules = profile->label.rules[0];
+ aa_state_t state;
+
+ state = RULE_MEDIATES(rules, AA_CLASS_CAP);
+ if (state) {
+ kernel_cap_t caps = CAP_EMPTY_SET;
+ int i;
+
+ /* caps broken into up to 256, 32 bit permission chunks */
+ for (i = 0; i < (CAP_LAST_CAP >> 5); i++) {
+ struct aa_perms perms = { };
+ aa_state_t tmp;
+
+ tmp = aa_dfa_next(rules->policy->dfa, state, i);
+ perms = *aa_lookup_perms(rules->policy, tmp);
+ aa_apply_modes_to_perms(profile, &perms);
+ caps.val |= ((u64)(perms.allow)) << (i * 5);
+ caps.val |= ((u64)(perms.complain)) << (i * 5);
+ }
+ return caps;
+ }
+
+ /* fallback to old caps */
+ if (COMPLAIN_MODE(profile))
+ return CAP_FULL_SET;
+
+ return rules->caps.allow;
+}
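
A worked example of the chunk/bit encoding used by profile_capable() and aa_profile_capget() above (not from the patch): the capability number selects a 32-bit chunk with cap >> 5, and the permission bit inside that chunk is 1 << (cap & 0x1f):

#include <stdio.h>
#include <linux/capability.h>   /* CAP_BPF (39) needs reasonably recent headers */

int main(void)
{
        int caps[] = { CAP_NET_ADMIN, CAP_SYS_ADMIN, CAP_BPF };

        for (unsigned int i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
                int cap = caps[i];

                /* e.g. CAP_NET_ADMIN (12) -> chunk 0, bit 0x00001000;
                 *      CAP_BPF (39)       -> chunk 1, bit 0x00000080 */
                printf("cap %2d -> chunk %d, request bit 0x%08x\n",
                       cap, cap >> 5, 1u << (cap & 0x1f));
        }
        return 0;
}
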
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index aad486b2fca6..227d47c14907 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -11,113 +11,52 @@
* it should be.
*/
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
#include "include/apparmor.h"
#include "include/crypto.h"
-static unsigned int apparmor_hash_size;
-
-static struct crypto_shash *apparmor_tfm;
-
unsigned int aa_hash_size(void)
{
- return apparmor_hash_size;
+ return SHA256_DIGEST_SIZE;
}
char *aa_calc_hash(void *data, size_t len)
{
- SHASH_DESC_ON_STACK(desc, apparmor_tfm);
char *hash;
- int error;
-
- if (!apparmor_tfm)
- return NULL;
- hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ hash = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
if (!hash)
return ERR_PTR(-ENOMEM);
- desc->tfm = apparmor_tfm;
-
- error = crypto_shash_init(desc);
- if (error)
- goto fail;
- error = crypto_shash_update(desc, (u8 *) data, len);
- if (error)
- goto fail;
- error = crypto_shash_final(desc, hash);
- if (error)
- goto fail;
-
+ sha256(data, len, hash);
return hash;
-
-fail:
- kfree(hash);
-
- return ERR_PTR(error);
}
int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
size_t len)
{
- SHASH_DESC_ON_STACK(desc, apparmor_tfm);
- int error;
+ struct sha256_ctx sctx;
__le32 le32_version = cpu_to_le32(version);
if (!aa_g_hash_policy)
return 0;
- if (!apparmor_tfm)
- return 0;
-
- profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ profile->hash = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
if (!profile->hash)
return -ENOMEM;
- desc->tfm = apparmor_tfm;
-
- error = crypto_shash_init(desc);
- if (error)
- goto fail;
- error = crypto_shash_update(desc, (u8 *) &le32_version, 4);
- if (error)
- goto fail;
- error = crypto_shash_update(desc, (u8 *) start, len);
- if (error)
- goto fail;
- error = crypto_shash_final(desc, profile->hash);
- if (error)
- goto fail;
-
+ sha256_init(&sctx);
+ sha256_update(&sctx, (u8 *)&le32_version, 4);
+ sha256_update(&sctx, (u8 *)start, len);
+ sha256_final(&sctx, profile->hash);
return 0;
-
-fail:
- kfree(profile->hash);
- profile->hash = NULL;
-
- return error;
}
static int __init init_profile_hash(void)
{
- struct crypto_shash *tfm;
-
- if (!apparmor_initialized)
- return 0;
-
- tfm = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(tfm)) {
- int error = PTR_ERR(tfm);
- AA_ERROR("failed to setup profile sha256 hashing: %d\n", error);
- return error;
- }
- apparmor_tfm = tfm;
- apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
-
- aa_info_message("AppArmor sha256 policy hashing enabled");
-
+ if (apparmor_initialized)
+ aa_info_message("AppArmor sha256 policy hashing enabled");
return 0;
}
-
late_initcall(init_profile_hash);
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 5939bd9a9b9b..267da82afb14 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -28,6 +28,12 @@
#include "include/policy.h"
#include "include/policy_ns.h"
+static const char * const CONFLICTING_ATTACH_STR = "conflicting profile attachments";
+static const char * const CONFLICTING_ATTACH_STR_IX =
+ "conflicting profile attachments - ix fallback";
+static const char * const CONFLICTING_ATTACH_STR_UX =
+ "conflicting profile attachments - ux fallback";
+
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
* @to_cred: cred of task changing domain
@@ -87,8 +93,7 @@ static inline aa_state_t match_component(struct aa_profile *profile,
struct aa_profile *tp,
bool stack, aa_state_t state)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
const char *ns_name;
if (stack)
@@ -125,8 +130,7 @@ static int label_compound_match(struct aa_profile *profile,
aa_state_t state, bool subns, u32 request,
struct aa_perms *perms)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_profile *tp;
struct label_it i;
struct path_cond cond = { };
@@ -154,7 +158,8 @@ next:
if (!state)
goto fail;
}
- *perms = *(aa_lookup_fperms(rules->file, state, &cond));
+ *perms = *(aa_lookup_condperms(current_fsuid(), rules->file, state,
+ &cond));
aa_apply_modes_to_perms(profile, perms);
if ((perms->allow & request) != request)
return -EACCES;
@@ -187,8 +192,7 @@ static int label_components_match(struct aa_profile *profile,
aa_state_t start, bool subns, u32 request,
struct aa_perms *perms)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_profile *tp;
struct label_it i;
struct aa_perms tmp;
@@ -209,7 +213,8 @@ static int label_components_match(struct aa_profile *profile,
return 0;
next:
- tmp = *(aa_lookup_fperms(rules->file, state, &cond));
+ tmp = *(aa_lookup_condperms(current_fsuid(), rules->file, state,
+ &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
label_for_each_cont(i, label, tp) {
@@ -218,7 +223,8 @@ next:
state = match_component(profile, tp, stack, start);
if (!state)
goto fail;
- tmp = *(aa_lookup_fperms(rules->file, state, &cond));
+ tmp = *(aa_lookup_condperms(current_fsuid(), rules->file, state,
+ &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
}
@@ -323,7 +329,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
size = vfs_getxattr_alloc(&nop_mnt_idmap, d, attach->xattrs[i],
&value, value_size, GFP_KERNEL);
if (size >= 0) {
- u32 index, perm;
+ struct aa_perms *perms;
/*
* Check the xattr presence before value. This ensure
@@ -335,9 +341,8 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
/* Check xattr value */
state = aa_dfa_match_len(attach->xmatch->dfa, state,
value, size);
- index = ACCEPT_TABLE(attach->xmatch->dfa)[state];
- perm = attach->xmatch->perms[index].allow;
- if (!(perm & MAY_EXEC)) {
+ perms = aa_lookup_perms(attach->xmatch, state);
+ if (!(perms->allow & MAY_EXEC)) {
ret = -EINVAL;
goto out;
}
@@ -415,15 +420,14 @@ restart:
if (attach->xmatch->dfa) {
unsigned int count;
aa_state_t state;
- u32 index, perm;
+ struct aa_perms *perms;
state = aa_dfa_leftmatch(attach->xmatch->dfa,
attach->xmatch->start[AA_CLASS_XMATCH],
name, &count);
- index = ACCEPT_TABLE(attach->xmatch->dfa)[state];
- perm = attach->xmatch->perms[index].allow;
+ perms = aa_lookup_perms(attach->xmatch, state);
/* any accepting state means a valid match. */
- if (perm & MAY_EXEC) {
+ if (perms->allow & MAY_EXEC) {
int ret = 0;
if (count < candidate_len)
@@ -484,7 +488,7 @@ restart:
if (!candidate || conflict) {
if (conflict)
- *info = "conflicting profile attachments";
+ *info = CONFLICTING_ATTACH_STR;
rcu_read_unlock();
return NULL;
}
@@ -508,15 +512,16 @@ static const char *next_name(int xtype, const char *name)
* @name: returns: name tested to find label (NOT NULL)
*
* Returns: refcounted label, or NULL on failure (MAYBE NULL)
+ * @name will always be set to the last name tried
*/
struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
const char **name)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_label *label = NULL;
u32 xtype = xindex & AA_X_TYPE_MASK;
int index = xindex & AA_X_INDEX_MASK;
+ const char *next;
AA_BUG(!name);
@@ -524,25 +529,27 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
/* TODO: move lookup parsing to unpack time so this is a straight
* index into the resultant label
*/
- for (*name = rules->file->trans.table[index]; !label && *name;
- *name = next_name(xtype, *name)) {
+ for (next = rules->file->trans.table[index]; next;
+ next = next_name(xtype, next)) {
+ const char *lookup = (*next == '&') ? next + 1 : next;
+ *name = next;
if (xindex & AA_X_CHILD) {
- struct aa_profile *new_profile;
- /* release by caller */
- new_profile = aa_find_child(profile, *name);
- if (new_profile)
- label = &new_profile->label;
+ /* TODO: switch to parse to get stack of child */
+ struct aa_profile *new = aa_find_child(profile, lookup);
+
+ if (new)
+ /* release by caller */
+ return &new->label;
continue;
}
- label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
+ label = aa_label_parse(&profile->label, lookup, GFP_KERNEL,
true, false);
- if (IS_ERR(label))
- label = NULL;
+ if (!IS_ERR_OR_NULL(label))
+ /* release by caller */
+ return label;
}
- /* released by caller */
-
- return label;
+ return NULL;
}
/**
@@ -564,12 +571,12 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
const char **lookupname,
const char **info)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
struct aa_label *new = NULL;
+ struct aa_label *stack = NULL;
struct aa_ns *ns = profile->ns;
u32 xtype = xindex & AA_X_TYPE_MASK;
- const char *stack = NULL;
+ /* Used for info checks during fallback handling */
+ const char *old_info = NULL;
switch (xtype) {
case AA_X_NONE:
@@ -578,13 +585,14 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
break;
case AA_X_TABLE:
/* TODO: fix when perm mapping done at unload */
- stack = rules->file->trans.table[xindex & AA_X_INDEX_MASK];
- if (*stack != '&') {
- /* released by caller */
- new = x_table_lookup(profile, xindex, lookupname);
- stack = NULL;
+ /* released by caller
+ * if NULL for both stack and direct lookup, try the fallback
+ */
+ new = x_table_lookup(profile, xindex, lookupname);
+ if (!new || **lookupname != '&')
break;
- }
+ stack = new;
+ new = NULL;
fallthrough; /* to X_NAME */
case AA_X_NAME:
if (xindex & AA_X_CHILD)
@@ -599,17 +607,38 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
break;
}
+ /* fallback transition check */
if (!new) {
if (xindex & AA_X_INHERIT) {
/* (p|c|n)ix - don't change profile but do
* use the newest version
*/
- *info = "ix fallback";
+ if (*info == CONFLICTING_ATTACH_STR) {
+ *info = CONFLICTING_ATTACH_STR_IX;
+ } else {
+ old_info = *info;
+ *info = "ix fallback";
+ }
/* no profile && no error */
new = aa_get_newest_label(&profile->label);
} else if (xindex & AA_X_UNCONFINED) {
new = aa_get_newest_label(ns_unconfined(profile->ns));
- *info = "ux fallback";
+ if (*info == CONFLICTING_ATTACH_STR) {
+ *info = CONFLICTING_ATTACH_STR_UX;
+ } else {
+ old_info = *info;
+ *info = "ux fallback";
+ }
+ }
+ /* We set old_info on the code paths above where overwriting
+ * could have happened, so now check if info was set by
+ * find_attach as well (i.e. whether we actually overwrote)
+ * and warn accordingly.
+ */
+ if (old_info && old_info != CONFLICTING_ATTACH_STR) {
+ pr_warn_ratelimited(
+ "AppArmor: find_attach (from profile %s) audit info \"%s\" dropped",
+ profile->base.hname, old_info);
}
}
@@ -617,12 +646,12 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
/* base the stack on post domain transition */
struct aa_label *base = new;
- new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
- if (IS_ERR(new))
- new = NULL;
+ new = aa_label_merge(base, stack, GFP_KERNEL);
+ /* null on error */
aa_put_label(base);
}
+ aa_put_label(stack);
/* released by caller */
return new;
}
@@ -633,8 +662,7 @@ static struct aa_label *profile_transition(const struct cred *subj_cred,
char *buffer, struct path_cond *cond,
bool *secure_exec)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_label *new = NULL;
struct aa_profile *new_profile = NULL;
const char *info = NULL, *name = NULL, *target = NULL;
@@ -652,7 +680,7 @@ static struct aa_label *profile_transition(const struct cred *subj_cred,
if (error) {
if (profile_unconfined(profile) ||
(profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
- AA_DEBUG("name lookup ix on error");
+ AA_DEBUG(DEBUG_DOMAIN, "name lookup ix on error");
error = 0;
new = aa_get_newest_label(&profile->label);
}
@@ -663,11 +691,27 @@ static struct aa_label *profile_transition(const struct cred *subj_cred,
if (profile_unconfined(profile)) {
new = find_attach(bprm, profile->ns,
&profile->ns->base.profiles, name, &info);
+ /* info set -> something unusual that we should report
+ * Currently this is only conflicting attachments, but other
+ * infos added in the future should also be logged by default
+ * and only excluded on a case-by-case basis
+ */
+ if (info) {
+ /* Because perms is never used again after this audit
+ * we don't need to care about clobbering it
+ */
+ perms.audit |= MAY_EXEC;
+ perms.allow |= MAY_EXEC;
+ /* Don't cause error if auditing fails */
+ (void) aa_audit_file(subj_cred, profile, &perms,
+ OP_EXEC, MAY_EXEC, name, target, new, cond->uid,
+ info, error);
+ }
if (new) {
- AA_DEBUG("unconfined attached to new label");
+ AA_DEBUG(DEBUG_DOMAIN, "unconfined attached to new label");
return new;
}
- AA_DEBUG("unconfined exec no attachment");
+ AA_DEBUG(DEBUG_DOMAIN, "unconfined exec no attachment");
return aa_get_newest_label(&profile->label);
}
@@ -678,9 +722,21 @@ static struct aa_label *profile_transition(const struct cred *subj_cred,
new = x_to_label(profile, bprm, name, perms.xindex, &target,
&info);
if (new && new->proxy == profile->label.proxy && info) {
+ /* Force audit on conflicting attachment fallback
+ * Because perms is never used again after this audit
+ * we don't need to care about clobbering it
+ */
+ if (info == CONFLICTING_ATTACH_STR_IX
+ || info == CONFLICTING_ATTACH_STR_UX)
+ perms.audit |= MAY_EXEC;
/* hack ix fallback - improve how this is detected */
goto audit;
} else if (!new) {
+ if (info) {
+ pr_warn_ratelimited(
+ "AppArmor: %s (from profile %s) audit info \"%s\" dropped on missing transition",
+ __func__, profile->base.hname, info);
+ }
info = "profile transition not found";
/* remove MAY_EXEC to audit as failure or complaint */
perms.allow &= ~MAY_EXEC;
@@ -739,8 +795,7 @@ static int profile_onexec(const struct cred *subj_cred,
char *buffer, struct path_cond *cond,
bool *secure_exec)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
aa_state_t state = rules->file->start[AA_CLASS_FILE];
struct aa_perms perms = {};
const char *xname = NULL, *info = "change_profile onexec";
@@ -755,7 +810,7 @@ static int profile_onexec(const struct cred *subj_cred,
/* change_profile on exec already granted */
/*
* NOTE: Domain transitions from unconfined are allowed
- * even when no_new_privs is set because this aways results
+ * even when no_new_privs is set because this always results
* in a further reduction of permissions.
*/
return 0;
@@ -766,7 +821,7 @@ static int profile_onexec(const struct cred *subj_cred,
if (error) {
if (profile_unconfined(profile) ||
(profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
- AA_DEBUG("name lookup ix on error");
+ AA_DEBUG(DEBUG_DOMAIN, "name lookup ix on error");
error = 0;
}
xname = bprm->filename;
@@ -926,7 +981,7 @@ int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
*
* NOTE: Domain transitions from unconfined and to stacked
* subsets are allowed even when no_new_privs is set because this
- * aways results in a further reduction of permissions.
+ * always results in a further reduction of permissions.
*/
if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
!unconfined(label) &&
@@ -1188,10 +1243,24 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
ctx->nnp = aa_get_label(label);
+ /* return -EPERM when unconfined doesn't have children to avoid
+ * changing the traditional error code for unconfined.
+ */
if (unconfined(label)) {
- info = "unconfined can not change_hat";
- error = -EPERM;
- goto fail;
+ struct label_it i;
+ bool empty = true;
+
+ rcu_read_lock();
+ label_for_each_in_ns(i, labels_ns(label), label, profile) {
+ empty &= list_empty(&profile->base.profiles);
+ }
+ rcu_read_unlock();
+
+ if (empty) {
+ info = "unconfined can not change_hat";
+ error = -EPERM;
+ goto fail;
+ }
}
if (count) {
@@ -1216,7 +1285,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
- AA_DEBUG("no_new_privs - change_hat denied");
+ AA_DEBUG(DEBUG_DOMAIN,
+ "no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
@@ -1237,7 +1307,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(previous, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
- AA_DEBUG("no_new_privs - change_hat denied");
+ AA_DEBUG(DEBUG_DOMAIN,
+ "no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
@@ -1282,8 +1353,7 @@ static int change_profile_perms_wrapper(const char *op, const char *name,
struct aa_label *target, bool stack,
u32 request, struct aa_perms *perms)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
const char *info = NULL;
int error = 0;
@@ -1343,7 +1413,7 @@ int aa_change_profile(const char *fqname, int flags)
if (!fqname || !*fqname) {
aa_put_label(label);
- AA_DEBUG("no profile name");
+ AA_DEBUG(DEBUG_DOMAIN, "no profile name");
return -EINVAL;
}
@@ -1462,7 +1532,8 @@ check:
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
- AA_DEBUG("no_new_privs - change_hat denied");
+ AA_DEBUG(DEBUG_DOMAIN,
+ "no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
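
A detail worth noting in the domain.c changes above: the ix/ux fallback recognizes the conflicting-attachment case by comparing the info pointer against the shared CONFLICTING_ATTACH_STR constant, i.e. by pointer identity rather than string contents. A stand-alone sketch of that idiom (the names and the find_attach_info() helper are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static const char *const CONFLICT = "conflicting profile attachments";

    /* a lookup that may hand back the shared constant, or some other note */
    static const char *find_attach_info(bool conflicting)
    {
            return conflicting ? CONFLICT : "some other info";
    }

    int main(void)
    {
            const char *info = find_attach_info(true);

            /* pointer comparison, not strcmp(): only the shared constant matches */
            if (info == CONFLICT)
                    printf("conflict detected - pick the ix/ux variant\n");
            return 0;
    }
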
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index d52a5b14dad4..c75820402878 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include "include/af_unix.h"
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
@@ -168,8 +169,9 @@ static int path_name(const char *op, const struct cred *subj_cred,
struct aa_perms default_perms = {};
/**
- * aa_lookup_fperms - convert dfa compressed perms to internal perms
- * @file_rules: the aa_policydb to lookup perms for (NOT NULL)
+ * aa_lookup_condperms - convert dfa compressed perms to internal perms
+ * @subj_uid: uid to use for subject owner test
+ * @rules: the aa_policydb to lookup perms for (NOT NULL)
* @state: state in dfa
* @cond: conditions to consider (NOT NULL)
*
@@ -177,18 +179,21 @@ struct aa_perms default_perms = {};
*
* Returns: a pointer to a file permission set
*/
-struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
- aa_state_t state, struct path_cond *cond)
+struct aa_perms *aa_lookup_condperms(kuid_t subj_uid, struct aa_policydb *rules,
+ aa_state_t state, struct path_cond *cond)
{
- unsigned int index = ACCEPT_TABLE(file_rules->dfa)[state];
+ unsigned int index = ACCEPT_TABLE(rules->dfa)[state];
- if (!(file_rules->perms))
+ if (!(rules->perms))
return &default_perms;
- if (uid_eq(current_fsuid(), cond->uid))
- return &(file_rules->perms[index]);
+ if ((ACCEPT_TABLE2(rules->dfa)[state] & ACCEPT_FLAG_OWNER)) {
+ if (uid_eq(subj_uid, cond->uid))
+ return &(rules->perms[index]);
+ return &(rules->perms[index + 1]);
+ }
- return &(file_rules->perms[index + 1]);
+ return &(rules->perms[index]);
}
/**
@@ -207,21 +212,22 @@ aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
{
aa_state_t state;
state = aa_dfa_match(file_rules->dfa, start, name);
- *perms = *(aa_lookup_fperms(file_rules, state, cond));
+ *perms = *(aa_lookup_condperms(current_fsuid(), file_rules, state,
+ cond));
return state;
}
-static int __aa_path_perm(const char *op, const struct cred *subj_cred,
- struct aa_profile *profile, const char *name,
- u32 request, struct path_cond *cond, int flags,
- struct aa_perms *perms)
+int __aa_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_profile *profile, const char *name,
+ u32 request, struct path_cond *cond, int flags,
+ struct aa_perms *perms)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
int e = 0;
- if (profile_unconfined(profile))
+ if (profile_unconfined(profile) ||
+ ((flags & PATH_SOCK_COND) && !RULE_MEDIATES_v9NET(rules)))
return 0;
aa_str_perms(rules->file, rules->file->start[AA_CLASS_FILE],
name, cond, perms);
@@ -316,8 +322,7 @@ static int profile_path_link(const struct cred *subj_cred,
const struct path *target, char *buffer2,
struct path_cond *cond)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
const char *lname, *tname = NULL;
struct aa_perms lperms = {}, perms;
const char *info = NULL;
@@ -423,9 +428,11 @@ int aa_path_link(const struct cred *subj_cred,
{
struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
+ struct inode *inode = d_backing_inode(old_dentry);
+ vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(target.mnt), inode);
struct path_cond cond = {
- d_backing_inode(old_dentry)->i_uid,
- d_backing_inode(old_dentry)->i_mode
+ .uid = vfsuid_into_kuid(vfsuid),
+ .mode = inode->i_mode,
};
char *buffer = NULL, *buffer2 = NULL;
struct aa_profile *profile;
@@ -534,22 +541,19 @@ static int __file_sock_perm(const char *op, const struct cred *subj_cred,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied)
{
- struct socket *sock = (struct socket *) file->private_data;
int error;
- AA_BUG(!sock);
-
/* revalidation due to label out of date. No revocation at this time */
if (!denied && aa_label_is_subset(flabel, label))
return 0;
/* TODO: improve to skip profiles cached in flabel */
- error = aa_sock_file_perm(subj_cred, label, op, request, sock);
+ error = aa_sock_file_perm(subj_cred, label, op, request, file);
if (denied) {
/* TODO: improve to skip profiles checked above */
/* check every profile in file label to is cached */
last_error(error, aa_sock_file_perm(subj_cred, flabel, op,
- request, sock));
+ request, file));
}
if (!error)
update_file_ctx(file_ctx(file), label, request);
@@ -557,6 +561,35 @@ static int __file_sock_perm(const char *op, const struct cred *subj_cred,
return error;
}
+/* for now separate fn to indicate semantics of the check */
+static bool __file_is_delegated(struct aa_label *obj_label)
+{
+ return unconfined(obj_label);
+}
+
+static bool __unix_needs_revalidation(struct file *file, struct aa_label *label,
+ u32 request)
+{
+ struct socket *sock = (struct socket *) file->private_data;
+
+ lockdep_assert_in_rcu_read_lock();
+
+ if (!S_ISSOCK(file_inode(file)->i_mode))
+ return false;
+ if (request & NET_PEER_MASK)
+ return false;
+ if (sock->sk->sk_family == PF_UNIX) {
+ struct aa_sk_ctx *ctx = aa_sock(sock->sk);
+
+ if (rcu_access_pointer(ctx->peer) !=
+ rcu_access_pointer(ctx->peer_lastupdate))
+ return true;
+ return !__aa_subj_label_is_cached(rcu_dereference(ctx->label),
+ label);
+ }
+ return false;
+}
+
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
@@ -594,17 +627,18 @@ int aa_file_perm(const char *op, const struct cred *subj_cred,
* delegation from unconfined tasks
*/
denied = request & ~fctx->allow;
- if (unconfined(label) || unconfined(flabel) ||
- (!denied && aa_label_is_subset(flabel, label))) {
+ if (unconfined(label) || __file_is_delegated(flabel) ||
+ __unix_needs_revalidation(file, label, request) ||
+ (!denied && __aa_subj_label_is_cached(label, flabel))) {
rcu_read_unlock();
goto done;
}
+ /* slow path - revalidate access */
flabel = aa_get_newest_label(flabel);
rcu_read_unlock();
- /* TODO: label cross check */
- if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
+ if (path_mediated_fs(file->f_path.dentry))
error = __file_path_perm(op, subj_cred, label, flabel, file,
request, denied, in_atomic);
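
The aa_lookup_condperms() change above encodes the owner/other split as adjacent entries in the perms table: when the accept2 flags mark a state with ACCEPT_FLAG_OWNER, index selects the owner entry and index + 1 the non-owner entry; otherwise index alone is used. A stand-alone sketch of the selection (the table contents and flag value are made up for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct perms { uint32_t allow; };

    #define FLAG_OWNER 0x1      /* stands in for ACCEPT_FLAG_OWNER */

    /* two adjacent entries per owner-conditional accept state */
    static const struct perms table[] = {
            { .allow = 0x7 },   /* index:     owner gets rwx */
            { .allow = 0x4 },   /* index + 1: everyone else gets r */
    };

    static const struct perms *lookup(unsigned int index, uint32_t flags,
                                      bool subj_is_owner)
    {
            if (flags & FLAG_OWNER)
                    return subj_is_owner ? &table[index] : &table[index + 1];
            return &table[index];
    }

    int main(void)
    {
            printf("owner allow=%#x, other allow=%#x\n",
                   lookup(0, FLAG_OWNER, true)->allow,
                   lookup(0, FLAG_OWNER, false)->allow);
            return 0;
    }
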
diff --git a/security/apparmor/include/af_unix.h b/security/apparmor/include/af_unix.h
new file mode 100644
index 000000000000..4a62e600d82b
--- /dev/null
+++ b/security/apparmor/include/af_unix.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor af_unix fine grained mediation
+ *
+ * Copyright 2023 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#ifndef __AA_AF_UNIX_H
+#define __AA_AF_UNIX_H
+
+#include <net/af_unix.h>
+
+#include "label.h"
+
+#define unix_addr(A) ((struct sockaddr_un *)(A))
+#define unix_addr_len(L) ((L) - sizeof(sa_family_t))
+#define unix_peer(sk) (unix_sk(sk)->peer)
+#define is_unix_addr_abstract_name(B) ((B)[0] == 0)
+#define is_unix_addr_anon(A, L) ((A) && unix_addr_len(L) <= 0)
+#define is_unix_addr_fs(A, L) (!is_unix_addr_anon(A, L) && \
+ !is_unix_addr_abstract_name(unix_addr(A)->sun_path))
+
+#define is_unix_anonymous(U) (!unix_sk(U)->addr)
+#define is_unix_fs(U) (!is_unix_anonymous(U) && \
+ unix_sk(U)->addr->name->sun_path[0])
+#define is_unix_connected(S) ((S)->state == SS_CONNECTED)
+
+
+struct sockaddr_un *aa_sunaddr(const struct unix_sock *u, int *addrlen);
+int aa_unix_peer_perm(const struct cred *subj_cred,
+ struct aa_label *label, const char *op, u32 request,
+ struct sock *sk, struct sock *peer_sk,
+ struct aa_label *peer_label);
+int aa_unix_sock_perm(const char *op, u32 request, struct socket *sock);
+int aa_unix_create_perm(struct aa_label *label, int family, int type,
+ int protocol);
+int aa_unix_bind_perm(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+int aa_unix_connect_perm(struct socket *sock, struct sockaddr *address,
+ int addrlen);
+int aa_unix_listen_perm(struct socket *sock, int backlog);
+int aa_unix_accept_perm(struct socket *sock, struct socket *newsock);
+int aa_unix_msg_perm(const char *op, u32 request, struct socket *sock,
+ struct msghdr *msg, int size);
+int aa_unix_opt_perm(const char *op, u32 request, struct socket *sock, int level,
+ int optname);
+int aa_unix_file_perm(const struct cred *subj_cred, struct aa_label *label,
+ const char *op, u32 request, struct file *file);
+
+#endif /* __AA_AF_UNIX_H */
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index f83934913b0f..cc6e3df1bc62 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -28,6 +28,7 @@
#define AA_CLASS_SIGNAL 10
#define AA_CLASS_XMATCH 11
#define AA_CLASS_NET 14
+#define AA_CLASS_NETV9 15
#define AA_CLASS_LABEL 16
#define AA_CLASS_POSIX_MQUEUE 17
#define AA_CLASS_MODULE 19
@@ -38,12 +39,13 @@
#define AA_CLASS_X 31
#define AA_CLASS_DBUS 32
+/* NOTE: if AA_CLASS_LAST ever exceeds 63, label->mediates needs to be updated */
#define AA_CLASS_LAST AA_CLASS_DBUS
/* Control parameters settable through module/boot flags */
extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
-extern bool aa_g_debug;
+extern int aa_g_debug;
extern bool aa_g_hash_policy;
extern bool aa_g_export_binary;
extern int aa_g_rawdata_compression_level;
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index e27229349abb..1a71a94ea19c 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -138,9 +138,12 @@ struct apparmor_audit_data {
};
struct {
int type, protocol;
- struct sock *peer_sk;
void *addr;
int addrlen;
+ struct {
+ void *addr;
+ int addrlen;
+ } peer;
} net;
};
};
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index d6dcc604ec0c..1ddcec2d1160 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -36,6 +36,7 @@ struct aa_caps {
extern struct aa_sfs_entry aa_sfs_entry_caps[];
+kernel_cap_t aa_profile_capget(struct aa_profile *profile);
int aa_capable(const struct cred *subj_cred, struct aa_label *label,
int cap, unsigned int opts);
diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
index 7265d2f81dd5..b028e4c13b6f 100644
--- a/security/apparmor/include/cred.h
+++ b/security/apparmor/include/cred.h
@@ -114,10 +114,22 @@ static inline struct aa_label *aa_get_current_label(void)
return aa_get_label(l);
}
-#define __end_current_label_crit_section(X) end_current_label_crit_section(X)
+/**
+ * __end_current_label_crit_section - end crit section begun with __begin_...
+ * @label: label obtained from __begin_current_label_crit_section
+ * @needput: whether the label reference must be put (set by __begin_...)
+ *
+ * Puts the label reference if @needput was set by the paired __begin_ call.
+ */
+static inline void __end_current_label_crit_section(struct aa_label *label,
+ bool needput)
+{
+ if (unlikely(needput))
+ aa_put_label(label);
+}
/**
- * end_label_crit_section - put a reference found with begin_current_label..
+ * end_current_label_crit_section - put a reference found with begin_current_label..
* @label: label reference to put
*
* Should only be used with a reference obtained with
@@ -132,6 +144,7 @@ static inline void end_current_label_crit_section(struct aa_label *label)
/**
* __begin_current_label_crit_section - current's confining label
+ * @needput: store whether the label needs to be put when ending crit section
*
* Returns: up to date confining label or the ns unconfined label (NOT NULL)
*
@@ -142,13 +155,16 @@ static inline void end_current_label_crit_section(struct aa_label *label)
* critical section between __begin_current_label_crit_section() ..
* __end_current_label_crit_section()
*/
-static inline struct aa_label *__begin_current_label_crit_section(void)
+static inline struct aa_label *__begin_current_label_crit_section(bool *needput)
{
struct aa_label *label = aa_current_raw_label();
- if (label_is_stale(label))
- label = aa_get_newest_label(label);
+ if (label_is_stale(label)) {
+ *needput = true;
+ return aa_get_newest_label(label);
+ }
+ *needput = false;
return label;
}
@@ -184,10 +200,11 @@ static inline struct aa_ns *aa_get_current_ns(void)
{
struct aa_label *label;
struct aa_ns *ns;
+ bool needput;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
ns = aa_get_ns(labels_ns(label));
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return ns;
}
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index 6e8f2aa66cd6..ef60f99bc5ae 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -77,12 +77,17 @@ int aa_audit_file(const struct cred *cred,
const char *target, struct aa_label *tlabel, kuid_t ouid,
const char *info, int error);
-struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
- aa_state_t state, struct path_cond *cond);
+struct aa_perms *aa_lookup_condperms(kuid_t subj_uid,
+ struct aa_policydb *file_rules,
+ aa_state_t state, struct path_cond *cond);
aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
const char *name, struct path_cond *cond,
struct aa_perms *perms);
+int __aa_path_perm(const char *op, const struct cred *subj_cred,
+ struct aa_profile *profile, const char *name,
+ u32 request, struct path_cond *cond, int flags,
+ struct aa_perms *perms);
int aa_path_perm(const char *op, const struct cred *subj_cred,
struct aa_label *label, const struct path *path,
int flags, u32 request, struct path_cond *cond);
@@ -99,7 +104,7 @@ void aa_inherit_files(const struct cred *cred, struct files_struct *files);
/**
- * aa_map_file_perms - map file flags to AppArmor permissions
+ * aa_map_file_to_perms - map file flags to AppArmor permissions
* @file: open file to map flags to AppArmor permissions
*
* Returns: apparmor permission set for the file
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index 74d17052f76b..323dd071afe9 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -13,6 +13,9 @@
#include <linux/sched.h>
+#define SIGUNKNOWN 0
+#define MAXMAPPED_SIG 35
+
int aa_may_signal(const struct cred *subj_cred, struct aa_label *sender,
const struct cred *target_cred, struct aa_label *target,
int sig);
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 93290ae300bb..c0812dbc1b5b 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -19,6 +19,7 @@
#include "lib.h"
struct aa_ns;
+struct aa_ruleset;
#define LOCAL_VEC_ENTRIES 8
#define DEFINE_VEC(T, V) \
@@ -109,7 +110,7 @@ struct label_it {
int i, j;
};
-/* struct aa_label - lazy labeling struct
+/* struct aa_label_base - base info of label
* @count: ref count of active users
* @node: rbtree position
* @rcu: rcu callback struct
@@ -118,7 +119,10 @@ struct label_it {
* @flags: stale and other flags - values may change under label set lock
* @secid: secid that references this label
* @size: number of entries in @ent[]
- * @ent: set of profiles for label, actual size determined by @size
+ * @mediates: bitmask for label_mediates
* @profile: label vec when embedded in a profile (FLAG_PROFILE is set)
* @rules: variable length rules when in a profile (FLAG_PROFILE is set)
* @vec: vector of profiles comprising the compound label
*/
struct aa_label {
struct kref count;
@@ -129,7 +133,18 @@ struct aa_label {
long flags;
u32 secid;
int size;
- struct aa_profile *vec[];
+ u64 mediates;
+ union {
+ struct {
+ /* only used if the label is a profile; size of
+ * rules[] is determined by the profile.
+ * profile[1] is poison or NULL as a guard
+ */
+ struct aa_profile *profile[2];
+ DECLARE_FLEX_ARRAY(struct aa_ruleset *, rules);
+ };
+ DECLARE_FLEX_ARRAY(struct aa_profile *, vec);
+ };
};
#define last_error(E, FN) \
@@ -231,20 +246,17 @@ int aa_label_next_confined(struct aa_label *l, int i);
#define fn_for_each_not_in_set(L1, L2, P, FN) \
fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set)
-#define LABEL_MEDIATES(L, C) \
-({ \
- struct aa_profile *profile; \
- struct label_it i; \
- int ret = 0; \
- label_for_each(i, (L), profile) { \
- if (RULE_MEDIATES(&profile->rules, (C))) { \
- ret = 1; \
- break; \
- } \
- } \
- ret; \
-})
+static inline bool label_mediates(struct aa_label *L, unsigned char C)
+{
+ return (L)->mediates & (((u64) 1) << (C));
+}
+static inline bool label_mediates_safe(struct aa_label *L, unsigned char C)
+{
+ if (C > AA_CLASS_LAST)
+ return false;
+ return label_mediates(L, C);
+}
void aa_labelset_destroy(struct aa_labelset *ls);
void aa_labelset_init(struct aa_labelset *ls);
@@ -417,6 +429,13 @@ static inline void aa_put_label(struct aa_label *l)
kref_put(&l->count, aa_label_kref);
}
+/* wrapper fn to indicate semantics of the check */
+static inline bool __aa_subj_label_is_cached(struct aa_label *subj_label,
+ struct aa_label *obj_label)
+{
+ return aa_label_is_subset(obj_label, subj_label);
+}
+
struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
void aa_proxy_kref(struct kref *kref);
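
label_mediates() replaces the LABEL_MEDIATES() walk over every profile in the label with a single bit test: each mediated class sets one bit of the u64 mediates mask, and a compound label ORs together its members' masks (see accum_label_info() in the label.c hunks further down). A tiny stand-alone illustration; AA_CLASS_SIGNAL and AA_CLASS_NETV9 match the values in this patch, AA_CLASS_FILE is assumed for the example:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AA_CLASS_FILE    2   /* assumed for the example */
    #define AA_CLASS_SIGNAL 10
    #define AA_CLASS_NETV9  15

    static bool mediates(uint64_t mask, unsigned char class)
    {
            return mask & ((uint64_t)1 << class);
    }

    int main(void)
    {
            /* profile A mediates files, profile B mediates signals + v9 net */
            uint64_t a = (uint64_t)1 << AA_CLASS_FILE;
            uint64_t b = ((uint64_t)1 << AA_CLASS_SIGNAL) |
                         ((uint64_t)1 << AA_CLASS_NETV9);
            uint64_t compound = a | b;   /* what the compound label carries */

            printf("v9 net mediated: %d\n", mediates(compound, AA_CLASS_NETV9));
            return 0;
    }
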
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index f11a0db7f51d..444197075fd6 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -19,22 +19,34 @@
extern struct aa_dfa *stacksplitdfa;
/*
- * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
- * which is not related to profile accesses.
- */
-
-#define DEBUG_ON (aa_g_debug)
-/*
* split individual debug cases out in preparation for finer grained
* debug controls in the future.
*/
-#define AA_DEBUG_LABEL DEBUG_ON
#define dbg_printk(__fmt, __args...) pr_debug(__fmt, ##__args)
-#define AA_DEBUG(fmt, args...) \
+
+#define DEBUG_NONE 0
+#define DEBUG_LABEL_ABS_ROOT 1
+#define DEBUG_LABEL 2
+#define DEBUG_DOMAIN 4
+#define DEBUG_POLICY 8
+#define DEBUG_INTERFACE 0x10
+
+#define DEBUG_ALL 0x1f /* update if new DEBUG_X added */
+#define DEBUG_PARSE_ERROR (-1)
+
+#define DEBUG_ON (aa_g_debug != DEBUG_NONE)
+#define DEBUG_ABS_ROOT (aa_g_debug & DEBUG_LABEL_ABS_ROOT)
+
+#define AA_DEBUG(opt, fmt, args...) \
do { \
- if (DEBUG_ON) \
- pr_debug_ratelimited("AppArmor: " fmt, ##args); \
+ if (aa_g_debug & opt) \
+ pr_warn_ratelimited("%s: " fmt, __func__, ##args); \
} while (0)
+#define AA_DEBUG_LABEL(LAB, X, fmt, args...) \
+do { \
+ if ((LAB)->flags & FLAG_DEBUG1) \
+ AA_DEBUG(X, fmt, args); \
+} while (0)
#define AA_WARN(X) WARN((X), "APPARMOR WARN %s: %s\n", __func__, #X)
@@ -48,9 +60,16 @@ extern struct aa_dfa *stacksplitdfa;
#define AA_BUG_FMT(X, fmt, args...) \
WARN((X), "AppArmor WARN %s: (" #X "): " fmt, __func__, ##args)
#else
-#define AA_BUG_FMT(X, fmt, args...) no_printk(fmt, ##args)
+#define AA_BUG_FMT(X, fmt, args...) \
+ do { \
+ BUILD_BUG_ON_INVALID(X); \
+ no_printk(fmt, ##args); \
+ } while (0)
#endif
+int aa_parse_debug_params(const char *str);
+int aa_print_debug_params(char *buffer);
+
#define AA_ERROR(fmt, args...) \
pr_err_ratelimited("AppArmor: " fmt, ##args)
@@ -106,6 +125,7 @@ struct aa_str_table {
};
void aa_free_str_table(struct aa_str_table *table);
+bool aa_resize_str_table(struct aa_str_table *t, int newsize, gfp_t gfp);
struct counted_str {
struct kref count;
@@ -151,7 +171,7 @@ struct aa_policy {
/**
* basename - find the last component of an hname
- * @name: hname to find the base profile name component of (NOT NULL)
+ * @hname: hname to find the base profile name component of (NOT NULL)
*
* Returns: the tail (base profile name) name component of an hname
*/
@@ -281,7 +301,7 @@ __do_cleanup: \
} \
__done: \
if (!__new_) \
- AA_DEBUG("label build failed\n"); \
+ AA_DEBUG(DEBUG_LABEL, "label build failed\n"); \
(__new_); \
})
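
With aa_g_debug now an int bitmask, every AA_DEBUG() call names the category it belongs to and only fires when that bit is set; the old boolean behaviour corresponds to DEBUG_ALL, and AA_DEBUG_LABEL() additionally requires FLAG_DEBUG1 on the label. A stand-alone re-creation of the gating for illustration (printf stands in for pr_warn_ratelimited, flag values taken from the hunk above):

    #include <stdio.h>

    #define DEBUG_NONE   0
    #define DEBUG_LABEL  2
    #define DEBUG_DOMAIN 4
    #define DEBUG_POLICY 8

    static int aa_g_debug = DEBUG_DOMAIN | DEBUG_POLICY;  /* e.g. "domain,policy" */

    /* same shape as the kernel macro, printf standing in for pr_warn */
    #define AA_DEBUG(opt, fmt, ...)                                      \
            do {                                                         \
                    if (aa_g_debug & (opt))                              \
                            printf("%s: " fmt, __func__, ##__VA_ARGS__); \
            } while (0)

    int main(void)
    {
            AA_DEBUG(DEBUG_DOMAIN, "name lookup ix on error\n");  /* emitted  */
            AA_DEBUG(DEBUG_LABEL, "label build failed\n");        /* filtered */
            return 0;
    }
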
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 536ce3abd598..1fbe82f5021b 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -17,7 +17,7 @@
#define DFA_START 1
-/**
+/*
* The format used for transition tables is based on the GNU flex table
* file format (--tables-file option; see Table File Format in the flex
* info pages and the flex sources for documentation). The magic number
@@ -137,17 +137,15 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
void aa_dfa_free_kref(struct kref *kref);
-#define WB_HISTORY_SIZE 24
+/* This needs to be a power of 2 */
+#define WB_HISTORY_SIZE 32
struct match_workbuf {
- unsigned int count;
unsigned int pos;
unsigned int len;
- unsigned int size; /* power of 2, same as history size */
- unsigned int history[WB_HISTORY_SIZE];
+ aa_state_t history[WB_HISTORY_SIZE];
};
#define DEFINE_MATCH_WB(N) \
struct match_workbuf N = { \
- .count = 0, \
.pos = 0, \
.len = 0, \
}
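
The workbuf history grows to 32 entries and drops the count/size bookkeeping; the new comment requires a power of two so the position can wrap with a cheap mask rather than a modulo. The indexing itself lives in match.c, outside this diff, so the masking below is an assumption about how the size is used; a stand-alone illustration:

    #include <stdio.h>

    #define WB_HISTORY_SIZE 32                  /* must stay a power of 2 */
    #define wb_index(pos)   ((pos) & (WB_HISTORY_SIZE - 1))

    int main(void)
    {
            unsigned int history[WB_HISTORY_SIZE] = { 0 };
            unsigned int pos;

            /* writes past the end simply wrap around */
            for (pos = 0; pos < 40; pos++)
                    history[wb_index(pos)] = pos;

            printf("slot %u now holds %u\n", wb_index(37), history[wb_index(37)]);
            return 0;
    }
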
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
index c42ed8a73f1c..0d0b0ce42723 100644
--- a/security/apparmor/include/net.h
+++ b/security/apparmor/include/net.h
@@ -47,8 +47,9 @@
#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
AA_MAY_ACCEPT)
struct aa_sk_ctx {
- struct aa_label *label;
- struct aa_label *peer;
+ struct aa_label __rcu *label;
+ struct aa_label __rcu *peer;
+ struct aa_label __rcu *peer_lastupdate; /* ptr cmp only, no deref */
};
static inline struct aa_sk_ctx *aa_sock(const struct sock *sk)
@@ -56,7 +57,7 @@ static inline struct aa_sk_ctx *aa_sock(const struct sock *sk)
return sk->sk_security + apparmor_blob_sizes.lbs_sock;
}
-#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
+#define DEFINE_AUDIT_NET(NAME, OP, CRED, SK, F, T, P) \
struct lsm_network_audit NAME ## _net = { .sk = (SK), \
.family = (F)}; \
DEFINE_AUDIT_DATA(NAME, \
@@ -65,24 +66,15 @@ static inline struct aa_sk_ctx *aa_sock(const struct sock *sk)
AA_CLASS_NET, \
OP); \
NAME.common.u.net = &(NAME ## _net); \
+ NAME.subj_cred = (CRED); \
NAME.net.type = (T); \
NAME.net.protocol = (P)
-#define DEFINE_AUDIT_SK(NAME, OP, SK) \
- DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
+#define DEFINE_AUDIT_SK(NAME, OP, CRED, SK) \
+ DEFINE_AUDIT_NET(NAME, OP, CRED, SK, (SK)->sk_family, (SK)->sk_type, \
(SK)->sk_protocol)
-#define af_select(FAMILY, FN, DEF_FN) \
-({ \
- int __e; \
- switch ((FAMILY)) { \
- default: \
- __e = DEF_FN; \
- } \
- __e; \
-})
-
struct aa_secmark {
u8 audit;
u8 deny;
@@ -91,11 +83,19 @@ struct aa_secmark {
};
extern struct aa_sfs_entry aa_sfs_entry_network[];
-
+extern struct aa_sfs_entry aa_sfs_entry_networkv9[];
+
+int aa_do_perms(struct aa_profile *profile, struct aa_policydb *policy,
+ aa_state_t state, u32 request, struct aa_perms *p,
+ struct apparmor_audit_data *ad);
+/* passing in state returned by XXX_mediates_AF() */
+aa_state_t aa_match_to_prot(struct aa_policydb *policy, aa_state_t state,
+ u32 request, u16 af, int type, int protocol,
+ struct aa_perms **p, const char **info);
void audit_net_cb(struct audit_buffer *ab, void *va);
int aa_profile_af_perm(struct aa_profile *profile,
struct apparmor_audit_data *ad,
- u32 request, u16 family, int type);
+ u32 request, u16 family, int type, int protocol);
int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
const char *op, u32 request, u16 family,
int type, int protocol);
@@ -105,13 +105,13 @@ static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
struct sock *sk)
{
return aa_profile_af_perm(profile, ad, request, sk->sk_family,
- sk->sk_type);
+ sk->sk_type, sk->sk_protocol);
}
int aa_sk_perm(const char *op, u32 request, struct sock *sk);
int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
const char *op, u32 request,
- struct socket *sock);
+ struct file *file);
int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
u32 secid, const struct sock *sk);
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 343189903dba..8bb915d48dc7 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -13,6 +13,7 @@
enum path_flags {
PATH_IS_DIR = 0x1, /* path is a directory */
+ PATH_SOCK_COND = 0x2,
PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */
PATH_CHROOT_NSCONNECT = 0x10, /* connect paths that are at ns root */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index bbaa7d39a39a..37a3781b99a0 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -101,8 +101,8 @@ extern struct aa_perms allperms;
/**
* aa_perms_accum_raw - accumulate perms with out masking off overlapping perms
- * @accum - perms struct to accumulate into
- * @addend - perms struct to add to @accum
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
*/
static inline void aa_perms_accum_raw(struct aa_perms *accum,
struct aa_perms *addend)
@@ -128,8 +128,8 @@ static inline void aa_perms_accum_raw(struct aa_perms *accum,
/**
* aa_perms_accum - accumulate perms, masking off overlapping perms
- * @accum - perms struct to accumulate into
- * @addend - perms struct to add to @accum
+ * @accum: perms struct to accumulate into
+ * @addend: perms struct to add to @accum
*/
static inline void aa_perms_accum(struct aa_perms *accum,
struct aa_perms *addend)
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 757e3c232c57..4c50875c9d13 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -59,6 +59,11 @@ extern const char *const aa_profile_mode_names[];
#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+/* flags in the dfa accept2 table */
+enum dfa_accept_flags {
+ ACCEPT_FLAG_OWNER = 1,
+};
+
/*
* FIXME: currently need a clean way to replace and remove profiles as a
* set. It should be done at the namespace level.
@@ -124,6 +129,7 @@ static inline void aa_put_pdb(struct aa_policydb *pdb)
kref_put(&pdb->count, aa_pdb_free_kref);
}
+/* lookup perm that doesn't have an object conditional */
static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy,
aa_state_t state)
{
@@ -135,7 +141,6 @@ static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy,
return &(policy->perms[index]);
}
-
/* struct aa_data - generic data structure
* key: name for retrieving this data
* size: size of data in bytes
@@ -160,8 +165,6 @@ struct aa_data {
* @secmark: secmark label match info
*/
struct aa_ruleset {
- struct list_head list;
-
int size;
/* TODO: merge policy and file */
@@ -175,6 +178,7 @@ struct aa_ruleset {
struct aa_secmark *secmark;
};
+
/* struct aa_attachment - data and rules for a profiles attachment
* @list:
* @xmatch_str: human readable attachment string
@@ -193,7 +197,6 @@ struct aa_attachment {
/* struct aa_profile - basic confinement data
* @base - base components of the profile (name, refcount, lists, lock ...)
- * @label - label this profile is an extension of
* @parent: parent of profile
* @ns: namespace the profile is in
* @rename: optional profile name that this profile renamed
@@ -201,13 +204,20 @@ struct aa_attachment {
* @audit: the auditing mode of the profile
* @mode: the enforcement mode of the profile
* @path_flags: flags controlling path generation behavior
+ * @signal: the signal that should be used when kill is used
* @disconnected: what to prepend if attach_disconnected is specified
* @attach: attachment rules for the profile
* @rules: rules to be enforced
*
+ * @learning_cache: the accesses learned in complain mode
+ * @rawdata: rawdata of the loaded profile policy
+ * @hash: cryptographic hash of the profile
* @dents: dentries for the profiles file entries in apparmorfs
* @dirname: name of the profile dir in apparmorfs
* @data: hashtable for free-form policy aa_data
+ * @label: label this profile is an extension of
+ * @rules: label with the rule vec on its end
*
* The AppArmor profile contains the basic confinement data. Each profile
* has a name, and exists in a namespace. The @name and @exec_match are
@@ -231,16 +241,19 @@ struct aa_profile {
enum audit_mode audit;
long mode;
u32 path_flags;
+ int signal;
const char *disconnected;
struct aa_attachment attach;
- struct list_head rules;
struct aa_loaddata *rawdata;
unsigned char *hash;
char *dirname;
struct dentry *dents[AAFS_PROF_SIZEOF];
struct rhashtable *data;
+
+ int n_rules;
+ /* special - variable length must be last entry in profile */
struct aa_label label;
};
@@ -298,24 +311,38 @@ static inline aa_state_t RULE_MEDIATES(struct aa_ruleset *rules,
rules->policy->start[0], &class, 1);
}
-static inline aa_state_t RULE_MEDIATES_AF(struct aa_ruleset *rules, u16 AF)
+static inline aa_state_t RULE_MEDIATES_v9NET(struct aa_ruleset *rules)
{
- aa_state_t state = RULE_MEDIATES(rules, AA_CLASS_NET);
- __be16 be_af = cpu_to_be16(AF);
+ return RULE_MEDIATES(rules, AA_CLASS_NETV9);
+}
+
+static inline aa_state_t RULE_MEDIATES_NET(struct aa_ruleset *rules)
+{
+ /* can not use RULE_MEDIATE_v9AF here, because AF match fail
* can not be distinguished from class match fail, and we only
* fall back to checking the older class on class match failure
+ */
+ aa_state_t state = RULE_MEDIATES(rules, AA_CLASS_NETV9);
+ /* fallback and check v7/8 if v9 is NOT mediated */
if (!state)
- return DFA_NOMATCH;
- return aa_dfa_match_len(rules->policy->dfa, state, (char *) &be_af, 2);
+ state = RULE_MEDIATES(rules, AA_CLASS_NET);
+
+ return state;
}
-static inline aa_state_t ANY_RULE_MEDIATES(struct list_head *head,
- unsigned char class)
+
+void aa_compute_profile_mediates(struct aa_profile *profile);
+static inline bool profile_mediates(struct aa_profile *profile,
+ unsigned char class)
{
- struct aa_ruleset *rule;
+ return label_mediates(&profile->label, class);
+}
- /* TODO: change to list walk */
- rule = list_first_entry(head, typeof(*rule), list);
- return RULE_MEDIATES(rule, class);
+static inline bool profile_mediates_safe(struct aa_profile *profile,
+ unsigned char class)
+{
+ return label_mediates_safe(&profile->label, class);
}
/**
diff --git a/security/apparmor/include/secid.h b/security/apparmor/include/secid.h
index f6a515640950..6025d3849cf8 100644
--- a/security/apparmor/include/secid.h
+++ b/security/apparmor/include/secid.h
@@ -25,11 +25,10 @@ struct aa_label;
extern int apparmor_display_secid_mode;
struct aa_label *aa_secid_to_label(u32 secid);
-int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
-int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
- u32 *seclen);
+int apparmor_secid_to_secctx(u32 secid, struct lsm_context *cp);
+int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp);
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
-void apparmor_release_secctx(char *secdata, u32 seclen);
+void apparmor_release_secctx(struct lsm_context *cp);
int aa_alloc_secid(struct aa_label *label, gfp_t gfp);
diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
index cbf7a997ed84..c772668cdc62 100644
--- a/security/apparmor/include/sig_names.h
+++ b/security/apparmor/include/sig_names.h
@@ -1,9 +1,5 @@
#include <linux/signal.h>
-
-#define SIGUNKNOWN 0
-#define MAXMAPPED_SIG 35
-#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
-#define SIGRT_BASE 128
+#include "signal.h"
/* provide a mapping of arch signal to internal signal # for mediation
* those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO
diff --git a/security/apparmor/include/signal.h b/security/apparmor/include/signal.h
new file mode 100644
index 000000000000..729763fa7ce6
--- /dev/null
+++ b/security/apparmor/include/signal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation function definitions.
+ *
+ * Copyright 2023 Canonical Ltd.
+ */
+
+#ifndef __AA_SIGNAL_H
+#define __AA_SIGNAL_H
+
+#define SIGUNKNOWN 0
+#define MAXMAPPED_SIG 35
+
+#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
+#define SIGRT_BASE 128
+
+#endif /* __AA_SIGNAL_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 0cdf4340b02d..df5712cea685 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -80,21 +80,20 @@ static int profile_signal_perm(const struct cred *cred,
struct aa_label *peer, u32 request,
struct apparmor_audit_data *ad)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_perms perms;
aa_state_t state;
- if (profile_unconfined(profile) ||
- !ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
+ if (profile_unconfined(profile))
return 0;
ad->subj_cred = cred;
ad->peer = peer;
/* TODO: secondary cache check <profile, profile, perm> */
- state = aa_dfa_next(rules->policy->dfa,
- rules->policy->start[AA_CLASS_SIGNAL],
- ad->signal);
+ state = RULE_MEDIATES(rules, AA_CLASS_SIGNAL);
+ if (!state)
+ return 0;
+ state = aa_dfa_next(rules->policy->dfa, state, ad->signal);
aa_label_match(profile, rules, peer, state, false, request, &perms);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, ad, audit_signal_cb);
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 91483ecacc16..913678f199c3 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -198,21 +198,25 @@ static bool vec_is_stale(struct aa_profile **vec, int n)
return false;
}
-static long accum_vec_flags(struct aa_profile **vec, int n)
+static void accum_label_info(struct aa_label *new)
{
long u = FLAG_UNCONFINED;
int i;
- AA_BUG(!vec);
+ AA_BUG(!new);
- for (i = 0; i < n; i++) {
- u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
- FLAG_STALE);
- if (!(u & vec[i]->label.flags & FLAG_UNCONFINED))
+ /* size == 1 is a profile and flags must be set as part of creation */
+ if (new->size == 1)
+ return;
+
+ for (i = 0; i < new->size; i++) {
+ u |= new->vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
+ FLAG_STALE);
+ if (!(u & new->vec[i]->label.flags & FLAG_UNCONFINED))
u &= ~FLAG_UNCONFINED;
+ new->mediates |= new->vec[i]->label.mediates;
}
-
- return u;
+ new->flags |= u;
}
static int sort_cmp(const void *a, const void *b)
@@ -431,7 +435,7 @@ struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
/* + 1 for null terminator entry on vec */
new = kzalloc(struct_size(new, vec, size + 1), gfp);
- AA_DEBUG("%s (%p)\n", __func__, new);
+ AA_DEBUG(DEBUG_LABEL, "%s (%p)\n", __func__, new);
if (!new)
goto fail;
@@ -645,6 +649,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
rb_replace_node(&old->node, &new->node, &ls->root);
old->flags &= ~FLAG_IN_TREE;
new->flags |= FLAG_IN_TREE;
+ accum_label_info(new);
return true;
}
@@ -705,6 +710,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
rb_link_node(&label->node, parent, new);
rb_insert_color(&label->node, &ls->root);
label->flags |= FLAG_IN_TREE;
+ accum_label_info(label);
return aa_get_label(label);
}
@@ -1085,7 +1091,6 @@ static struct aa_label *label_merge_insert(struct aa_label *new,
else if (k == b->size)
return aa_get_label(b);
}
- new->flags |= accum_vec_flags(new->vec, new->size);
ls = labels_set(new);
write_lock_irqsave(&ls->lock, flags);
label = __label_insert(labels_set(new), new, false);
@@ -1456,7 +1461,7 @@ bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
/*
* cached label name is present and visible
- * @label->hname only exists if label is namespace hierachical
+ * @label->hname only exists if label is namespace hierarchical
*/
static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
int flags)
@@ -1617,7 +1622,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
AA_BUG(!str && size != 0);
AA_BUG(!label);
- if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
+ if (DEBUG_ABS_ROOT && (flags & FLAG_ABS_ROOT)) {
ns = root_ns;
len = snprintf(str, size, "_");
update_for_len(total, len, size, str);
@@ -1731,7 +1736,7 @@ void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
display_mode(ns, label, flags)) {
len = aa_label_asxprint(&name, ns, label, flags, gfp);
if (len < 0) {
- AA_DEBUG("label print error");
+ AA_DEBUG(DEBUG_LABEL, "label print error");
return;
}
str = name;
@@ -1759,7 +1764,7 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
len = aa_label_asxprint(&str, ns, label, flags, gfp);
if (len < 0) {
- AA_DEBUG("label print error");
+ AA_DEBUG(DEBUG_LABEL, "label print error");
return;
}
seq_puts(f, str);
@@ -1782,7 +1787,7 @@ void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
len = aa_label_asxprint(&str, ns, label, flags, gfp);
if (len < 0) {
- AA_DEBUG("label print error");
+ AA_DEBUG(DEBUG_LABEL, "label print error");
return;
}
pr_info("%s", str);
@@ -1865,7 +1870,7 @@ struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
AA_BUG(!str);
str = skipn_spaces(str, n);
- if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
+ if (str == NULL || (DEBUG_ABS_ROOT && *str == '_' &&
base != &root_ns->unconfined->label))
return ERR_PTR(-EINVAL);
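
The accum_label_info() change earlier in this file folds each member profile's state into the compound label when it is inserted into the labelset: debug and stale flags are ORed in, the mediates bits are ORed in, and FLAG_UNCONFINED only survives if every member is unconfined. A stand-alone sketch of that accumulation rule (the flag values are made up; the real ones live in the label flags definitions):

    #include <stdio.h>

    #define FLAG_UNCONFINED 0x1   /* illustrative values */
    #define FLAG_STALE      0x2
    #define FLAG_DEBUG1     0x4

    static long accum(const long *vec_flags, int n)
    {
            long u = FLAG_UNCONFINED;
            int i;

            for (i = 0; i < n; i++) {
                    u |= vec_flags[i] & (FLAG_DEBUG1 | FLAG_STALE);
                    /* unconfined only if every member is unconfined */
                    if (!(u & vec_flags[i] & FLAG_UNCONFINED))
                            u &= ~FLAG_UNCONFINED;
            }
            return u;
    }

    int main(void)
    {
            long vec[] = { FLAG_UNCONFINED, FLAG_STALE };  /* one confined member */

            printf("accumulated flags %#lx (unconfined dropped, stale kept)\n",
                   accum(vec, 2));
            return 0;
    }
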
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 7db62213e352..82dbb97ad406 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -25,6 +25,120 @@ struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
.quiet = ALL_PERMS_MASK,
.hide = ALL_PERMS_MASK };
+struct val_table_ent {
+ const char *str;
+ int value;
+};
+
+static struct val_table_ent debug_values_table[] = {
+ { "N", DEBUG_NONE },
+ { "none", DEBUG_NONE },
+ { "n", DEBUG_NONE },
+ { "0", DEBUG_NONE },
+ { "all", DEBUG_ALL },
+ { "Y", DEBUG_ALL },
+ { "y", DEBUG_ALL },
+ { "1", DEBUG_ALL },
+ { "abs_root", DEBUG_LABEL_ABS_ROOT },
+ { "label", DEBUG_LABEL },
+ { "domain", DEBUG_DOMAIN },
+ { "policy", DEBUG_POLICY },
+ { "interface", DEBUG_INTERFACE },
+ { NULL, 0 }
+};
+
+static struct val_table_ent *val_table_find_ent(struct val_table_ent *table,
+ const char *name, size_t len)
+{
+ struct val_table_ent *entry;
+
+ for (entry = table; entry->str != NULL; entry++) {
+ if (strncmp(entry->str, name, len) == 0 &&
+ strlen(entry->str) == len)
+ return entry;
+ }
+ return NULL;
+}
+
+int aa_parse_debug_params(const char *str)
+{
+ struct val_table_ent *ent;
+ const char *next;
+ int val = 0;
+
+ do {
+ size_t n = strcspn(str, "\r\n,");
+
+ next = str + n;
+ ent = val_table_find_ent(debug_values_table, str, next - str);
+ if (ent)
+ val |= ent->value;
+ else
+ AA_DEBUG(DEBUG_INTERFACE, "unknown debug type '%.*s'",
+ (int)(next - str), str);
+ str = next + 1;
+ } while (*next != 0);
+ return val;
+}
+
+/**
+ * val_mask_to_str - convert a value mask to a comma separated string of names
+ * @str: character buffer to store the string in
+ * @size: size of the @str buffer
+ * @table: NULL-terminated table mapping names to mask values (NOT NULL)
+ * @mask: value mask to convert
+ */
+static int val_mask_to_str(char *str, size_t size,
+ const struct val_table_ent *table, u32 mask)
+{
+ const struct val_table_ent *ent;
+ int total = 0;
+
+ for (ent = table; ent->str; ent++) {
+ if (ent->value && (ent->value & mask) == ent->value) {
+ int len = scnprintf(str, size, "%s%s", total ? "," : "",
+ ent->str);
+ size -= len;
+ str += len;
+ total += len;
+ mask &= ~ent->value;
+ }
+ }
+
+ return total;
+}
+
+int aa_print_debug_params(char *buffer)
+{
+ if (!aa_g_debug)
+ return sprintf(buffer, "N");
+ return val_mask_to_str(buffer, PAGE_SIZE, debug_values_table,
+ aa_g_debug);
+}
+
+bool aa_resize_str_table(struct aa_str_table *t, int newsize, gfp_t gfp)
+{
+ char **n;
+ int i;
+
+ if (t->size == newsize)
+ return true;
+ n = kcalloc(newsize, sizeof(*n), gfp);
+ if (!n)
+ return false;
+ for (i = 0; i < min(t->size, newsize); i++)
+ n[i] = t->table[i];
+ for (; i < t->size; i++)
+ kfree_sensitive(t->table[i]);
+ if (newsize > t->size)
+ memset(&n[t->size], 0, (newsize-t->size)*sizeof(*n));
+ kfree_sensitive(t->table);
+ t->table = n;
+ t->size = newsize;
+
+ return true;
+}
+
/**
* aa_free_str_table - free entries str table
* @t: the string table to free (MAYBE NULL)
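
aa_parse_debug_params() above takes a comma (or newline) separated list of the names in debug_values_table and ORs the matching values together, so "domain,policy" yields DEBUG_DOMAIN | DEBUG_POLICY and "all", "Y" or "1" yield DEBUG_ALL; aa_print_debug_params() renders the mask back into that form. A stand-alone re-creation of the parsing loop for illustration (trimmed table, no logging of unknown names):

    #include <stdio.h>
    #include <string.h>

    struct ent { const char *str; int value; };

    static const struct ent table[] = {
            { "domain", 0x4 }, { "policy", 0x8 }, { "all", 0x1f }, { NULL, 0 }
    };

    static int parse(const char *str)
    {
            const char *next;
            int val = 0;

            do {
                    size_t n = strcspn(str, "\r\n,");
                    const struct ent *e;

                    next = str + n;
                    for (e = table; e->str; e++)
                            if (strlen(e->str) == n && !strncmp(e->str, str, n))
                                    val |= e->value;
                    str = next + 1;
            } while (*next != '\0');
            return val;
    }

    int main(void)
    {
            printf("\"domain,policy\" -> %#x\n", parse("domain,policy"));
            return 0;
    }
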
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 1edc12862a7d..8e1cc229b41b 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -26,6 +26,7 @@
#include <uapi/linux/mount.h>
#include <uapi/linux/lsm.h>
+#include "include/af_unix.h"
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
@@ -126,14 +127,15 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
struct aa_label *tracer, *tracee;
const struct cred *cred;
int error;
+ bool needput;
cred = get_task_cred(child);
tracee = cred_label(cred); /* ref count on cred */
- tracer = __begin_current_label_crit_section();
+ tracer = __begin_current_label_crit_section(&needput);
error = aa_may_ptrace(current_cred(), tracer, cred, tracee,
(mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
: AA_PTRACE_TRACE);
- __end_current_label_crit_section(tracer);
+ __end_current_label_crit_section(tracer, needput);
put_cred(cred);
return error;
@@ -144,14 +146,15 @@ static int apparmor_ptrace_traceme(struct task_struct *parent)
struct aa_label *tracer, *tracee;
const struct cred *cred;
int error;
+ bool needput;
- tracee = __begin_current_label_crit_section();
+ tracee = __begin_current_label_crit_section(&needput);
cred = get_task_cred(parent);
tracer = cred_label(cred); /* ref count on cred */
error = aa_may_ptrace(cred, tracer, current_cred(), tracee,
AA_PTRACE_TRACE);
put_cred(cred);
- __end_current_label_crit_section(tracee);
+ __end_current_label_crit_section(tracee, needput);
return error;
}
@@ -176,15 +179,11 @@ static int apparmor_capget(const struct task_struct *target, kernel_cap_t *effec
struct label_it i;
label_for_each_confined(i, label, profile) {
- struct aa_ruleset *rules;
- if (COMPLAIN_MODE(profile))
- continue;
- rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
- *effective = cap_intersect(*effective,
- rules->caps.allow);
- *permitted = cap_intersect(*permitted,
- rules->caps.allow);
+ kernel_cap_t allowed;
+
+ allowed = aa_profile_capget(profile);
+ *effective = cap_intersect(*effective, allowed);
+ *permitted = cap_intersect(*permitted, allowed);
}
}
rcu_read_unlock();
@@ -221,12 +220,13 @@ static int common_perm(const char *op, const struct path *path, u32 mask,
{
struct aa_label *label;
int error = 0;
+ bool needput;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
if (!unconfined(label))
error = aa_path_perm(op, current_cred(), label, path, 0, mask,
cond);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -524,14 +524,15 @@ static int common_file_perm(const char *op, struct file *file, u32 mask,
{
struct aa_label *label;
int error = 0;
+ bool needput;
/* don't reaudit files closed during inheritance */
- if (file->f_path.dentry == aa_null.dentry)
+ if (unlikely(file->f_path.dentry == aa_null.dentry))
return -EACCES;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
error = aa_file_perm(op, current_cred(), label, file, mask, in_atomic);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -633,7 +634,7 @@ static int profile_uring(struct aa_profile *profile, u32 request,
AA_BUG(!profile);
- rules = list_first_entry(&profile->rules, typeof(*rules), list);
+ rules = profile->label.rules[0];
state = RULE_MEDIATES(rules, AA_CLASS_IO_URING);
if (state) {
struct aa_perms perms = { };
@@ -664,15 +665,16 @@ static int apparmor_uring_override_creds(const struct cred *new)
struct aa_profile *profile;
struct aa_label *label;
int error;
+ bool needput;
DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
OP_URING_OVERRIDE);
ad.uring.target = cred_label(new);
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
error = fn_for_each(label, profile,
profile_uring(profile, AA_MAY_OVERRIDE_CRED,
cred_label(new), CAP_SYS_ADMIN, &ad));
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -688,14 +690,15 @@ static int apparmor_uring_sqpoll(void)
struct aa_profile *profile;
struct aa_label *label;
int error;
+ bool needput;
DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_IO_URING,
OP_URING_SQPOLL);
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
error = fn_for_each(label, profile,
profile_uring(profile, AA_MAY_CREATE_SQPOLL,
NULL, CAP_SYS_ADMIN, &ad));
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -706,6 +709,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
{
struct aa_label *label;
int error = 0;
+ bool needput;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
@@ -713,7 +717,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
flags &= ~AA_MS_IGNORE_MASK;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
if (!unconfined(label)) {
if (flags & MS_REMOUNT)
error = aa_remount(current_cred(), label, path, flags,
@@ -732,7 +736,7 @@ static int apparmor_sb_mount(const char *dev_name, const struct path *path,
error = aa_new_mount(current_cred(), label, dev_name,
path, type, flags, data);
}
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -742,12 +746,13 @@ static int apparmor_move_mount(const struct path *from_path,
{
struct aa_label *label;
int error = 0;
+ bool needput;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
if (!unconfined(label))
error = aa_move_mount(current_cred(), label, from_path,
to_path);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -756,11 +761,12 @@ static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
{
struct aa_label *label;
int error = 0;
+ bool needput;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
if (!unconfined(label))
error = aa_umount(current_cred(), label, mnt, flags);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -984,10 +990,12 @@ static void apparmor_bprm_committed_creds(const struct linux_binprm *bprm)
static void apparmor_current_getlsmprop_subj(struct lsm_prop *prop)
{
- struct aa_label *label = __begin_current_label_crit_section();
+ struct aa_label *label;
+ bool needput;
+ label = __begin_current_label_crit_section(&needput);
prop->apparmor.label = label;
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
}
static void apparmor_task_getlsmprop_obj(struct task_struct *p,
@@ -1002,13 +1010,16 @@ static void apparmor_task_getlsmprop_obj(struct task_struct *p,
static int apparmor_task_setrlimit(struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
- struct aa_label *label = __begin_current_label_crit_section();
+ struct aa_label *label;
int error = 0;
+ bool needput;
+
+ label = __begin_current_label_crit_section(&needput);
if (!unconfined(label))
error = aa_task_setrlimit(current_cred(), label, task,
resource, new_rlim);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return error;
}
@@ -1019,6 +1030,7 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo
const struct cred *tc;
struct aa_label *cl, *tl;
int error;
+ bool needput;
tc = get_task_cred(target);
tl = aa_get_newest_cred_label(tc);
@@ -1030,9 +1042,9 @@ static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo
error = aa_may_signal(cred, cl, tc, tl, sig);
aa_put_label(cl);
} else {
- cl = __begin_current_label_crit_section();
+ cl = __begin_current_label_crit_section(&needput);
error = aa_may_signal(current_cred(), cl, tc, tl, sig);
- __end_current_label_crit_section(cl);
+ __end_current_label_crit_section(cl, needput);
}
aa_put_label(tl);
put_cred(tc);
@@ -1061,12 +1073,29 @@ static int apparmor_userns_create(const struct cred *cred)
return error;
}
+static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t gfp)
+{
+ struct aa_sk_ctx *ctx = aa_sock(sk);
+ struct aa_label *label;
+ bool needput;
+
+ label = __begin_current_label_crit_section(&needput);
+ //spin_lock_init(&ctx->lock);
+ rcu_assign_pointer(ctx->label, aa_get_label(label));
+ rcu_assign_pointer(ctx->peer, NULL);
+ rcu_assign_pointer(ctx->peer_lastupdate, NULL);
+ __end_current_label_crit_section(label, needput);
+ return 0;
+}
+
static void apparmor_sk_free_security(struct sock *sk)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
- aa_put_label(ctx->label);
- aa_put_label(ctx->peer);
+	/* sk is dead, these won't be updated any more */
+ aa_put_label(rcu_dereference_protected(ctx->label, true));
+ aa_put_label(rcu_dereference_protected(ctx->peer, true));
+ aa_put_label(rcu_dereference_protected(ctx->peer_lastupdate, true));
}
/**
@@ -1080,13 +1109,153 @@ static void apparmor_sk_clone_security(const struct sock *sk,
struct aa_sk_ctx *ctx = aa_sock(sk);
struct aa_sk_ctx *new = aa_sock(newsk);
- if (new->label)
- aa_put_label(new->label);
- new->label = aa_get_label(ctx->label);
+ /* not actually in use yet */
+ if (rcu_access_pointer(ctx->label) != rcu_access_pointer(new->label)) {
+ aa_put_label(rcu_dereference_protected(new->label, true));
+ rcu_assign_pointer(new->label, aa_get_label_rcu(&ctx->label));
+ }
+
+ if (rcu_access_pointer(ctx->peer) != rcu_access_pointer(new->peer)) {
+ aa_put_label(rcu_dereference_protected(new->peer, true));
+ rcu_assign_pointer(new->peer, aa_get_label_rcu(&ctx->peer));
+ }
+
+ if (rcu_access_pointer(ctx->peer_lastupdate) != rcu_access_pointer(new->peer_lastupdate)) {
+ aa_put_label(rcu_dereference_protected(new->peer_lastupdate, true));
+ rcu_assign_pointer(new->peer_lastupdate,
+ aa_get_label_rcu(&ctx->peer_lastupdate));
+ }
+}
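/*
 * The aa_sk_ctx label fields are RCU pointers after this change.  A minimal
 * sketch of how a reader could take a counted reference on one of them,
 * assuming aa_get_label_rcu() handles the RCU read side itself, as its use in
 * the hunk above suggests; aa_example_peek_sk_label() is hypothetical.
 */
static struct aa_label *aa_example_peek_sk_label(struct sock *sk)
{
	struct aa_sk_ctx *ctx = aa_sock(sk);

	if (!rcu_access_pointer(ctx->label))
		return NULL;
	/* caller is responsible for dropping the reference with aa_put_label() */
	return aa_get_label_rcu(&ctx->label);
}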
+
+static int unix_connect_perm(const struct cred *cred, struct aa_label *label,
+ struct sock *sk, struct sock *peer_sk)
+{
+ struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk);
+ int error;
+
+ error = aa_unix_peer_perm(cred, label, OP_CONNECT,
+ (AA_MAY_CONNECT | AA_MAY_SEND | AA_MAY_RECEIVE),
+ sk, peer_sk,
+ rcu_dereference_protected(peer_ctx->label,
+ lockdep_is_held(&unix_sk(peer_sk)->lock)));
+ if (!is_unix_fs(peer_sk)) {
+ last_error(error,
+ aa_unix_peer_perm(cred,
+ rcu_dereference_protected(peer_ctx->label,
+ lockdep_is_held(&unix_sk(peer_sk)->lock)),
+ OP_CONNECT,
+ (AA_MAY_ACCEPT | AA_MAY_SEND | AA_MAY_RECEIVE),
+ peer_sk, sk, label));
+ }
+
+ return error;
+}
+
+/* lockdep check in unix_connect_perm - push sks here to check */
+static void unix_connect_peers(struct aa_sk_ctx *sk_ctx,
+ struct aa_sk_ctx *peer_ctx)
+{
+ /* Cross reference the peer labels for SO_PEERSEC */
+ struct aa_label *label = rcu_dereference_protected(sk_ctx->label, true);
+
+ aa_get_label(label);
+ aa_put_label(rcu_dereference_protected(peer_ctx->peer,
+ true));
+ rcu_assign_pointer(peer_ctx->peer, label); /* transfer cnt */
+
+ label = aa_get_label(rcu_dereference_protected(peer_ctx->label,
+ true));
+ //spin_unlock(&peer_ctx->lock);
+
+ //spin_lock(&sk_ctx->lock);
+ aa_put_label(rcu_dereference_protected(sk_ctx->peer,
+ true));
+ aa_put_label(rcu_dereference_protected(sk_ctx->peer_lastupdate,
+ true));
+
+ rcu_assign_pointer(sk_ctx->peer, aa_get_label(label));
+ rcu_assign_pointer(sk_ctx->peer_lastupdate, label); /* transfer cnt */
+ //spin_unlock(&sk_ctx->lock);
+}
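/*
 * After unix_connect_peers(sk_ctx, peer_ctx) the peer labels are cross
 * referenced for SO_PEERSEC.  A rough picture, assuming sockets A (sk_ctx,
 * label La) and B (peer_ctx, label Lb):
 *
 *	aa_sock(B)->peer            == La
 *	aa_sock(A)->peer            == Lb
 *	aa_sock(A)->peer_lastupdate == Lb  (reference transferred, not re-taken)
 *
 * apparmor_socket_getpeersec_stream() below reports these @peer labels.
 */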
+
+/**
+ * apparmor_unix_stream_connect - check perms before making unix domain conn
+ * @sk: sk attempting to connect
+ * @peer_sk: sk that is accepting the connection
+ * @newsk: new sk created for this connection
+ * peer is locked when this hook is called
+ *
+ * Return:
+ * 0 if connection is permitted
+ * error code on denial or failure
+ */
+static int apparmor_unix_stream_connect(struct sock *sk, struct sock *peer_sk,
+ struct sock *newsk)
+{
+ struct aa_sk_ctx *sk_ctx = aa_sock(sk);
+ struct aa_sk_ctx *peer_ctx = aa_sock(peer_sk);
+ struct aa_sk_ctx *new_ctx = aa_sock(newsk);
+ struct aa_label *label;
+ int error;
+ bool needput;
+
+ label = __begin_current_label_crit_section(&needput);
+ error = unix_connect_perm(current_cred(), label, sk, peer_sk);
+ __end_current_label_crit_section(label, needput);
+
+ if (error)
+ return error;
+
+ /* newsk doesn't go through post_create, but does go through
+ * security_sk_alloc()
+ */
+ rcu_assign_pointer(new_ctx->label,
+ aa_get_label(rcu_dereference_protected(peer_ctx->label,
+ true)));
+
+ /* Cross reference the peer labels for SO_PEERSEC */
+ unix_connect_peers(sk_ctx, new_ctx);
+
+ return 0;
+}
+
+/**
+ * apparmor_unix_may_send - check perms before conn or sending unix dgrams
+ * @sock: socket sending the message
+ * @peer: socket message is being send to
+ *
+ * Performs bidirectional permission checks for Unix domain socket communication:
+ * 1. Verifies sender has AA_MAY_SEND to target socket
+ * 2. Verifies receiver has AA_MAY_RECEIVE from source socket
+ *
+ * sock and peer are locked when this hook is called
+ * called by dgram_connect during peer setup, but the path is not copied to newsk
+ *
+ * Return:
+ * 0 if transmission is permitted
+ * error code on denial or failure
+ */
+static int apparmor_unix_may_send(struct socket *sock, struct socket *peer)
+{
+ struct aa_sk_ctx *peer_ctx = aa_sock(peer->sk);
+ struct aa_label *label;
+ int error;
+ bool needput;
+
+ label = __begin_current_label_crit_section(&needput);
+ error = xcheck(aa_unix_peer_perm(current_cred(),
+ label, OP_SENDMSG, AA_MAY_SEND,
+ sock->sk, peer->sk,
+ rcu_dereference_protected(peer_ctx->label,
+ true)),
+ aa_unix_peer_perm(peer->file ? peer->file->f_cred : NULL,
+ rcu_dereference_protected(peer_ctx->label,
+ true),
+ OP_SENDMSG, AA_MAY_RECEIVE, peer->sk,
+ sock->sk, label));
+ __end_current_label_crit_section(label, needput);
- if (new->peer)
- aa_put_label(new->peer);
- new->peer = aa_get_label(ctx->peer);
+ return error;
}
static int apparmor_socket_create(int family, int type, int protocol, int kern)
@@ -1096,13 +1265,19 @@ static int apparmor_socket_create(int family, int type, int protocol, int kern)
AA_BUG(in_interrupt());
+ if (kern)
+ return 0;
+
label = begin_current_label_crit_section();
- if (!(kern || unconfined(label)))
- error = af_select(family,
- create_perm(label, family, type, protocol),
- aa_af_perm(current_cred(), label,
- OP_CREATE, AA_MAY_CREATE,
- family, type, protocol));
+ if (!unconfined(label)) {
+ if (family == PF_UNIX)
+ error = aa_unix_create_perm(label, family, type,
+ protocol);
+ else
+ error = aa_af_perm(current_cred(), label, OP_CREATE,
+ AA_MAY_CREATE, family, type,
+ protocol);
+ }
end_current_label_crit_section(label);
return error;
@@ -1135,14 +1310,58 @@ static int apparmor_socket_post_create(struct socket *sock, int family,
if (sock->sk) {
struct aa_sk_ctx *ctx = aa_sock(sock->sk);
- aa_put_label(ctx->label);
- ctx->label = aa_get_label(label);
+ /* still not live */
+ aa_put_label(rcu_dereference_protected(ctx->label, true));
+ rcu_assign_pointer(ctx->label, aa_get_label(label));
}
aa_put_label(label);
return 0;
}
+static int apparmor_socket_socketpair(struct socket *socka,
+ struct socket *sockb)
+{
+ struct aa_sk_ctx *a_ctx = aa_sock(socka->sk);
+ struct aa_sk_ctx *b_ctx = aa_sock(sockb->sk);
+ struct aa_label *label;
+
+ /* socks not live yet - initial values set in sk_alloc */
+ label = begin_current_label_crit_section();
+ if (rcu_access_pointer(a_ctx->label) != label) {
+ AA_BUG("a_ctx != label");
+ aa_put_label(rcu_dereference_protected(a_ctx->label, true));
+ rcu_assign_pointer(a_ctx->label, aa_get_label(label));
+ }
+ if (rcu_access_pointer(b_ctx->label) != label) {
+ AA_BUG("b_ctx != label");
+ aa_put_label(rcu_dereference_protected(b_ctx->label, true));
+ rcu_assign_pointer(b_ctx->label, aa_get_label(label));
+ }
+
+ if (socka->sk->sk_family == PF_UNIX) {
+ /* unix socket pairs by-pass unix_stream_connect */
+ unix_connect_peers(a_ctx, b_ctx);
+ }
+ end_current_label_crit_section(label);
+
+ return 0;
+}
+
+/**
+ * apparmor_socket_bind - check perms before bind addr to socket
+ * @sock: socket to bind the address to (must be non-NULL)
+ * @address: address that is being bound (must be non-NULL)
+ * @addrlen: length of @address
+ *
+ * Performs security checks before allowing a socket to bind to an address.
+ * Handles Unix domain sockets specially through aa_unix_bind_perm().
+ * For other socket families, uses generic permission check via aa_sk_perm().
+ *
+ * Return:
+ * 0 if binding is permitted
+ * error code on denial or invalid parameters
+ */
static int apparmor_socket_bind(struct socket *sock,
struct sockaddr *address, int addrlen)
{
@@ -1151,9 +1370,9 @@ static int apparmor_socket_bind(struct socket *sock,
AA_BUG(!address);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- bind_perm(sock, address, addrlen),
- aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_bind_perm(sock, address, addrlen);
+ return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
}
static int apparmor_socket_connect(struct socket *sock,
@@ -1164,9 +1383,10 @@ static int apparmor_socket_connect(struct socket *sock,
AA_BUG(!address);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- connect_perm(sock, address, addrlen),
- aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
+ /* PF_UNIX goes through unix_stream_connect && unix_may_send */
+ if (sock->sk->sk_family == PF_UNIX)
+ return 0;
+ return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
}
static int apparmor_socket_listen(struct socket *sock, int backlog)
@@ -1175,9 +1395,9 @@ static int apparmor_socket_listen(struct socket *sock, int backlog)
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- listen_perm(sock, backlog),
- aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_listen_perm(sock, backlog);
+ return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
}
/*
@@ -1191,9 +1411,9 @@ static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
AA_BUG(!newsock);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- accept_perm(sock, newsock),
- aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk));
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_accept_perm(sock, newsock);
+ return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
}
static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
@@ -1204,9 +1424,10 @@ static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
AA_BUG(!msg);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- msg_perm(op, request, sock, msg, size),
- aa_sk_perm(op, request, sock->sk));
+ /* PF_UNIX goes through unix_may_send */
+ if (sock->sk->sk_family == PF_UNIX)
+ return 0;
+ return aa_sk_perm(op, request, sock->sk);
}
static int apparmor_socket_sendmsg(struct socket *sock,
@@ -1228,9 +1449,9 @@ static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- sock_perm(op, request, sock),
- aa_sk_perm(op, request, sock->sk));
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_sock_perm(op, request, sock);
+ return aa_sk_perm(op, request, sock->sk);
}
static int apparmor_socket_getsockname(struct socket *sock)
@@ -1251,9 +1472,9 @@ static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
- return af_select(sock->sk->sk_family,
- opt_perm(op, request, sock, level, optname),
- aa_sk_perm(op, request, sock->sk));
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_opt_perm(op, request, sock, level, optname);
+ return aa_sk_perm(op, request, sock->sk);
}
static int apparmor_socket_getsockopt(struct socket *sock, int level,
@@ -1289,6 +1510,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
+ int error;
if (!skb->secmark)
return 0;
@@ -1297,23 +1519,31 @@ static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
* If reach here before socket_post_create hook is called, in which
* case label is null, drop the packet.
*/
- if (!ctx->label)
+ if (!rcu_access_pointer(ctx->label))
return -EACCES;
- return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
- skb->secmark, sk);
+ rcu_read_lock();
+ error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_RECVMSG,
+ AA_MAY_RECEIVE, skb->secmark, sk);
+ rcu_read_unlock();
+
+ return error;
}
#endif
-static struct aa_label *sk_peer_label(struct sock *sk)
+static struct aa_label *sk_peer_get_label(struct sock *sk)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
+ struct aa_label *label = ERR_PTR(-ENOPROTOOPT);
- if (ctx->peer)
- return ctx->peer;
+ if (rcu_access_pointer(ctx->peer))
+ return aa_get_label_rcu(&ctx->peer);
- return ERR_PTR(-ENOPROTOOPT);
+ if (sk->sk_family != PF_UNIX)
+ return ERR_PTR(-ENOPROTOOPT);
+
+ return label;
}
/**
@@ -1335,19 +1565,19 @@ static int apparmor_socket_getpeersec_stream(struct socket *sock,
struct aa_label *label;
struct aa_label *peer;
- label = begin_current_label_crit_section();
- peer = sk_peer_label(sock->sk);
+ peer = sk_peer_get_label(sock->sk);
if (IS_ERR(peer)) {
error = PTR_ERR(peer);
goto done;
}
+ label = begin_current_label_crit_section();
slen = aa_label_asxprint(&name, labels_ns(label), peer,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
/* don't include terminating \0 in slen, it breaks some apps */
if (slen < 0) {
error = -ENOMEM;
- goto done;
+ goto done_put;
}
if (slen > len) {
error = -ERANGE;
@@ -1359,8 +1589,11 @@ static int apparmor_socket_getpeersec_stream(struct socket *sock,
done_len:
if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
error = -EFAULT;
-done:
+
+done_put:
end_current_label_crit_section(label);
+ aa_put_label(peer);
+done:
kfree(name);
return error;
}
@@ -1396,8 +1629,9 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
- if (!ctx->label)
- ctx->label = aa_get_current_label();
+ /* setup - not live */
+ if (!rcu_access_pointer(ctx->label))
+ rcu_assign_pointer(ctx->label, aa_get_current_label());
}
#ifdef CONFIG_NETWORK_SECMARK
@@ -1405,12 +1639,17 @@ static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb
struct request_sock *req)
{
struct aa_sk_ctx *ctx = aa_sock(sk);
+ int error;
if (!skb->secmark)
return 0;
- return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT,
- skb->secmark, sk);
+ rcu_read_lock();
+ error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_CONNECT,
+ AA_MAY_CONNECT, skb->secmark, sk);
+ rcu_read_unlock();
+
+ return error;
}
#endif
@@ -1467,11 +1706,16 @@ static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
+ LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
+ LSM_HOOK_INIT(unix_stream_connect, apparmor_unix_stream_connect),
+ LSM_HOOK_INIT(unix_may_send, apparmor_unix_may_send),
+
LSM_HOOK_INIT(socket_create, apparmor_socket_create),
LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
+ LSM_HOOK_INIT(socket_socketpair, apparmor_socket_socketpair),
LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
@@ -1571,6 +1815,9 @@ static const struct kernel_param_ops param_ops_aalockpolicy = {
.get = param_get_aalockpolicy
};
+static int param_set_debug(const char *val, const struct kernel_param *kp);
+static int param_get_debug(char *buffer, const struct kernel_param *kp);
+
static int param_set_audit(const char *val, const struct kernel_param *kp);
static int param_get_audit(char *buffer, const struct kernel_param *kp);
@@ -1604,8 +1851,9 @@ module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
aacompressionlevel, 0400);
/* Debug mode */
-bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
-module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
+int aa_g_debug;
+module_param_call(debug, param_set_debug, param_get_debug,
+ &aa_g_debug, 0600);
/* Audit mode */
enum audit_mode aa_g_audit;
@@ -1798,6 +2046,34 @@ static int param_get_aacompressionlevel(char *buffer,
return param_get_int(buffer, kp);
}
+static int param_get_debug(char *buffer, const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
+ return -EPERM;
+ return aa_print_debug_params(buffer);
+}
+
+static int param_set_debug(const char *val, const struct kernel_param *kp)
+{
+ int i;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (!val)
+ return -EINVAL;
+ if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
+ return -EPERM;
+
+ i = aa_parse_debug_params(val);
+ if (i == DEBUG_PARSE_ERROR)
+ return -EINVAL;
+
+ aa_g_debug = i;
+ return 0;
+}
+
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
@@ -2006,7 +2282,7 @@ static int __init alloc_buffers(void)
* two should be enough, with more CPUs it is possible that more
* buffers will be used simultaneously. The preallocated pool may grow.
* This preallocation has also the side-effect that AppArmor will be
- * disabled early at boot if aa_g_path_max is extremly high.
+ * disabled early at boot if aa_g_path_max is extremely high.
*/
if (num_online_cpus() > 1)
num = 4 + RESERVE_COUNT;
@@ -2038,7 +2314,7 @@ static int apparmor_dointvec(const struct ctl_table *table, int write,
return proc_dointvec(table, write, buffer, lenp, ppos);
}
-static struct ctl_table apparmor_sysctl_table[] = {
+static const struct ctl_table apparmor_sysctl_table[] = {
#ifdef CONFIG_USER_NS
{
.procname = "unprivileged_userns_apparmor_policy",
@@ -2082,6 +2358,7 @@ static unsigned int apparmor_ip_postroute(void *priv,
{
struct aa_sk_ctx *ctx;
struct sock *sk;
+ int error;
if (!skb->secmark)
return NF_ACCEPT;
@@ -2091,8 +2368,11 @@ static unsigned int apparmor_ip_postroute(void *priv,
return NF_ACCEPT;
ctx = aa_sock(sk);
- if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
- skb->secmark, sk))
+ rcu_read_lock();
+ error = apparmor_secmark_check(rcu_dereference(ctx->label), OP_SENDMSG,
+ AA_MAY_SEND, skb->secmark, sk);
+ rcu_read_unlock();
+ if (!error)
return NF_ACCEPT;
return NF_DROP_ERR(-ECONNREFUSED);
@@ -2149,12 +2429,12 @@ static int __init apparmor_nf_ip_init(void)
__initcall(apparmor_nf_ip_init);
#endif
-static char nulldfa_src[] = {
+static char nulldfa_src[] __aligned(8) = {
#include "nulldfa.in"
};
static struct aa_dfa *nulldfa;
-static char stacksplitdfa_src[] = {
+static char stacksplitdfa_src[] __aligned(8) = {
#include "stacksplitdfa.in"
};
struct aa_dfa *stacksplitdfa;
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index f2d9c57f8794..c5a91600842a 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -679,34 +679,35 @@ aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
return state;
}
-#define inc_wb_pos(wb) \
-do { \
+#define inc_wb_pos(wb) \
+do { \
+ BUILD_BUG_ON_NOT_POWER_OF_2(WB_HISTORY_SIZE); \
wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
- wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
+ wb->len = (wb->len + 1) > WB_HISTORY_SIZE ? WB_HISTORY_SIZE : \
+ wb->len + 1; \
} while (0)
/* For DFAs that don't support extended tagging of states */
+/* adjust is only set if is_loop returns true */
static bool is_loop(struct match_workbuf *wb, aa_state_t state,
unsigned int *adjust)
{
- aa_state_t pos = wb->pos;
- aa_state_t i;
+ int pos = wb->pos;
+ int i;
if (wb->history[pos] < state)
return false;
- for (i = 0; i <= wb->len; i++) {
+ for (i = 0; i < wb->len; i++) {
if (wb->history[pos] == state) {
*adjust = i;
return true;
}
- if (pos == 0)
- pos = WB_HISTORY_SIZE;
- pos--;
+ /* -1 wraps to WB_HISTORY_SIZE - 1 */
+ pos = (pos - 1) & (WB_HISTORY_SIZE - 1);
}
- *adjust = i;
- return true;
+ return false;
}
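/*
 * The history buffer walk now relies on WB_HISTORY_SIZE being a power of two
 * (enforced by the BUILD_BUG_ON above).  Assuming WB_HISTORY_SIZE == 8 purely
 * for illustration:
 *
 *	(0 - 1) & (8 - 1) == 7     stepping back from slot 0 wraps to slot 7
 *	(5 + 1) & (8 - 1) == 6     inc_wb_pos() advances without a branch
 *
 * wb->len now saturates at WB_HISTORY_SIZE instead of being masked, so a full
 * history is no longer mistaken for an empty one.
 */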
static aa_state_t leftmatch_fb(struct aa_dfa *dfa, aa_state_t start,
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index bf8863253e07..523570aa1a5a 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -311,8 +311,7 @@ static int match_mnt_path_str(const struct cred *subj_cred,
{
struct aa_perms perms = { };
const char *mntpnt = NULL, *info = NULL;
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
int pos, error;
AA_BUG(!profile);
@@ -371,8 +370,7 @@ static int match_mnt(const struct cred *subj_cred,
bool binary)
{
const char *devname = NULL, *info = NULL;
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
int error = -EACCES;
AA_BUG(!profile);
@@ -604,8 +602,7 @@ static int profile_umount(const struct cred *subj_cred,
struct aa_profile *profile, const struct path *path,
char *buffer)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_perms perms = { };
const char *name = NULL, *info = NULL;
aa_state_t state;
@@ -668,8 +665,7 @@ static struct aa_label *build_pivotroot(const struct cred *subj_cred,
const struct path *old_path,
char *old_buffer)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
const char *old_name, *new_name = NULL, *info = NULL;
const char *trans_name = NULL;
struct aa_perms perms = { };
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
index 77413a519117..45cf25605c34 100644
--- a/security/apparmor/net.c
+++ b/security/apparmor/net.c
@@ -8,6 +8,7 @@
* Copyright 2009-2017 Canonical Ltd.
*/
+#include "include/af_unix.h"
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
@@ -24,6 +25,12 @@ struct aa_sfs_entry aa_sfs_entry_network[] = {
{ }
};
+struct aa_sfs_entry aa_sfs_entry_networkv9[] = {
+ AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
+ AA_SFS_FILE_BOOLEAN("af_unix", 1),
+ { }
+};
+
static const char * const net_mask_names[] = {
"unknown",
"send",
@@ -66,6 +73,42 @@ static const char * const net_mask_names[] = {
"unknown",
};
+static void audit_unix_addr(struct audit_buffer *ab, const char *str,
+ struct sockaddr_un *addr, int addrlen)
+{
+ int len = unix_addr_len(addrlen);
+
+ if (!addr || len <= 0) {
+ audit_log_format(ab, " %s=none", str);
+ } else if (addr->sun_path[0]) {
+ audit_log_format(ab, " %s=", str);
+ audit_log_untrustedstring(ab, addr->sun_path);
+ } else {
+ audit_log_format(ab, " %s=\"@", str);
+ if (audit_string_contains_control(&addr->sun_path[1], len - 1))
+ audit_log_n_hex(ab, &addr->sun_path[1], len - 1);
+ else
+ audit_log_format(ab, "%.*s", len - 1,
+ &addr->sun_path[1]);
+ audit_log_format(ab, "\"");
+ }
+}
+
+static void audit_unix_sk_addr(struct audit_buffer *ab, const char *str,
+ const struct sock *sk)
+{
+ const struct unix_sock *u = unix_sk(sk);
+
+ if (u && u->addr) {
+ int addrlen;
+ struct sockaddr_un *addr = aa_sunaddr(u, &addrlen);
+
+ audit_unix_addr(ab, str, addr, addrlen);
+ } else {
+ audit_unix_addr(ab, str, NULL, 0);
+
+ }
+}
/* audit callback for net specific fields */
void audit_net_cb(struct audit_buffer *ab, void *va)
@@ -73,12 +116,12 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
struct common_audit_data *sa = va;
struct apparmor_audit_data *ad = aad(sa);
- if (address_family_names[sa->u.net->family])
+ if (address_family_names[ad->common.u.net->family])
audit_log_format(ab, " family=\"%s\"",
- address_family_names[sa->u.net->family]);
+ address_family_names[ad->common.u.net->family]);
else
audit_log_format(ab, " family=\"unknown(%d)\"",
- sa->u.net->family);
+ ad->common.u.net->family);
if (sock_type_names[ad->net.type])
audit_log_format(ab, " sock_type=\"%s\"",
sock_type_names[ad->net.type]);
@@ -98,6 +141,19 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
net_mask_names, NET_PERMS_MASK);
}
}
+ if (ad->common.u.net->family == PF_UNIX) {
+ if (ad->net.addr || !ad->common.u.net->sk)
+ audit_unix_addr(ab, "addr",
+ unix_addr(ad->net.addr),
+ ad->net.addrlen);
+ else
+ audit_unix_sk_addr(ab, "addr", ad->common.u.net->sk);
+ if (ad->request & NET_PEER_MASK) {
+ audit_unix_addr(ab, "peer_addr",
+ unix_addr(ad->net.peer.addr),
+ ad->net.peer.addrlen);
+ }
+ }
if (ad->peer) {
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(ad->subj_label), ad->peer,
@@ -105,45 +161,123 @@ void audit_net_cb(struct audit_buffer *ab, void *va)
}
}
+/* standard permission lookup pattern - supports early bailout */
+int aa_do_perms(struct aa_profile *profile, struct aa_policydb *policy,
+ aa_state_t state, u32 request,
+ struct aa_perms *p, struct apparmor_audit_data *ad)
+{
+ struct aa_perms perms;
+
+ AA_BUG(!profile);
+ AA_BUG(!policy);
+
+
+ if (state || !p)
+ p = aa_lookup_perms(policy, state);
+ perms = *p;
+ aa_apply_modes_to_perms(profile, &perms);
+ return aa_check_perms(profile, &perms, request, ad,
+ audit_net_cb);
+}
+
+/* Only continue the match if the perms at the current state are
+ * insufficient and the AA_CONT_MATCH flag indicates more perms are
+ * available in a later state.
+ * Returns: perms struct if the match can stop early, else NULL to continue
+ */
+static struct aa_perms *early_match(struct aa_policydb *policy,
+ aa_state_t state, u32 request)
+{
+ struct aa_perms *p;
+
+ p = aa_lookup_perms(policy, state);
+ if (((p->allow & request) != request) && (p->allow & AA_CONT_MATCH))
+ return NULL;
+ return p;
+}
+
+static aa_state_t aa_dfa_match_be16(struct aa_dfa *dfa, aa_state_t state,
+ u16 data)
+{
+ __be16 buffer = cpu_to_be16(data);
+
+ return aa_dfa_match_len(dfa, state, (char *) &buffer, 2);
+}
+
+/**
+ * aa_match_to_prot - match the af, type, protocol triplet
+ * @policy: policy being matched
+ * @state: state to start in
+ * @request: permissions being requested, ignored if @p == NULL
+ * @af: socket address family
+ * @type: socket type
+ * @protocol: socket protocol
+ * @p: output - pointer to permission associated with match
+ * @info: output - pointer to string describing failure
+ *
+ * Return: the state the match stopped in.
+ *
+ * If @p is assigned a value, the returned state is the state that
+ * corresponds to it. @p is only set when an early match occurs; it is
+ * not set on failure or when the match runs to completion.
+ */
+aa_state_t aa_match_to_prot(struct aa_policydb *policy, aa_state_t state,
+ u32 request, u16 af, int type, int protocol,
+ struct aa_perms **p, const char **info)
+{
+ state = aa_dfa_match_be16(policy->dfa, state, (u16)af);
+ if (!state) {
+ *info = "failed af match";
+ return state;
+ }
+ state = aa_dfa_match_be16(policy->dfa, state, (u16)type);
+ if (state) {
+ if (p)
+ *p = early_match(policy, state, request);
+ if (!p || !*p) {
+ state = aa_dfa_match_be16(policy->dfa, state, (u16)protocol);
+ if (!state)
+ *info = "failed protocol match";
+ }
+ } else {
+ *info = "failed type match";
+ }
+
+ return state;
+}
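/*
 * A minimal usage sketch of the triple match above; the address family, type
 * and protocol are each fed to the DFA as a big-endian 16-bit token via
 * aa_dfa_match_be16().  aa_example_match_tcp() is hypothetical; the returned
 * state and @p feed aa_do_perms() as in aa_profile_af_perm() below.
 */
static aa_state_t aa_example_match_tcp(struct aa_policydb *policy,
				       aa_state_t start, const char **info)
{
	struct aa_perms *p = NULL;

	return aa_match_to_prot(policy, start, AA_MAY_CREATE,
				AF_INET, SOCK_STREAM, IPPROTO_TCP,
				&p, info);
}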
+
/* Generic af perm */
int aa_profile_af_perm(struct aa_profile *profile,
struct apparmor_audit_data *ad, u32 request, u16 family,
- int type)
+ int type, int protocol)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
- struct aa_perms perms = { };
+ struct aa_ruleset *rules = profile->label.rules[0];
+ struct aa_perms *p = NULL;
aa_state_t state;
- __be16 buffer[2];
AA_BUG(family >= AF_MAX);
AA_BUG(type < 0 || type >= SOCK_MAX);
+ AA_BUG(profile_unconfined(profile));
if (profile_unconfined(profile))
return 0;
- state = RULE_MEDIATES(rules, AA_CLASS_NET);
+ state = RULE_MEDIATES_NET(rules);
if (!state)
return 0;
-
- buffer[0] = cpu_to_be16(family);
- buffer[1] = cpu_to_be16((u16) type);
- state = aa_dfa_match_len(rules->policy->dfa, state, (char *) &buffer,
- 4);
- perms = *aa_lookup_perms(rules->policy, state);
- aa_apply_modes_to_perms(profile, &perms);
-
- return aa_check_perms(profile, &perms, request, ad, audit_net_cb);
+ state = aa_match_to_prot(rules->policy, state, request, family, type,
+ protocol, &p, &ad->info);
+ return aa_do_perms(profile, rules->policy, state, request, p, ad);
}
int aa_af_perm(const struct cred *subj_cred, struct aa_label *label,
const char *op, u32 request, u16 family, int type, int protocol)
{
struct aa_profile *profile;
- DEFINE_AUDIT_NET(ad, op, NULL, family, type, protocol);
+ DEFINE_AUDIT_NET(ad, op, subj_cred, NULL, family, type, protocol);
return fn_for_each_confined(label, profile,
aa_profile_af_perm(profile, &ad, request, family,
- type));
+ type, protocol));
}
static int aa_label_sk_perm(const struct cred *subj_cred,
@@ -157,9 +291,9 @@ static int aa_label_sk_perm(const struct cred *subj_cred,
AA_BUG(!label);
AA_BUG(!sk);
- if (ctx->label != kernel_t && !unconfined(label)) {
+ if (rcu_access_pointer(ctx->label) != kernel_t && !unconfined(label)) {
struct aa_profile *profile;
- DEFINE_AUDIT_SK(ad, op, sk);
+ DEFINE_AUDIT_SK(ad, op, subj_cred, sk);
ad.subj_cred = subj_cred;
error = fn_for_each_confined(label, profile,
@@ -187,12 +321,16 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
int aa_sock_file_perm(const struct cred *subj_cred, struct aa_label *label,
- const char *op, u32 request, struct socket *sock)
+ const char *op, u32 request, struct file *file)
{
+ struct socket *sock = (struct socket *) file->private_data;
+
AA_BUG(!label);
AA_BUG(!sock);
AA_BUG(!sock->sk);
+ if (sock->sk->sk_family == PF_UNIX)
+ return aa_unix_file_perm(subj_cred, label, op, request, file);
return aa_label_sk_perm(subj_cred, label, op, request, sock->sk);
}
@@ -223,8 +361,7 @@ static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
{
int i, ret;
struct aa_perms perms = { };
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
if (rules->secmark_count == 0)
return 0;
@@ -257,7 +394,7 @@ int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
u32 secid, const struct sock *sk)
{
struct aa_profile *profile;
- DEFINE_AUDIT_SK(ad, op, sk);
+ DEFINE_AUDIT_SK(ad, op, NULL, sk);
return fn_for_each_confined(label, profile,
aa_secmark_perm(profile, request, secid,
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index d0244fab0653..50d5345ff5cb 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -243,6 +243,9 @@ static void free_ruleset(struct aa_ruleset *rules)
{
int i;
+ if (!rules)
+ return;
+
aa_put_pdb(rules->file);
aa_put_pdb(rules->policy);
aa_free_cap_rules(&rules->caps);
@@ -259,8 +262,6 @@ struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp)
struct aa_ruleset *rules;
rules = kzalloc(sizeof(*rules), gfp);
- if (rules)
- INIT_LIST_HEAD(&rules->list);
return rules;
}
@@ -277,10 +278,9 @@ struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp)
*/
void aa_free_profile(struct aa_profile *profile)
{
- struct aa_ruleset *rule, *tmp;
struct rhashtable *rht;
- AA_DEBUG("%s(%p)\n", __func__, profile);
+ AA_DEBUG(DEBUG_POLICY, "%s(%p)\n", __func__, profile);
if (!profile)
return;
@@ -299,10 +299,9 @@ void aa_free_profile(struct aa_profile *profile)
* at this point there are no tasks that can have a reference
* to rules
*/
- list_for_each_entry_safe(rule, tmp, &profile->rules, list) {
- list_del_init(&rule->list);
- free_ruleset(rule);
- }
+ for (int i = 0; i < profile->n_rules; i++)
+ free_ruleset(profile->label.rules[i]);
+
kfree_sensitive(profile->dirname);
if (profile->data) {
@@ -331,10 +330,12 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
gfp_t gfp)
{
struct aa_profile *profile;
- struct aa_ruleset *rules;
- /* freed by free_profile - usually through aa_put_profile */
- profile = kzalloc(struct_size(profile, label.vec, 2), gfp);
+ /* freed by free_profile - usually through aa_put_profile
+ * this adds space for a single ruleset in the rules section of the
+ * label
+ */
+ profile = kzalloc(struct_size(profile, label.rules, 1), gfp);
if (!profile)
return NULL;
@@ -343,13 +344,11 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
if (!aa_label_init(&profile->label, 1, gfp))
goto fail;
- INIT_LIST_HEAD(&profile->rules);
-
/* allocate the first ruleset, but leave it empty */
- rules = aa_alloc_ruleset(gfp);
- if (!rules)
+ profile->label.rules[0] = aa_alloc_ruleset(gfp);
+ if (!profile->label.rules[0])
goto fail;
- list_add(&rules->list, &profile->rules);
+ profile->n_rules = 1;
/* update being set needed by fs interface */
if (!proxy) {
@@ -364,6 +363,7 @@ struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
profile->label.flags |= FLAG_PROFILE;
profile->label.vec[0] = profile;
+ profile->signal = SIGKILL;
/* refcount released by caller */
return profile;
@@ -373,6 +373,41 @@ fail:
return NULL;
}
+static inline bool ANY_RULE_MEDIATES(struct aa_profile *profile,
+ unsigned char class)
+{
+ int i;
+
+ for (i = 0; i < profile->n_rules; i++) {
+ if (RULE_MEDIATES(profile->label.rules[i], class))
+ return true;
+ }
+ return false;
+}
+
+/* set of classes that are mediated even when unconfined */
+static int unconfined_mediates[] = { AA_CLASS_NS, AA_CLASS_IO_URING, 0 };
+
+/* must be called after profile rulesets and start information are set up */
+void aa_compute_profile_mediates(struct aa_profile *profile)
+{
+ int c;
+
+ if (profile_unconfined(profile)) {
+ int *pos;
+
+ for (pos = unconfined_mediates; *pos; pos++) {
+ if (ANY_RULE_MEDIATES(profile, *pos))
+			profile->label.mediates |= ((u64) 1) << *pos;
+ }
+ return;
+ }
+ for (c = 0; c <= AA_CLASS_LAST; c++) {
+ if (ANY_RULE_MEDIATES(profile, c))
+ profile->label.mediates |= ((u64) 1) << c;
+ }
+}
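/*
 * aa_compute_profile_mediates() caches the mediated classes as a bitmask in
 * label.mediates; the label_mediates() checks used in task.c below are then
 * presumably just a bit test along the lines of this sketch (hypothetical,
 * the real helper lives in the label headers).
 */
static inline bool example_label_mediates(struct aa_label *label,
					  unsigned char class)
{
	return label->mediates & (((u64) 1) << class);
}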
+
/* TODO: profile accounting - setup in remove */
/**
@@ -463,7 +498,7 @@ static struct aa_policy *__lookup_parent(struct aa_ns *ns,
}
/**
- * __create_missing_ancestors - create place holders for missing ancestores
+ * __create_missing_ancestors - create place holders for missing ancestors
* @ns: namespace to lookup profile in (NOT NULL)
* @hname: hierarchical profile name to find parent of (NOT NULL)
* @gfp: type of allocation.
@@ -621,13 +656,15 @@ struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
/* TODO: ideally we should inherit abi from parent */
profile->label.flags |= FLAG_NULL;
profile->attach.xmatch = aa_get_pdb(nullpdb);
- rules = list_first_entry(&profile->rules, typeof(*rules), list);
+ rules = profile->label.rules[0];
rules->file = aa_get_pdb(nullpdb);
rules->policy = aa_get_pdb(nullpdb);
+ aa_compute_profile_mediates(profile);
if (parent) {
profile->path_flags = parent->path_flags;
-
+ /* override/inherit what is mediated from parent */
+ profile->label.mediates = parent->label.mediates;
/* released on free_profile */
rcu_assign_pointer(profile->parent, aa_get_profile(parent));
profile->ns = aa_get_ns(parent->ns);
@@ -833,8 +870,8 @@ bool aa_policy_admin_capable(const struct cred *subj_cred,
bool capable = policy_ns_capable(subj_cred, label, user_ns,
CAP_MAC_ADMIN) == 0;
- AA_DEBUG("cap_mac_admin? %d\n", capable);
- AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
+ AA_DEBUG(DEBUG_POLICY, "cap_mac_admin? %d\n", capable);
+ AA_DEBUG(DEBUG_POLICY, "policy locked? %d\n", aa_g_lock_policy);
return aa_policy_view_capable(subj_cred, label, ns) && capable &&
!aa_g_lock_policy;
@@ -843,11 +880,11 @@ bool aa_policy_admin_capable(const struct cred *subj_cred,
bool aa_current_policy_view_capable(struct aa_ns *ns)
{
struct aa_label *label;
- bool res;
+ bool needput, res;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
res = aa_policy_view_capable(current_cred(), label, ns);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return res;
}
@@ -855,11 +892,11 @@ bool aa_current_policy_view_capable(struct aa_ns *ns)
bool aa_current_policy_admin_capable(struct aa_ns *ns)
{
struct aa_label *label;
- bool res;
+ bool needput, res;
- label = __begin_current_label_crit_section();
+ label = __begin_current_label_crit_section(&needput);
res = aa_policy_admin_capable(current_cred(), label, ns);
- __end_current_label_crit_section(label);
+ __end_current_label_crit_section(label, needput);
return res;
}
@@ -1068,7 +1105,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
goto out;
/* ensure that profiles are all for the same ns
- * TODO: update locking to remove this constaint. All profiles in
+ * TODO: update locking to remove this constraint. All profiles in
* the load set must succeed as a set or the load will
* fail. Sort ent list and take ns locks in hierarchy order
*/
diff --git a/security/apparmor/policy_compat.c b/security/apparmor/policy_compat.c
index 423227670e68..cfc2207e5a12 100644
--- a/security/apparmor/policy_compat.c
+++ b/security/apparmor/policy_compat.c
@@ -286,10 +286,10 @@ static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
AA_BUG(!dfa);
- for (state = 0; state < state_count; state++)
+ for (state = 0; state < state_count; state++) {
ACCEPT_TABLE(dfa)[state] = state * factor;
- kvfree(dfa->tables[YYTD_ID_ACCEPT2]);
- dfa->tables[YYTD_ID_ACCEPT2] = NULL;
+ ACCEPT_TABLE2(dfa)[state] = factor > 1 ? ACCEPT_FLAG_OWNER : 0;
+ }
}
/* TODO: merge different dfa mappings into single map_policy fn */
diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
index 1f02cfe1d974..64783ca3b0f2 100644
--- a/security/apparmor/policy_ns.c
+++ b/security/apparmor/policy_ns.c
@@ -107,7 +107,7 @@ static struct aa_ns *alloc_ns(const char *prefix, const char *name)
struct aa_ns *ns;
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
- AA_DEBUG("%s(%p)\n", __func__, ns);
+ AA_DEBUG(DEBUG_POLICY, "%s(%p)\n", __func__, ns);
if (!ns)
return NULL;
if (!aa_policy_init(&ns->base, prefix, name, GFP_KERNEL))
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 992b74c50d64..7523971e37d9 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -29,6 +29,7 @@
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"
+#include "include/signal.h"
/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
@@ -598,8 +599,8 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
fail:
if (rules->secmark) {
for (i = 0; i < size; i++)
- kfree(rules->secmark[i].label);
- kfree(rules->secmark);
+ kfree_sensitive(rules->secmark[i].label);
+ kfree_sensitive(rules->secmark);
rules->secmark_count = 0;
rules->secmark = NULL;
}
@@ -716,6 +717,7 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
void *pos = e->pos;
int i, flags, error = -EPROTO;
ssize_t size;
+ u32 version = 0;
pdb = aa_alloc_pdb(GFP_KERNEL);
if (!pdb)
@@ -733,6 +735,9 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
if (pdb->perms) {
/* perms table present accept is index */
flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
+ if (aa_unpack_u32(e, &version, "permsv") && version > 2)
+ /* accept2 used for dfa flags */
+ flags |= TO_ACCEPT2_FLAG(YYTD_DATA32);
} else {
/* packed perms in accept1 and accept2 */
flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
@@ -770,6 +775,21 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
}
}
+	/* accept2 may already have been allocated, even with a perms table */
+ if (pdb->perms && !pdb->dfa->tables[YYTD_ID_ACCEPT2]) {
+ /* add dfa flags table missing in v2 */
+ u32 noents = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_lolen;
+ u16 tdflags = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_flags;
+ size_t tsize = table_size(noents, tdflags);
+
+ pdb->dfa->tables[YYTD_ID_ACCEPT2] = kvzalloc(tsize, GFP_KERNEL);
+ if (!pdb->dfa->tables[YYTD_ID_ACCEPT2]) {
+ *info = "failed to alloc dfa flags table";
+ goto out;
+ }
+ pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_lolen = noents;
+ pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_flags = tdflags;
+ }
/*
* Unfortunately due to a bug in earlier userspaces, a
* transition table may be present even when the dfa is
@@ -783,9 +803,13 @@ static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
if (!pdb->dfa && pdb->trans.table)
aa_free_str_table(&pdb->trans);
- /* TODO: move compat mapping here, requires dfa merging first */
- /* TODO: move verify here, it has to be done after compat mappings */
-
+ /* TODO:
+ * - move compat mapping here, requires dfa merging first
+ * - move verify here, it has to be done after compat mappings
+ * - move free of unneeded trans table here, has to be done
+ * after perm mapping.
+ */
+out:
*policy = pdb;
return 0;
@@ -862,7 +886,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
error = -ENOMEM;
goto fail;
}
- rules = list_first_entry(&profile->rules, typeof(*rules), list);
+ rules = profile->label.rules[0];
/* profile renaming is optional */
(void) aa_unpack_str(e, &profile->rename, "rename");
@@ -898,6 +922,12 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
(void) aa_unpack_strdup(e, &disconnected, "disconnected");
profile->disconnected = disconnected;
+ /* optional */
+ (void) aa_unpack_u32(e, &profile->signal, "kill");
+ if (profile->signal < 1 || profile->signal > MAXMAPPED_SIG) {
+ info = "profile kill.signal invalid value";
+ goto fail;
+ }
/* per profile debug flags (complain, audit) */
if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
info = "profile missing flags";
@@ -1101,6 +1131,8 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
+ aa_compute_profile_mediates(profile);
+
return profile;
fail:
@@ -1215,21 +1247,32 @@ static bool verify_perm(struct aa_perms *perm)
static bool verify_perms(struct aa_policydb *pdb)
{
int i;
+ int xidx, xmax = -1;
for (i = 0; i < pdb->size; i++) {
if (!verify_perm(&pdb->perms[i]))
return false;
/* verify indexes into str table */
- if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
- (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
- return false;
+ if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE) {
+ xidx = pdb->perms[i].xindex & AA_X_INDEX_MASK;
+ if (xidx >= pdb->trans.size)
+ return false;
+ if (xmax < xidx)
+ xmax = xidx;
+ }
if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
return false;
if (pdb->perms[i].label &&
pdb->perms[i].label >= pdb->trans.size)
return false;
}
-
+ /* deal with incorrectly constructed string tables */
+ if (xmax == -1) {
+ aa_free_str_table(&pdb->trans);
+ } else if (pdb->trans.size > xmax + 1) {
+ if (!aa_resize_str_table(&pdb->trans, xmax + 1, GFP_KERNEL))
+ return false;
+ }
return true;
}
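/*
 * Worked example of the string-table fixup above: if the largest AA_X_TABLE
 * index referenced by any permission entry is 3 (xmax == 3) but the unpacked
 * trans table carries 10 entries, the table is resized down to 4 entries; if
 * no permission references the table at all (xmax == -1), it is freed.
 */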
@@ -1243,8 +1286,8 @@ static bool verify_perms(struct aa_policydb *pdb)
*/
static int verify_profile(struct aa_profile *profile)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
+
if (!rules)
return 0;
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
index f070902da8fc..cf18744dafe2 100644
--- a/security/apparmor/policy_unpack_test.c
+++ b/security/apparmor/policy_unpack_test.c
@@ -9,6 +9,8 @@
#include "include/policy.h"
#include "include/policy_unpack.h"
+#include <linux/unaligned.h>
+
#define TEST_STRING_NAME "TEST_STRING"
#define TEST_STRING_DATA "testing"
#define TEST_STRING_BUF_OFFSET \
@@ -44,7 +46,7 @@
#define TEST_ARRAY_BUF_OFFSET \
(TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct policy_unpack_fixture {
struct aa_ext *e;
@@ -80,7 +82,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
*(buf + 1) = strlen(TEST_U32_NAME) + 1;
strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
- *((__le32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = cpu_to_le32(TEST_U32_DATA);
+ put_unaligned_le32(TEST_U32_DATA, buf + 3 + strlen(TEST_U32_NAME) + 2);
buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
*buf = AA_NAME;
@@ -103,7 +105,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
- *((__le16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = cpu_to_le16(TEST_ARRAY_SIZE);
+ put_unaligned_le16(TEST_ARRAY_SIZE, buf + 3 + strlen(TEST_ARRAY_NAME) + 2);
return e;
}
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index e3857e3d7c6c..ce40f15d4952 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -125,12 +125,14 @@ int aa_setprocattr_changehat(char *args, size_t size, int flags)
for (count = 0; (hat < end) && count < 16; ++count) {
char *next = hat + strlen(hat) + 1;
hats[count] = hat;
- AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d hat '%s'\n"
+ AA_DEBUG(DEBUG_DOMAIN,
+ "%s: (pid %d) Magic 0x%llx count %d hat '%s'\n"
, __func__, current->pid, token, count, hat);
hat = next;
}
} else
- AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n",
+ AA_DEBUG(DEBUG_DOMAIN,
+ "%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n",
__func__, current->pid, token, count, "<NULL>");
return aa_change_hat(hats, count, token, flags);
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index dcc94c3153d5..8e80db3ae21c 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -89,8 +89,7 @@ static int profile_setrlimit(const struct cred *subj_cred,
struct aa_profile *profile, unsigned int resource,
struct rlimit *new_rlim)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
int e = 0;
if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
@@ -165,9 +164,7 @@ void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l)
* to the lesser of the tasks hard limit and the init tasks soft limit
*/
label_for_each_confined(i, old_l, old) {
- struct aa_ruleset *rules = list_first_entry(&old->rules,
- typeof(*rules),
- list);
+ struct aa_ruleset *rules = old->label.rules[0];
if (rules->rlimits.mask) {
int j;
@@ -185,9 +182,7 @@ void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l)
/* set any new hard limits as dictated by the new profile */
label_for_each_confined(i, new_l, new) {
- struct aa_ruleset *rules = list_first_entry(&new->rules,
- typeof(*rules),
- list);
+ struct aa_ruleset *rules = new->label.rules[0];
int j;
if (!rules->rlimits.mask)
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index 47dc08fc583e..28caf66b9033 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -47,23 +47,21 @@ struct aa_label *aa_secid_to_label(u32 secid)
return xa_load(&aa_secids, secid);
}
-static int apparmor_label_to_secctx(struct aa_label *label, char **secdata,
- u32 *seclen)
+static int apparmor_label_to_secctx(struct aa_label *label,
+ struct lsm_context *cp)
{
/* TODO: cache secctx and ref count so we don't have to recreate */
int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
int len;
- AA_BUG(!seclen);
-
if (!label)
return -EINVAL;
if (apparmor_display_secid_mode)
flags |= FLAG_SHOW_MODE;
- if (secdata)
- len = aa_label_asxprint(secdata, root_ns, label,
+ if (cp)
+ len = aa_label_asxprint(&cp->context, root_ns, label,
flags, GFP_ATOMIC);
else
len = aa_label_snxprint(NULL, 0, root_ns, label, flags);
@@ -71,26 +69,28 @@ static int apparmor_label_to_secctx(struct aa_label *label, char **secdata,
if (len < 0)
return -ENOMEM;
- *seclen = len;
+ if (cp) {
+ cp->len = len;
+ cp->id = LSM_ID_APPARMOR;
+ }
- return 0;
+ return len;
}
-int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+int apparmor_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
struct aa_label *label = aa_secid_to_label(secid);
- return apparmor_label_to_secctx(label, secdata, seclen);
+ return apparmor_label_to_secctx(label, cp);
}
-int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
- u32 *seclen)
+int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp)
{
struct aa_label *label;
label = prop->apparmor.label;
- return apparmor_label_to_secctx(label, secdata, seclen);
+ return apparmor_label_to_secctx(label, cp);
}
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
@@ -106,9 +106,13 @@ int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
return 0;
}
-void apparmor_release_secctx(char *secdata, u32 seclen)
+void apparmor_release_secctx(struct lsm_context *cp)
{
- kfree(secdata);
+ if (cp->id == LSM_ID_APPARMOR) {
+ kfree(cp->context);
+ cp->context = NULL;
+ cp->id = LSM_ID_UNDEF;
+ }
}
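/*
 * A minimal sketch of the lsm_context round trip introduced above, using only
 * the fields the new code touches (context, len, id); example_print_secctx()
 * is hypothetical.
 */
static void example_print_secctx(u32 secid)
{
	struct lsm_context cp = { };

	if (apparmor_secid_to_secctx(secid, &cp) < 0)
		return;
	pr_info("secctx=%s len=%u\n", cp.context, cp.len);
	apparmor_release_secctx(&cp);
}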
/**
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index c87fb9f4ac18..c9bc9cc69475 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -228,8 +228,7 @@ static int profile_ptrace_perm(const struct cred *cred,
struct aa_label *peer, u32 request,
struct apparmor_audit_data *ad)
{
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules), list);
+ struct aa_ruleset *rules = profile->label.rules[0];
struct aa_perms perms = { };
ad->subj_cred = cred;
@@ -246,7 +245,7 @@ static int profile_tracee_perm(const struct cred *cred,
struct apparmor_audit_data *ad)
{
if (profile_unconfined(tracee) || unconfined(tracer) ||
- !ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
+ !label_mediates(&tracee->label, AA_CLASS_PTRACE))
return 0;
return profile_ptrace_perm(cred, tracee, tracer, request, ad);
@@ -260,7 +259,7 @@ static int profile_tracer_perm(const struct cred *cred,
if (profile_unconfined(tracer))
return 0;
- if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
+ if (label_mediates(&tracer->label, AA_CLASS_PTRACE))
return profile_ptrace_perm(cred, tracer, tracee, request, ad);
/* profile uses the old style capability check for ptrace */
@@ -324,9 +323,7 @@ int aa_profile_ns_perm(struct aa_profile *profile,
ad->request = request;
if (!profile_unconfined(profile)) {
- struct aa_ruleset *rules = list_first_entry(&profile->rules,
- typeof(*rules),
- list);
+ struct aa_ruleset *rules = profile->label.rules[0];
aa_state_t state;
state = RULE_MEDIATES(rules, ad->class);
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
index 3663aec7bcbd..db759025abe1 100644
--- a/security/bpf/hooks.c
+++ b/security/bpf/hooks.c
@@ -13,7 +13,6 @@ static struct security_hook_list bpf_lsm_hooks[] __ro_after_init = {
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
- LSM_HOOK_INIT(task_free, bpf_task_storage_free),
};
static const struct lsm_id bpf_lsmid = {
diff --git a/security/commoncap.c b/security/commoncap.c
index cefad323a0b1..6bd4adeb4795 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,9 @@
#include <linux/mnt_idmapping.h>
#include <uapi/linux/lsm.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/capability.h>
+
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -50,24 +53,24 @@ static void warn_setuid_and_fcaps_mixed(const char *fname)
}
/**
- * cap_capable - Determine whether a task has a particular effective capability
+ * cap_capable_helper - Determine whether a task has a particular effective
+ * capability.
* @cred: The credentials to use
- * @targ_ns: The user namespace in which we need the capability
+ * @target_ns: The user namespace of the resource being accessed
+ * @cred_ns: The user namespace of the credentials
* @cap: The capability to check for
- * @opts: Bitmask of options defined in include/linux/security.h
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
- * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
- * and has_capability() functions. That is, it has the reverse semantics:
- * cap_has_capability() returns 0 when a task has a capability, but the
- * kernel's capable() and has_capability() returns 1 for this case.
+ * See cap_capable for more details.
*/
-int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
- int cap, unsigned int opts)
+static inline int cap_capable_helper(const struct cred *cred,
+ struct user_namespace *target_ns,
+ const struct user_namespace *cred_ns,
+ int cap)
{
- struct user_namespace *ns = targ_ns;
+ struct user_namespace *ns = target_ns;
/* See if cred has the capability in the target user namespace
* by examining the target user namespace and all of the target
@@ -75,21 +78,21 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
*/
for (;;) {
/* Do we have the necessary capabilities? */
- if (ns == cred->user_ns)
+ if (likely(ns == cred_ns))
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/*
* If we're already at a lower level than we're looking for,
* we're done searching.
*/
- if (ns->level <= cred->user_ns->level)
+ if (ns->level <= cred_ns->level)
return -EPERM;
/*
* The owner of the user namespace in the parent of the
* user namespace has all caps.
*/
- if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ if ((ns->parent == cred_ns) && uid_eq(ns->owner, cred->euid))
return 0;
/*
@@ -103,6 +106,32 @@ int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
}
/**
+ * cap_capable - Determine whether a task has a particular effective capability
+ * @cred: The credentials to use
+ * @target_ns: The user namespace of the resource being accessed
+ * @cap: The capability to check for
+ * @opts: Bitmask of options defined in include/linux/security.h (unused)
+ *
+ * Determine whether the nominated task has the specified capability amongst
+ * its effective set, returning 0 if it does, -ve if it does not.
+ *
+ * NOTE WELL: cap_capable() has reverse semantics to the capable() call
+ * and friends. That is cap_capable() returns an int 0 when a task has
+ * a capability, while the kernel's capable(), has_ns_capability(),
+ * has_ns_capability_noaudit(), and has_capability_noaudit() return a
+ * bool true (1) for this case.
+ */
+int cap_capable(const struct cred *cred, struct user_namespace *target_ns,
+ int cap, unsigned int opts)
+{
+ const struct user_namespace *cred_ns = cred->user_ns;
+ int ret = cap_capable_helper(cred, target_ns, cred_ns, cap);
+
+ trace_cap_capable(cred, target_ns, cred_ns, cap, ret);
+ return ret;
+}
+
+/**
* cap_settime - Determine whether the current process may set the system clock
* @ts: The time to set
* @tz: The timezone to set
@@ -827,12 +856,6 @@ static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
#define __cap_full(field, cred) \
cap_issubset(CAP_FULL_SET, cred->cap_##field)
-static inline bool __is_setuid(struct cred *new, const struct cred *old)
-{ return !uid_eq(new->euid, old->uid); }
-
-static inline bool __is_setgid(struct cred *new, const struct cred *old)
-{ return !gid_eq(new->egid, old->gid); }
-
/*
* 1) Audit candidate if current->cap_effective is set
*
@@ -862,7 +885,7 @@ static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
(root_privileged() &&
__is_suid(root, new) &&
!__cap_full(effective, new)) ||
- (!__is_setuid(new, old) &&
+ (uid_eq(new->euid, old->euid) &&
((has_fcap &&
__cap_gained(permitted, new, old)) ||
__cap_gained(ambient, new, old))))
@@ -888,7 +911,7 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
/* Process setpcap binaries and capabilities for uid 0 */
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective = false, has_fcap = false, is_setid;
+ bool effective = false, has_fcap = false, id_changed;
int ret;
kuid_t root_uid;
@@ -912,9 +935,9 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- is_setid = __is_setuid(new, old) || __is_setgid(new, old);
+ id_changed = !uid_eq(new->euid, old->euid) || !in_group_p(new->egid);
- if ((is_setid || __cap_gained(permitted, new, old)) &&
+ if ((id_changed || __cap_gained(permitted, new, old)) &&
((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
!ptracer_capable(current, new->user_ns))) {
/* downgrade; they get no more than they had, and maybe less */
@@ -931,7 +954,7 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
new->sgid = new->fsgid = new->egid;
/* File caps or setid cancels ambient. */
- if (has_fcap || is_setid)
+ if (has_fcap || id_changed)
cap_clear(new->cap_ambient);
/*
@@ -964,7 +987,9 @@ int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file)
return -EPERM;
/* Check for privilege-elevated exec. */
- if (is_setid ||
+ if (id_changed ||
+ !uid_eq(new->euid, old->uid) ||
+ !gid_eq(new->egid, old->gid) ||
(!__is_real(root_uid, new) &&
(effective ||
__cap_grew(permitted, ambient, new))))
@@ -1302,21 +1327,38 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
& (old->securebits ^ arg2)) /*[1]*/
|| ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
- || (cap_capable(current_cred(),
- current_cred()->user_ns,
- CAP_SETPCAP,
- CAP_OPT_NONE) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
* [3] no setting of unsupported bits
- * [4] doing anything requires privilege (go read about
- * the "sendmail capabilities bug")
*/
)
/* cannot change a locked bit */
return -EPERM;
+ /*
+ * Doing anything requires privilege (go read about the
+ * "sendmail capabilities bug"), except for unprivileged bits.
+ * Indeed, the SECURE_ALL_UNPRIVILEGED bits are not
+ * restrictions enforced by the kernel but by user space on
+ * itself.
+ */
+ if (cap_capable(current_cred(), current_cred()->user_ns,
+ CAP_SETPCAP, CAP_OPT_NONE) != 0) {
+ const unsigned long unpriv_and_locks =
+ SECURE_ALL_UNPRIVILEGED |
+ SECURE_ALL_UNPRIVILEGED << 1;
+ const unsigned long changed = old->securebits ^ arg2;
+
+ /* For legacy reasons, deny non-changes. */
+ if (!changed)
+ return -EPERM;
+
+ /* Denies privileged changes. */
+ if (changed & ~unpriv_and_locks)
+ return -EPERM;
+ }
+
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -1428,12 +1470,6 @@ int cap_mmap_addr(unsigned long addr)
return ret;
}
-int cap_mmap_file(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags)
-{
- return 0;
-}
-
#ifdef CONFIG_SECURITY
static const struct lsm_id capability_lsmid = {
@@ -1453,7 +1489,6 @@ static struct security_hook_list capability_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
- LSM_HOOK_INIT(mmap_file, cap_mmap_file),
LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
LSM_HOOK_INIT(task_prctl, cap_task_prctl),
LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
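
The securebits hunk above lets a task without CAP_SETPCAP flip only the unprivileged bits and their lock bits, while still rejecting a write that changes nothing. A standalone sketch of that mask arithmetic; the mask value is a placeholder, not the kernel's SECURE_ALL_UNPRIVILEGED:

#include <stdio.h>

#define HYP_SECURE_ALL_UNPRIVILEGED 0x50UL	/* placeholder value */

static int unpriv_securebits_allowed(unsigned long old_bits, unsigned long arg2)
{
	const unsigned long unpriv_and_locks =
		HYP_SECURE_ALL_UNPRIVILEGED | HYP_SECURE_ALL_UNPRIVILEGED << 1;
	const unsigned long changed = old_bits ^ arg2;

	if (!changed)
		return 0;	/* legacy behaviour: refuse a no-op write */

	/* Only bits inside the unprivileged set (and their locks) may change. */
	return (changed & ~unpriv_and_locks) == 0;
}

int main(void)
{
	printf("%d\n", unpriv_securebits_allowed(0x00, 0x10));	/* 1: inside the set */
	printf("%d\n", unpriv_securebits_allowed(0x00, 0x01));	/* 0: privileged bit */
	return 0;
}
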
diff --git a/security/inode.c b/security/inode.c
index da3ab44c8e57..43382ef8896e 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -112,23 +112,25 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
struct dentry *dentry;
struct inode *dir, *inode;
int error;
+ bool pinned = false;
if (!(mode & S_IFMT))
mode = (mode & S_IALLUGO) | S_IFREG;
pr_debug("securityfs: creating file '%s'\n",name);
- error = simple_pin_fs(&fs_type, &mount, &mount_count);
- if (error)
- return ERR_PTR(error);
-
- if (!parent)
+ if (!parent) {
+ error = simple_pin_fs(&fs_type, &mount, &mount_count);
+ if (error)
+ return ERR_PTR(error);
+ pinned = true;
parent = mount->mnt_root;
+ }
dir = d_inode(parent);
inode_lock(dir);
- dentry = lookup_one_len(name, parent, strlen(name));
+ dentry = lookup_noperm(&QSTR(name), parent);
if (IS_ERR(dentry))
goto out;
@@ -159,7 +161,6 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
inode->i_fop = fops;
}
d_instantiate(dentry, inode);
- dget(dentry);
inode_unlock(dir);
return dentry;
@@ -168,7 +169,8 @@ out1:
dentry = ERR_PTR(error);
out:
inode_unlock(dir);
- simple_release_fs(&mount, &mount_count);
+ if (pinned)
+ simple_release_fs(&mount, &mount_count);
return dentry;
}
@@ -279,6 +281,12 @@ struct dentry *securityfs_create_symlink(const char *name,
}
EXPORT_SYMBOL_GPL(securityfs_create_symlink);
+static void remove_one(struct dentry *victim)
+{
+ if (victim->d_parent == victim->d_sb->s_root)
+ simple_release_fs(&mount, &mount_count);
+}
+
/**
* securityfs_remove - removes a file or directory from the securityfs filesystem
*
@@ -291,43 +299,11 @@ EXPORT_SYMBOL_GPL(securityfs_create_symlink);
* This function is required to be called in order for the file to be
* removed. No automatic cleanup of files will happen when a module is
* removed; you are responsible here.
- */
-void securityfs_remove(struct dentry *dentry)
-{
- struct inode *dir;
-
- if (IS_ERR_OR_NULL(dentry))
- return;
-
- dir = d_inode(dentry->d_parent);
- inode_lock(dir);
- if (simple_positive(dentry)) {
- if (d_is_dir(dentry))
- simple_rmdir(dir, dentry);
- else
- simple_unlink(dir, dentry);
- dput(dentry);
- }
- inode_unlock(dir);
- simple_release_fs(&mount, &mount_count);
-}
-EXPORT_SYMBOL_GPL(securityfs_remove);
-
-static void remove_one(struct dentry *victim)
-{
- simple_release_fs(&mount, &mount_count);
-}
-
-/**
- * securityfs_recursive_remove - recursively removes a file or directory
- *
- * @dentry: a pointer to a the dentry of the file or directory to be removed.
*
- * This function recursively removes a file or directory in securityfs that was
- * previously created with a call to another securityfs function (like
- * securityfs_create_file() or variants thereof.)
+ * AV: when applied to a directory it will take all children out; no need to call
+ * it for descendants if an ancestor is getting killed.
*/
-void securityfs_recursive_remove(struct dentry *dentry)
+void securityfs_remove(struct dentry *dentry)
{
if (IS_ERR_OR_NULL(dentry))
return;
@@ -336,7 +312,7 @@ void securityfs_recursive_remove(struct dentry *dentry)
simple_recursive_removal(dentry, remove_one);
simple_release_fs(&mount, &mount_count);
}
-EXPORT_SYMBOL_GPL(securityfs_recursive_remove);
+EXPORT_SYMBOL_GPL(securityfs_remove);
#ifdef CONFIG_SECURITY
static struct dentry *lsm_dentry;
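
With securityfs_remove() now doing a recursive removal (and pinning the filesystem only for root-level entries), callers no longer need to keep a dentry per file just for cleanup. A minimal sketch of the resulting usage pattern for a hypothetical securityfs user; the names and fops are made up:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>

static const struct file_operations example_status_fops = {
	/* .read handler omitted in this sketch */
};

static struct dentry *example_dir;

static int __init example_fs_init(void)
{
	struct dentry *dentry;

	example_dir = securityfs_create_dir("example", NULL);
	if (IS_ERR(example_dir))
		return PTR_ERR(example_dir);

	dentry = securityfs_create_file("status", 0444, example_dir,
					NULL, &example_status_fops);
	if (IS_ERR(dentry)) {
		/* One call tears down the directory and its children. */
		securityfs_remove(example_dir);
		return PTR_ERR(dentry);
	}
	return 0;
}
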
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 7c06ffd633d2..a5e730ffda57 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
}
/*
- * Dump large security xattr values as a continuous ascii hexademical string.
+ * Dump large security xattr values as a continuous ascii hexadecimal string.
* (pr_debug is limited to 64 bytes.)
*/
static void dump_security_xattr_l(const char *prefix, const void *src,
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 377e57e9084f..0add782e73ba 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry)
* and compare it against the stored security.evm xattr.
*
* For performance:
- * - use the previoulsy retrieved xattr value and length to calculate the
+ * - use the previously retrieved xattr value and length to calculate the
* HMAC.)
* - cache the verification result in the iint, when available.
*
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 9b907c2fee60..b0d2aad27850 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -17,7 +17,6 @@
#include "evm.h"
static struct dentry *evm_dir;
-static struct dentry *evm_init_tpm;
static struct dentry *evm_symlink;
#ifdef CONFIG_EVM_ADD_XATTRS
@@ -286,7 +285,7 @@ static int evm_init_xattrs(void)
{
evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL,
&evm_xattr_ops);
- if (!evm_xattrs || IS_ERR(evm_xattrs))
+ if (IS_ERR(evm_xattrs))
return -EFAULT;
return 0;
@@ -301,21 +300,22 @@ static int evm_init_xattrs(void)
int __init evm_init_secfs(void)
{
int error = 0;
+ struct dentry *dentry;
evm_dir = securityfs_create_dir("evm", integrity_dir);
- if (!evm_dir || IS_ERR(evm_dir))
+ if (IS_ERR(evm_dir))
return -EFAULT;
- evm_init_tpm = securityfs_create_file("evm", 0660,
- evm_dir, NULL, &evm_key_ops);
- if (!evm_init_tpm || IS_ERR(evm_init_tpm)) {
+ dentry = securityfs_create_file("evm", 0660,
+ evm_dir, NULL, &evm_key_ops);
+ if (IS_ERR(dentry)) {
error = -EFAULT;
goto out;
}
evm_symlink = securityfs_create_symlink("evm", NULL,
"integrity/evm/evm", NULL);
- if (!evm_symlink || IS_ERR(evm_symlink)) {
+ if (IS_ERR(evm_symlink)) {
error = -EFAULT;
goto out;
}
@@ -328,7 +328,6 @@ int __init evm_init_secfs(void)
return 0;
out:
securityfs_remove(evm_symlink);
- securityfs_remove(evm_init_tpm);
securityfs_remove(evm_dir);
return error;
}
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 475c32615006..976e75f9b9ba 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -321,4 +321,15 @@ config IMA_DISABLE_HTABLE
help
This option disables htable to allow measurement of duplicate records.
+config IMA_KEXEC_EXTRA_MEMORY_KB
+ int "Extra memory for IMA measurements added during kexec soft reboot"
+ range 0 40
+ depends on IMA_KEXEC
+ default 0
+ help
+ IMA_KEXEC_EXTRA_MEMORY_KB determines the extra memory to be
+ allocated (in kb) for IMA measurements added during kexec soft reboot.
+ If set to the default value of 0, an extra half page of memory for those
+ additional measurements will be allocated.
+
endif
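
The arithmetic behind the new knob, as used by the ima_kexec.c hunk further down: a value of 0 keeps the historical extra half page, any other value is taken in kilobytes, and the total is then page-aligned. A standalone sketch with illustrative numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long runtime_size = 9000;	/* example measurement list size */
	unsigned long extra_kb = 8;		/* CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB */
	unsigned long extra = extra_kb ? extra_kb * 1024 : PAGE_SIZE / 2;

	printf("%lu\n", ALIGN(runtime_size + extra, PAGE_SIZE));	/* 20480 */
	return 0;
}
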
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c0d3b716d11f..e3d71d8d56e3 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -149,6 +149,9 @@ struct ima_kexec_hdr {
#define IMA_CHECK_BLACKLIST 0x40000000
#define IMA_VERITY_REQUIRED 0x80000000
+/* Exclude non-action flags which are not rule-specific. */
+#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
+
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
@@ -178,7 +181,8 @@ struct ima_kexec_hdr {
#define IMA_UPDATE_XATTR 1
#define IMA_CHANGE_ATTR 2
#define IMA_DIGSIG 3
-#define IMA_MUST_MEASURE 4
+#define IMA_MAY_EMIT_TOMTOU 4
+#define IMA_EMITTED_OPENWRITERS 5
/* IMA integrity metadata associated with an inode */
struct ima_iint_cache {
@@ -240,6 +244,12 @@ void ima_post_key_create_or_update(struct key *keyring, struct key *key,
unsigned long flags, bool create);
#endif
+#ifdef CONFIG_IMA_KEXEC
+void ima_measure_kexec_event(const char *event_name);
+#else
+static inline void ima_measure_kexec_event(const char *event_name) {}
+#endif
+
/*
* The default binary_runtime_measurements list format is defined as the
* platform native format. The canonical format is defined as little-endian.
@@ -278,6 +288,7 @@ unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
int __init ima_init_digests(void);
+void __init ima_init_reboot_notifier(void);
int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
void *lsm_data);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 884a3533f7af..f435eff4667f 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
+#include <linux/binfmts.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/magic.h>
@@ -469,6 +470,17 @@ int ima_check_blacklist(struct ima_iint_cache *iint,
return rc;
}
+static bool is_bprm_creds_for_exec(enum ima_hooks func, struct file *file)
+{
+ struct linux_binprm *bprm;
+
+ if (func == BPRM_CHECK) {
+ bprm = container_of(&file, struct linux_binprm, file);
+ return bprm->is_check;
+ }
+ return false;
+}
+
/*
* ima_appraise_measurement - appraise file measurement
*
@@ -483,6 +495,7 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
int xattr_len, const struct modsig *modsig)
{
static const char op[] = "appraise_data";
+ int audit_msgno = AUDIT_INTEGRITY_DATA;
const char *cause = "unknown";
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_backing_inode(dentry);
@@ -494,6 +507,16 @@ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
return INTEGRITY_UNKNOWN;
+ /*
+ * Unlike any of the other LSM hooks where the kernel enforces file
+ * integrity, enforcing file integrity for the bprm_creds_for_exec()
+ * LSM hook with the AT_EXECVE_CHECK flag is left up to the discretion
+ * of the script interpreter (userspace). Differentiate kernel and
+ * userspace enforced integrity audit messages.
+ */
+ if (is_bprm_creds_for_exec(func, file))
+ audit_msgno = AUDIT_INTEGRITY_USERSPACE;
+
/* If reading the xattr failed and there's no modsig, error out. */
if (rc <= 0 && !try_modsig) {
if (rc && rc != -ENODATA)
@@ -569,7 +592,7 @@ out:
(iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) {
status = INTEGRITY_FAIL;
cause = "unverifiable-signature";
- integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ integrity_audit_msg(audit_msgno, inode, filename,
op, cause, rc, 0);
} else if (status != INTEGRITY_PASS) {
/* Fix mode, but don't replace file signatures. */
@@ -589,7 +612,7 @@ out:
status = INTEGRITY_PASS;
}
- integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ integrity_audit_msg(audit_msgno, inode, filename,
op, cause, rc, 0);
} else {
ima_cache_flags(iint, func);
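
The is_bprm_creds_for_exec() helper above relies on container_of() to step from a pointer to a member back to its enclosing structure. A standalone illustration of that pattern with a made-up structure (the macro expansion mirrors the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct binprm_like {
	int is_check;
	int fd;		/* stand-in for the struct file * member */
};

int main(void)
{
	struct binprm_like bprm = { .is_check = 1, .fd = 3 };
	int *fdp = &bprm.fd;
	struct binprm_like *back = container_of(fdp, struct binprm_like, fd);

	printf("is_check=%d\n", back->is_check);	/* prints is_check=1 */
	return 0;
}
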
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index e4a79a9b2d58..87045b09f120 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -116,28 +116,6 @@ void ima_putc(struct seq_file *m, void *data, int datalen)
seq_putc(m, *(char *)data++);
}
-static struct dentry **ascii_securityfs_measurement_lists __ro_after_init;
-static struct dentry **binary_securityfs_measurement_lists __ro_after_init;
-static int securityfs_measurement_list_count __ro_after_init;
-
-static void lookup_template_data_hash_algo(int *algo_idx, enum hash_algo *algo,
- struct seq_file *m,
- struct dentry **lists)
-{
- struct dentry *dentry;
- int i;
-
- dentry = file_dentry(m->file);
-
- for (i = 0; i < securityfs_measurement_list_count; i++) {
- if (dentry == lists[i]) {
- *algo_idx = i;
- *algo = ima_algo_array[i].algo;
- break;
- }
- }
-}
-
/* print format:
* 32bit-le=pcr#
* char[n]=template digest
@@ -160,9 +138,10 @@ int ima_measurements_show(struct seq_file *m, void *v)
algo_idx = ima_sha1_idx;
algo = HASH_ALGO_SHA1;
- if (m->file != NULL)
- lookup_template_data_hash_algo(&algo_idx, &algo, m,
- binary_securityfs_measurement_lists);
+ if (m->file != NULL) {
+ algo_idx = (unsigned long)file_inode(m->file)->i_private;
+ algo = ima_algo_array[algo_idx].algo;
+ }
/* get entry */
e = qe->entry;
@@ -256,9 +235,10 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
algo_idx = ima_sha1_idx;
algo = HASH_ALGO_SHA1;
- if (m->file != NULL)
- lookup_template_data_hash_algo(&algo_idx, &algo, m,
- ascii_securityfs_measurement_lists);
+ if (m->file != NULL) {
+ algo_idx = (unsigned long)file_inode(m->file)->i_private;
+ algo = ima_algo_array[algo_idx].algo;
+ }
/* get entry */
e = qe->entry;
@@ -396,11 +376,6 @@ out:
static struct dentry *ima_dir;
static struct dentry *ima_symlink;
-static struct dentry *binary_runtime_measurements;
-static struct dentry *ascii_runtime_measurements;
-static struct dentry *runtime_measurements_count;
-static struct dentry *violations;
-static struct dentry *ima_policy;
enum ima_fs_flags {
IMA_FS_BUSY,
@@ -417,64 +392,33 @@ static const struct seq_operations ima_policy_seqops = {
};
#endif
-static void __init remove_securityfs_measurement_lists(struct dentry **lists)
-{
- int i;
-
- if (lists) {
- for (i = 0; i < securityfs_measurement_list_count; i++)
- securityfs_remove(lists[i]);
-
- kfree(lists);
- }
-}
-
static int __init create_securityfs_measurement_lists(void)
{
- char file_name[NAME_MAX + 1];
- struct dentry *dentry;
- u16 algo;
- int i;
-
- securityfs_measurement_list_count = NR_BANKS(ima_tpm_chip);
+ int count = NR_BANKS(ima_tpm_chip);
if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip))
- securityfs_measurement_list_count++;
+ count++;
- ascii_securityfs_measurement_lists =
- kcalloc(securityfs_measurement_list_count, sizeof(struct dentry *),
- GFP_KERNEL);
- if (!ascii_securityfs_measurement_lists)
- return -ENOMEM;
-
- binary_securityfs_measurement_lists =
- kcalloc(securityfs_measurement_list_count, sizeof(struct dentry *),
- GFP_KERNEL);
- if (!binary_securityfs_measurement_lists)
- return -ENOMEM;
-
- for (i = 0; i < securityfs_measurement_list_count; i++) {
- algo = ima_algo_array[i].algo;
+ for (int i = 0; i < count; i++) {
+ u16 algo = ima_algo_array[i].algo;
+ char file_name[NAME_MAX + 1];
+ struct dentry *dentry;
sprintf(file_name, "ascii_runtime_measurements_%s",
hash_algo_name[algo]);
dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
- ima_dir, NULL,
+ ima_dir, (void *)(uintptr_t)i,
&ima_ascii_measurements_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- ascii_securityfs_measurement_lists[i] = dentry;
-
sprintf(file_name, "binary_runtime_measurements_%s",
hash_algo_name[algo]);
dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
- ima_dir, NULL,
+ ima_dir, (void *)(uintptr_t)i,
&ima_measurements_ops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
-
- binary_securityfs_measurement_lists[i] = dentry;
}
return 0;
@@ -533,8 +477,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
ima_update_policy();
#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
- securityfs_remove(ima_policy);
- ima_policy = NULL;
+ securityfs_remove(file->f_path.dentry);
#elif defined(CONFIG_IMA_WRITE_POLICY)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
#elif defined(CONFIG_IMA_READ_POLICY)
@@ -553,11 +496,9 @@ static const struct file_operations ima_measure_policy_ops = {
int __init ima_fs_init(void)
{
+ struct dentry *dentry;
int ret;
- ascii_securityfs_measurement_lists = NULL;
- binary_securityfs_measurement_lists = NULL;
-
ima_dir = securityfs_create_dir("ima", integrity_dir);
if (IS_ERR(ima_dir))
return PTR_ERR(ima_dir);
@@ -573,57 +514,45 @@ int __init ima_fs_init(void)
if (ret != 0)
goto out;
- binary_runtime_measurements =
- securityfs_create_symlink("binary_runtime_measurements", ima_dir,
+ dentry = securityfs_create_symlink("binary_runtime_measurements", ima_dir,
"binary_runtime_measurements_sha1", NULL);
- if (IS_ERR(binary_runtime_measurements)) {
- ret = PTR_ERR(binary_runtime_measurements);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
}
- ascii_runtime_measurements =
- securityfs_create_symlink("ascii_runtime_measurements", ima_dir,
+ dentry = securityfs_create_symlink("ascii_runtime_measurements", ima_dir,
"ascii_runtime_measurements_sha1", NULL);
- if (IS_ERR(ascii_runtime_measurements)) {
- ret = PTR_ERR(ascii_runtime_measurements);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
}
- runtime_measurements_count =
- securityfs_create_file("runtime_measurements_count",
+ dentry = securityfs_create_file("runtime_measurements_count",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_measurements_count_ops);
- if (IS_ERR(runtime_measurements_count)) {
- ret = PTR_ERR(runtime_measurements_count);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
}
- violations =
- securityfs_create_file("violations", S_IRUSR | S_IRGRP,
+ dentry = securityfs_create_file("violations", S_IRUSR | S_IRGRP,
ima_dir, NULL, &ima_htable_violations_ops);
- if (IS_ERR(violations)) {
- ret = PTR_ERR(violations);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
}
- ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS,
+ dentry = securityfs_create_file("policy", POLICY_FILE_FLAGS,
ima_dir, NULL,
&ima_measure_policy_ops);
- if (IS_ERR(ima_policy)) {
- ret = PTR_ERR(ima_policy);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
}
return 0;
out:
- securityfs_remove(ima_policy);
- securityfs_remove(violations);
- securityfs_remove(runtime_measurements_count);
- securityfs_remove(ascii_runtime_measurements);
- securityfs_remove(binary_runtime_measurements);
- remove_securityfs_measurement_lists(ascii_securityfs_measurement_lists);
- remove_securityfs_measurement_lists(binary_securityfs_measurement_lists);
- securityfs_measurement_list_count = 0;
securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
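
Instead of caching every measurement-list dentry, the ima_fs.c rework above stows the bank index in the inode's i_private at creation time and reads it back in the show handlers. The trick is just an integer/pointer round trip, sketched here in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long i = 3;			/* bank index at create time */
	void *i_private = (void *)(uintptr_t)i;	/* stored in the inode */
	unsigned long algo_idx = (uintptr_t)i_private;	/* read back in ->show() */

	printf("%lu\n", algo_idx);	/* 3 */
	return 0;
}
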
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 4e208239a40e..a2f34f2d8ad7 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -152,6 +152,8 @@ int __init ima_init(void)
ima_init_key_queue();
+ ima_init_reboot_notifier();
+
ima_measure_critical_data("kernel_info", "kernel_version",
UTS_RELEASE, strlen(UTS_RELEASE), false,
NULL, 0);
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
index 52e00332defe..7362f68f2d8b 100644
--- a/security/integrity/ima/ima_kexec.c
+++ b/security/integrity/ima/ima_kexec.c
@@ -12,65 +12,118 @@
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/ima.h>
+#include <linux/reboot.h>
+#include <asm/page.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
+#define IMA_KEXEC_EVENT_LEN 256
+
+static bool ima_kexec_update_registered;
+static struct seq_file ima_kexec_file;
+static size_t kexec_segment_size;
+static void *ima_kexec_buffer;
+
+static void ima_free_kexec_file_buf(struct seq_file *sf)
+{
+ vfree(sf->buf);
+ sf->buf = NULL;
+ sf->size = 0;
+ sf->read_pos = 0;
+ sf->count = 0;
+}
+
+void ima_measure_kexec_event(const char *event_name)
+{
+ char ima_kexec_event[IMA_KEXEC_EVENT_LEN];
+ size_t buf_size = 0;
+ long len;
+ int n;
+
+ buf_size = ima_get_binary_runtime_size();
+ len = atomic_long_read(&ima_htable.len);
+
+ n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
+ "kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
+ "ima_runtime_measurements_count=%ld;",
+ kexec_segment_size, buf_size, len);
+
+ ima_measure_critical_data("ima_kexec", event_name, ima_kexec_event, n, false, NULL, 0);
+}
+
+static int ima_alloc_kexec_file_buf(size_t segment_size)
+{
+ /*
+ * kexec 'load' may be called multiple times.
+ * Free and realloc the buffer only if the segment_size is
+ * changed from the previous kexec 'load' call.
+ */
+ if (ima_kexec_file.buf && ima_kexec_file.size == segment_size)
+ goto out;
+
+ ima_free_kexec_file_buf(&ima_kexec_file);
+
+ /* segment size can't change between kexec load and execute */
+ ima_kexec_file.buf = vmalloc(segment_size);
+ if (!ima_kexec_file.buf)
+ return -ENOMEM;
+
+ ima_kexec_file.size = segment_size;
+
+out:
+ ima_kexec_file.read_pos = 0;
+ ima_kexec_file.count = sizeof(struct ima_kexec_hdr); /* reserved space */
+ ima_measure_kexec_event("kexec_load");
+
+ return 0;
+}
+
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
unsigned long segment_size)
{
struct ima_queue_entry *qe;
- struct seq_file file;
struct ima_kexec_hdr khdr;
int ret = 0;
/* segment size can't change between kexec load and execute */
- file.buf = vmalloc(segment_size);
- if (!file.buf) {
- ret = -ENOMEM;
- goto out;
+ if (!ima_kexec_file.buf) {
+ pr_err("Kexec file buf not allocated\n");
+ return -EINVAL;
}
- file.file = NULL;
- file.size = segment_size;
- file.read_pos = 0;
- file.count = sizeof(khdr); /* reserved space */
-
memset(&khdr, 0, sizeof(khdr));
khdr.version = 1;
- list_for_each_entry_rcu(qe, &ima_measurements, later) {
- if (file.count < file.size) {
+ /* This is an append-only list, no need to hold the RCU read lock */
+ list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
+ if (ima_kexec_file.count < ima_kexec_file.size) {
khdr.count++;
- ima_measurements_show(&file, qe);
+ ima_measurements_show(&ima_kexec_file, qe);
} else {
ret = -EINVAL;
break;
}
}
- if (ret < 0)
- goto out;
-
/*
* fill in reserved space with some buffer details
* (eg. version, buffer size, number of measurements)
*/
- khdr.buffer_size = file.count;
+ khdr.buffer_size = ima_kexec_file.count;
if (ima_canonical_fmt) {
khdr.version = cpu_to_le16(khdr.version);
khdr.count = cpu_to_le64(khdr.count);
khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
}
- memcpy(file.buf, &khdr, sizeof(khdr));
+ memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr));
print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
- file.buf, file.count < 100 ? file.count : 100,
+ ima_kexec_file.buf, ima_kexec_file.count < 100 ?
+ ima_kexec_file.count : 100,
true);
- *buffer_size = file.count;
- *buffer = file.buf;
-out:
- if (ret == -EINVAL)
- vfree(file.buf);
+ *buffer_size = ima_kexec_file.count;
+ *buffer = ima_kexec_file.buf;
+
return ret;
}
@@ -86,32 +139,39 @@ void ima_add_kexec_buffer(struct kimage *image)
.buf_min = 0, .buf_max = ULONG_MAX,
.top_down = true };
unsigned long binary_runtime_size;
+ unsigned long extra_memory;
/* use more understandable variable names than defined in kbuf */
+ size_t kexec_buffer_size = 0;
void *kexec_buffer = NULL;
- size_t kexec_buffer_size;
- size_t kexec_segment_size;
int ret;
+ if (image->type == KEXEC_TYPE_CRASH)
+ return;
+
/*
- * Reserve an extra half page of memory for additional measurements
- * added during the kexec load.
+ * Reserve extra memory for measurements added during kexec.
*/
- binary_runtime_size = ima_get_binary_runtime_size();
+ if (CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB <= 0)
+ extra_memory = PAGE_SIZE / 2;
+ else
+ extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024;
+
+ binary_runtime_size = ima_get_binary_runtime_size() + extra_memory;
+
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
kexec_segment_size = ULONG_MAX;
else
- kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
- PAGE_SIZE / 2, PAGE_SIZE);
+ kexec_segment_size = ALIGN(binary_runtime_size, PAGE_SIZE);
+
if ((kexec_segment_size == ULONG_MAX) ||
((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
pr_err("Binary measurement list too large.\n");
return;
}
- ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
- kexec_segment_size);
- if (!kexec_buffer) {
+ ret = ima_alloc_kexec_file_buf(kexec_segment_size);
+ if (ret < 0) {
pr_err("Not enough memory for the kexec measurement buffer.\n");
return;
}
@@ -119,6 +179,7 @@ void ima_add_kexec_buffer(struct kimage *image)
kbuf.buffer = kexec_buffer;
kbuf.bufsz = kexec_buffer_size;
kbuf.memsz = kexec_segment_size;
+ image->is_ima_segment_index_set = false;
ret = kexec_add_buffer(&kbuf);
if (ret) {
pr_err("Error passing over kexec measurement buffer.\n");
@@ -129,10 +190,80 @@ void ima_add_kexec_buffer(struct kimage *image)
image->ima_buffer_addr = kbuf.mem;
image->ima_buffer_size = kexec_segment_size;
image->ima_buffer = kexec_buffer;
+ image->ima_segment_index = image->nr_segments - 1;
+ image->is_ima_segment_index_set = true;
kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
kbuf.mem);
}
+
+/*
+ * Called during kexec execute so that IMA can update the measurement list.
+ */
+static int ima_update_kexec_buffer(struct notifier_block *self,
+ unsigned long action, void *data)
+{
+ size_t buf_size = 0;
+ int ret = NOTIFY_OK;
+ void *buf = NULL;
+
+ if (!kexec_in_progress) {
+ pr_info("No kexec in progress.\n");
+ return ret;
+ }
+
+ if (!ima_kexec_buffer) {
+ pr_err("Kexec buffer not set.\n");
+ return ret;
+ }
+
+ ret = ima_dump_measurement_list(&buf_size, &buf, kexec_segment_size);
+
+ if (ret)
+ pr_err("Dump measurements failed. Error:%d\n", ret);
+
+ if (buf_size != 0)
+ memcpy(ima_kexec_buffer, buf, buf_size);
+
+ kimage_unmap_segment(ima_kexec_buffer);
+ ima_kexec_buffer = NULL;
+
+ return ret;
+}
+
+static struct notifier_block update_buffer_nb = {
+ .notifier_call = ima_update_kexec_buffer,
+ .priority = INT_MIN
+};
+
+/*
+ * Create a mapping for the source pages that contain the IMA buffer
+ * so we can update it later.
+ */
+void ima_kexec_post_load(struct kimage *image)
+{
+ if (ima_kexec_buffer) {
+ kimage_unmap_segment(ima_kexec_buffer);
+ ima_kexec_buffer = NULL;
+ }
+
+ if (!image->ima_buffer_addr)
+ return;
+
+ ima_kexec_buffer = kimage_map_segment(image,
+ image->ima_buffer_addr,
+ image->ima_buffer_size);
+ if (!ima_kexec_buffer) {
+ pr_err("Could not map measurements buffer.\n");
+ return;
+ }
+
+ if (!ima_kexec_update_registered) {
+ register_reboot_notifier(&update_buffer_nb);
+ ima_kexec_update_registered = true;
+ }
+}
+
#endif /* IMA_KEXEC */
/*
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9b87556b03a7..cdd225f65a62 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -27,6 +27,7 @@
#include <linux/fs.h>
#include <linux/iversion.h>
#include <linux/evm.h>
+#include <linux/crash_dump.h>
#include "ima.h"
@@ -38,11 +39,30 @@ int ima_appraise;
int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1;
static int hash_setup_done;
+static int ima_disabled __ro_after_init;
static struct notifier_block ima_lsm_policy_notifier = {
.notifier_call = ima_lsm_policy_change,
};
+static int __init ima_setup(char *str)
+{
+ if (!is_kdump_kernel()) {
+ pr_info("Warning: ima setup option only permitted in kdump");
+ return 1;
+ }
+
+ if (strncmp(str, "off", 3) == 0)
+ ima_disabled = 1;
+ else if (strncmp(str, "on", 2) == 0)
+ ima_disabled = 0;
+ else
+ pr_err("Invalid ima setup option: \"%s\" , please specify ima=on|off.", str);
+
+ return 1;
+}
+__setup("ima=", ima_setup);
+
static int __init hash_setup(char *str)
{
struct ima_template_desc *template_desc = ima_template_desc_current();
@@ -129,16 +149,22 @@ static void ima_rdwr_violation_check(struct file *file,
if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
if (!iint)
iint = ima_iint_find(inode);
+
/* IMA_MEASURE is set from reader side */
- if (iint && test_bit(IMA_MUST_MEASURE,
- &iint->atomic_flags))
+ if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU,
+ &iint->atomic_flags))
send_tomtou = true;
}
} else {
if (must_measure)
- set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
- if (inode_is_open_for_write(inode) && must_measure)
- send_writers = true;
+ set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags);
+
+ /* Limit number of open_writers violations */
+ if (inode_is_open_for_write(inode) && must_measure) {
+ if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS,
+ &iint->atomic_flags))
+ send_writers = true;
+ }
}
if (!send_tomtou && !send_writers)
@@ -167,6 +193,8 @@ static void ima_check_last_writer(struct ima_iint_cache *iint,
if (atomic_read(&inode->i_writecount) == 1) {
struct kstat stat;
+ clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags);
+
update = test_and_clear_bit(IMA_UPDATE_XATTR,
&iint->atomic_flags);
if ((iint->flags & IMA_NEW_FILE) ||
@@ -237,7 +265,9 @@ static int process_measurement(struct file *file, const struct cred *cred,
&allowed_algos);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK ||
func == MMAP_CHECK_REQPROT) &&
- (ima_policy_flag & IMA_MEASURE));
+ (ima_policy_flag & IMA_MEASURE) &&
+ ((action & IMA_MEASURE) ||
+ (file->f_mode & FMODE_WRITE)));
if (!action && !violation_check)
return 0;
@@ -269,10 +299,13 @@ static int process_measurement(struct file *file, const struct cred *cred,
mutex_lock(&iint->mutex);
if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
- /* reset appraisal flags if ima_inode_post_setattr was called */
+ /*
+ * Reset appraisal flags (action and non-action rule-specific)
+ * if ima_inode_post_setattr was called.
+ */
iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_NONACTION_FLAGS);
+ IMA_NONACTION_RULE_FLAGS);
/*
* Re-evaulate the file if either the xattr has changed or the
@@ -555,6 +588,34 @@ static int ima_bprm_check(struct linux_binprm *bprm)
}
/**
+ * ima_bprm_creds_for_exec - collect/store/appraise measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure
+ * and appraise the integrity of a file to be executed by script interpreters.
+ * Unlike any of the other LSM hooks where the kernel enforces file integrity,
+ * enforcing file integrity is left up to the discretion of the script
+ * interpreter (userspace).
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+static int ima_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * As security_bprm_check() is called multiple times, both
+ * the script and the shebang interpreter are measured, appraised,
+ * and audited. Limit usage of this LSM hook to just measuring,
+ * appraising, and auditing the indirect script execution
+ * (e.g. ./sh example.sh).
+ */
+ if (!bprm->is_check)
+ return 0;
+
+ return ima_bprm_check(bprm);
+}
+
+/**
* ima_file_check - based on policy, collect/store measurement.
* @file: pointer to the file to be measured
* @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
@@ -983,9 +1044,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap,
}
/*
- * Both LSM hooks and auxilary based buffer measurements are
- * based on policy. To avoid code duplication, differentiate
- * between the LSM hooks and auxilary buffer measurements,
+ * Both LSM hooks and auxiliary based buffer measurements are
+ * based on policy. To avoid code duplication, differentiate
+ * between the LSM hooks and auxiliary buffer measurements,
* retrieving the policy rule information only for the LSM hook
* buffer measurements.
*/
@@ -1145,6 +1206,12 @@ static int __init init_ima(void)
{
int error;
+ /* Note that turning IMA off is intentionally limited to the kdump kernel. */
+ if (ima_disabled && is_kdump_kernel()) {
+ pr_info("IMA functionality is disabled");
+ return 0;
+ }
+
ima_appraise_parse_cmdline();
ima_init_template_list();
hash_setup(CONFIG_IMA_DEFAULT_HASH);
@@ -1174,6 +1241,7 @@ static int __init init_ima(void)
static struct security_hook_list ima_hooks[] __ro_after_init = {
LSM_HOOK_INIT(bprm_check_security, ima_bprm_check),
+ LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec),
LSM_HOOK_INIT(file_post_open, ima_file_check),
LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile),
LSM_HOOK_INIT(file_release, ima_file_free),
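
The violation-check hunks above switch the iint atomic flags to an emit-once pattern: the first path to win test_and_set_bit() reports, later ones stay silent, and the flag is re-armed when the last writer goes away. A minimal sketch of that pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/printk.h>

#define EX_EMITTED_OPENWRITERS 0

static void example_maybe_report(unsigned long *flags)
{
	/* Only the first caller since the last re-arm reports. */
	if (!test_and_set_bit(EX_EMITTED_OPENWRITERS, flags))
		pr_warn("open_writers violation\n");
}

static void example_last_writer_gone(unsigned long *flags)
{
	clear_bit(EX_EMITTED_OPENWRITERS, flags);	/* re-arm for next time */
}
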
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 21a8e54c383f..128fab897930 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -148,7 +148,8 @@ static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
{.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .func = FILE_CHECK,
+ .flags = IMA_FSMAGIC | IMA_FUNC},
{.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
{.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
@@ -1431,7 +1432,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
int token;
unsigned long lnum;
- if (result < 0)
+ if (result < 0 || *p == '#') /* ignore suffixed comment */
break;
if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
continue;
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 532da87ce519..590637e81ad1 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -16,6 +16,7 @@
*/
#include <linux/rculist.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include "ima.h"
@@ -44,6 +45,12 @@ struct ima_h_table ima_htable = {
*/
static DEFINE_MUTEX(ima_extend_list_mutex);
+/*
+ * Used internally by the kernel to suspend measurements.
+ * Protected by ima_extend_list_mutex.
+ */
+static bool ima_measurements_suspended;
+
/* lookup up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
int pcr)
@@ -168,6 +175,18 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
int result = 0, tpmresult = 0;
mutex_lock(&ima_extend_list_mutex);
+
+ /*
+ * Avoid appending to the measurement log when the TPM subsystem has
+ * been shut down while preparing for system reboot.
+ */
+ if (ima_measurements_suspended) {
+ audit_cause = "measurements_suspended";
+ audit_info = 0;
+ result = -ENODEV;
+ goto out;
+ }
+
if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
if (ima_lookup_digest_entry(digest, entry->pcr)) {
audit_cause = "hash_exists";
@@ -211,6 +230,36 @@ int ima_restore_measurement_entry(struct ima_template_entry *entry)
return result;
}
+static void ima_measurements_suspend(void)
+{
+ mutex_lock(&ima_extend_list_mutex);
+ ima_measurements_suspended = true;
+ mutex_unlock(&ima_extend_list_mutex);
+}
+
+static int ima_reboot_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+#ifdef CONFIG_IMA_KEXEC
+ if (action == SYS_RESTART && data && !strcmp(data, "kexec reboot"))
+ ima_measure_kexec_event("kexec_execute");
+#endif
+
+ ima_measurements_suspend();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ima_reboot_nb = {
+ .notifier_call = ima_reboot_notifier,
+};
+
+void __init ima_init_reboot_notifier(void)
+{
+ register_reboot_notifier(&ima_reboot_nb);
+}
+
int __init ima_init_digests(void)
{
u16 digest_size;
diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c
index c85febca3343..714c961a00f5 100644
--- a/security/integrity/platform_certs/load_powerpc.c
+++ b/security/integrity/platform_certs/load_powerpc.c
@@ -75,12 +75,13 @@ static int __init load_powerpc_certs(void)
return -ENODEV;
// Check for known secure boot implementations from OPAL or PLPKS
- if (strcmp("ibm,edk2-compat-v1", buf) && strcmp("ibm,plpks-sb-v1", buf)) {
+ if (strcmp("ibm,edk2-compat-v1", buf) && strcmp("ibm,plpks-sb-v1", buf) &&
+ strcmp("ibm,plpks-sb-v0", buf)) {
pr_err("Unsupported secvar implementation \"%s\", not loading certs\n", buf);
return -ENODEV;
}
- if (strcmp("ibm,plpks-sb-v1", buf) == 0)
+ if (strcmp("ibm,plpks-sb-v1", buf) == 0 || strcmp("ibm,plpks-sb-v0", buf) == 0)
/* PLPKS authenticated variables ESL data is prefixed with 8 bytes of timestamp */
offset = 8;
diff --git a/security/ipe/Kconfig b/security/ipe/Kconfig
index 3c75bf267da4..a110a6cd848b 100644
--- a/security/ipe/Kconfig
+++ b/security/ipe/Kconfig
@@ -6,6 +6,7 @@
menuconfig SECURITY_IPE
bool "Integrity Policy Enforcement (IPE)"
depends on SECURITY && SECURITYFS && AUDIT && AUDITSYSCALL
+ select CRYPTO_LIB_SHA256
select PKCS7_MESSAGE_PARSER
select SYSTEM_DATA_VERIFICATION
select IPE_PROP_DM_VERITY if DM_VERITY
diff --git a/security/ipe/audit.c b/security/ipe/audit.c
index f05f0caa4850..de5fed62592e 100644
--- a/security/ipe/audit.c
+++ b/security/ipe/audit.c
@@ -6,7 +6,7 @@
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/types.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
#include "ipe.h"
#include "eval.h"
@@ -17,10 +17,12 @@
#define ACTSTR(x) ((x) == IPE_ACTION_ALLOW ? "ALLOW" : "DENY")
-#define IPE_AUDIT_HASH_ALG "sha256"
+#define IPE_AUDIT_HASH_ALG "sha256" /* keep in sync with audit_policy() */
#define AUDIT_POLICY_LOAD_FMT "policy_name=\"%s\" policy_version=%hu.%hu.%hu "\
"policy_digest=" IPE_AUDIT_HASH_ALG ":"
+#define AUDIT_POLICY_LOAD_NULL_FMT "policy_name=? policy_version=? "\
+ "policy_digest=?"
#define AUDIT_OLD_ACTIVE_POLICY_FMT "old_active_pol_name=\"%s\" "\
"old_active_pol_version=%hu.%hu.%hu "\
"old_policy_digest=" IPE_AUDIT_HASH_ALG ":"
@@ -180,37 +182,14 @@ static void audit_policy(struct audit_buffer *ab,
const char *audit_format,
const struct ipe_policy *const p)
{
- SHASH_DESC_ON_STACK(desc, tfm);
- struct crypto_shash *tfm;
- u8 *digest = NULL;
+ u8 digest[SHA256_DIGEST_SIZE];
- tfm = crypto_alloc_shash(IPE_AUDIT_HASH_ALG, 0, 0);
- if (IS_ERR(tfm))
- return;
-
- desc->tfm = tfm;
-
- digest = kzalloc(crypto_shash_digestsize(tfm), GFP_KERNEL);
- if (!digest)
- goto out;
-
- if (crypto_shash_init(desc))
- goto out;
-
- if (crypto_shash_update(desc, p->pkcs7, p->pkcs7len))
- goto out;
-
- if (crypto_shash_final(desc, digest))
- goto out;
+ sha256(p->pkcs7, p->pkcs7len, digest);
audit_log_format(ab, audit_format, p->parsed->name,
p->parsed->version.major, p->parsed->version.minor,
p->parsed->version.rev);
- audit_log_n_hex(ab, digest, crypto_shash_digestsize(tfm));
-
-out:
- kfree(digest);
- crypto_free_shash(tfm);
+ audit_log_n_hex(ab, digest, sizeof(digest));
}
/**
@@ -248,22 +227,29 @@ void ipe_audit_policy_activation(const struct ipe_policy *const op,
}
/**
- * ipe_audit_policy_load() - Audit a policy being loaded into the kernel.
- * @p: Supplies a pointer to the policy to audit.
+ * ipe_audit_policy_load() - Audit a policy loading event.
+ * @p: Supplies a pointer to the policy to audit or an error pointer.
*/
void ipe_audit_policy_load(const struct ipe_policy *const p)
{
struct audit_buffer *ab;
+ int err = 0;
ab = audit_log_start(audit_context(), GFP_KERNEL,
AUDIT_IPE_POLICY_LOAD);
if (!ab)
return;
- audit_policy(ab, AUDIT_POLICY_LOAD_FMT, p);
- audit_log_format(ab, " auid=%u ses=%u lsm=ipe res=1",
+ if (!IS_ERR(p)) {
+ audit_policy(ab, AUDIT_POLICY_LOAD_FMT, p);
+ } else {
+ audit_log_format(ab, AUDIT_POLICY_LOAD_NULL_FMT);
+ err = PTR_ERR(p);
+ }
+
+ audit_log_format(ab, " auid=%u ses=%u lsm=ipe res=%d errno=%d",
from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current));
+ audit_get_sessionid(current), !err, err);
audit_log_end(ab);
}
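
The audit_policy() rewrite above swaps the crypto_shash plumbing for the one-shot sha256() library helper (hence the CRYPTO_LIB_SHA256 select in the Kconfig hunk). A minimal sketch of that call pattern; the function and buffer names are made up:

#include <crypto/sha2.h>
#include <linux/printk.h>

static void example_log_policy_digest(const void *pkcs7, size_t pkcs7len)
{
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(pkcs7, pkcs7len, digest);	/* no tfm allocation, cannot fail */
	print_hex_dump_bytes("policy digest: ", DUMP_PREFIX_NONE,
			     digest, sizeof(digest));
}
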
diff --git a/security/ipe/fs.c b/security/ipe/fs.c
index 5b6d19fb844a..0bb9468b8026 100644
--- a/security/ipe/fs.c
+++ b/security/ipe/fs.c
@@ -12,11 +12,8 @@
#include "policy.h"
#include "audit.h"
-static struct dentry *np __ro_after_init;
static struct dentry *root __ro_after_init;
struct dentry *policy_root __ro_after_init;
-static struct dentry *audit_node __ro_after_init;
-static struct dentry *enforce_node __ro_after_init;
/**
* setaudit() - Write handler for the securityfs node, "ipe/success_audit"
@@ -133,6 +130,8 @@ static ssize_t getenforce(struct file *f, char __user *data,
* * %-ERANGE - Policy version number overflow
* * %-EINVAL - Policy version parsing error
* * %-EEXIST - Same name policy already deployed
+ * * %-ENOKEY - Policy signing key not found
+ * * %-EKEYREJECTED - Policy signature verification failed
*/
static ssize_t new_policy(struct file *f, const char __user *data,
size_t len, loff_t *offset)
@@ -141,12 +140,17 @@ static ssize_t new_policy(struct file *f, const char __user *data,
char *copy = NULL;
int rc = 0;
- if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN))
- return -EPERM;
+ if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
copy = memdup_user_nul(data, len);
- if (IS_ERR(copy))
- return PTR_ERR(copy);
+ if (IS_ERR(copy)) {
+ rc = PTR_ERR(copy);
+ copy = NULL;
+ goto out;
+ }
p = ipe_new_policy(NULL, 0, copy, len);
if (IS_ERR(p)) {
@@ -158,12 +162,14 @@ static ssize_t new_policy(struct file *f, const char __user *data,
if (rc)
goto out;
- ipe_audit_policy_load(p);
-
out:
- if (rc < 0)
- ipe_free_policy(p);
kfree(copy);
+ if (rc < 0) {
+ ipe_free_policy(p);
+ ipe_audit_policy_load(ERR_PTR(rc));
+ } else {
+ ipe_audit_policy_load(p);
+ }
return (rc < 0) ? rc : len;
}
@@ -191,27 +197,26 @@ static int __init ipe_init_securityfs(void)
{
int rc = 0;
struct ipe_policy *ap;
+ struct dentry *dentry;
if (!ipe_enabled)
return -EOPNOTSUPP;
root = securityfs_create_dir("ipe", NULL);
- if (IS_ERR(root)) {
- rc = PTR_ERR(root);
- goto err;
- }
+ if (IS_ERR(root))
+ return PTR_ERR(root);
- audit_node = securityfs_create_file("success_audit", 0600, root,
+ dentry = securityfs_create_file("success_audit", 0600, root,
NULL, &audit_fops);
- if (IS_ERR(audit_node)) {
- rc = PTR_ERR(audit_node);
+ if (IS_ERR(dentry)) {
+ rc = PTR_ERR(dentry);
goto err;
}
- enforce_node = securityfs_create_file("enforce", 0600, root, NULL,
+ dentry = securityfs_create_file("enforce", 0600, root, NULL,
&enforce_fops);
- if (IS_ERR(enforce_node)) {
- rc = PTR_ERR(enforce_node);
+ if (IS_ERR(dentry)) {
+ rc = PTR_ERR(dentry);
goto err;
}
@@ -228,18 +233,14 @@ static int __init ipe_init_securityfs(void)
goto err;
}
- np = securityfs_create_file("new_policy", 0200, root, NULL, &np_fops);
- if (IS_ERR(np)) {
- rc = PTR_ERR(np);
+ dentry = securityfs_create_file("new_policy", 0200, root, NULL, &np_fops);
+ if (IS_ERR(dentry)) {
+ rc = PTR_ERR(dentry);
goto err;
}
return 0;
err:
- securityfs_remove(np);
- securityfs_remove(policy_root);
- securityfs_remove(enforce_node);
- securityfs_remove(audit_node);
securityfs_remove(root);
return rc;
}
diff --git a/security/ipe/policy.c b/security/ipe/policy.c
index b628f696e32b..1c58c29886e8 100644
--- a/security/ipe/policy.c
+++ b/security/ipe/policy.c
@@ -84,8 +84,11 @@ static int set_pkcs7_data(void *ctx, const void *data, size_t len,
* ipe_new_policy.
*
* Context: Requires root->i_rwsem to be held.
- * Return: %0 on success. If an error occurs, the function will return
- * the -errno.
+ * Return:
+ * * %0 - Success
+ * * %-ENOENT - Policy was deleted while updating
+ * * %-EINVAL - Policy name mismatch
+ * * %-ESTALE - Policy version too old
*/
int ipe_update_policy(struct inode *root, const char *text, size_t textlen,
const char *pkcs7, size_t pkcs7len)
@@ -146,10 +149,12 @@ err:
*
* Return:
* * a pointer to the ipe_policy structure - Success
- * * %-EBADMSG - Policy is invalid
- * * %-ENOMEM - Out of memory (OOM)
- * * %-ERANGE - Policy version number overflow
- * * %-EINVAL - Policy version parsing error
+ * * %-EBADMSG - Policy is invalid
+ * * %-ENOMEM - Out of memory (OOM)
+ * * %-ERANGE - Policy version number overflow
+ * * %-EINVAL - Policy version parsing error
+ * * %-ENOKEY - Policy signing key not found
+ * * %-EKEYREJECTED - Policy signature verification failed
*/
struct ipe_policy *ipe_new_policy(const char *text, size_t textlen,
const char *pkcs7, size_t pkcs7len)
diff --git a/security/ipe/policy_fs.c b/security/ipe/policy_fs.c
index 3bcd8cbd09df..9d92d8a14b13 100644
--- a/security/ipe/policy_fs.c
+++ b/security/ipe/policy_fs.c
@@ -12,11 +12,16 @@
#include "policy.h"
#include "eval.h"
#include "fs.h"
+#include "audit.h"
#define MAX_VERSION_SIZE ARRAY_SIZE("65535.65535.65535")
/**
- * ipefs_file - defines a file in securityfs.
+ * struct ipefs_file - defines a file in securityfs.
+ *
+ * @name: file name inside the policy subdirectory
+ * @access: file permissions
+ * @fops: &file_operations specific to this file
*/
struct ipefs_file {
const char *name;
@@ -282,8 +287,13 @@ static ssize_t getactive(struct file *f, char __user *data,
* On success this updates the policy represented by $name,
* in-place.
*
- * Return: Length of buffer written on success. If an error occurs,
- * the function will return the -errno.
+ * Return:
+ * * Length of buffer written - Success
+ * * %-EPERM - Insufficient permission
+ * * %-ENOMEM - Out of memory (OOM)
+ * * %-ENOENT - Policy was deleted while updating
+ * * %-EINVAL - Policy name mismatch
+ * * %-ESTALE - Policy version too old
*/
static ssize_t update_policy(struct file *f, const char __user *data,
size_t len, loff_t *offset)
@@ -292,21 +302,29 @@ static ssize_t update_policy(struct file *f, const char __user *data,
char *copy = NULL;
int rc = 0;
- if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN))
- return -EPERM;
+ if (!file_ns_capable(f, &init_user_ns, CAP_MAC_ADMIN)) {
+ rc = -EPERM;
+ goto out;
+ }
copy = memdup_user(data, len);
- if (IS_ERR(copy))
- return PTR_ERR(copy);
+ if (IS_ERR(copy)) {
+ rc = PTR_ERR(copy);
+ copy = NULL;
+ goto out;
+ }
root = d_inode(f->f_path.dentry->d_parent);
inode_lock(root);
rc = ipe_update_policy(root, NULL, 0, copy, len);
inode_unlock(root);
+out:
kfree(copy);
- if (rc)
+ if (rc) {
+ ipe_audit_policy_load(ERR_PTR(rc));
return rc;
+ }
return len;
}
@@ -401,7 +419,7 @@ static const struct file_operations delete_fops = {
.write = delete_policy,
};
-/**
+/*
* policy_subdir - files under a policy subdirectory
*/
static const struct ipefs_file policy_subdir[] = {
@@ -420,7 +438,7 @@ static const struct ipefs_file policy_subdir[] = {
*/
void ipe_del_policyfs_node(struct ipe_policy *p)
{
- securityfs_recursive_remove(p->policyfs);
+ securityfs_remove(p->policyfs);
p->policyfs = NULL;
}
@@ -467,6 +485,6 @@ int ipe_new_policyfs_node(struct ipe_policy *p)
return 0;
err:
- securityfs_recursive_remove(policyfs);
+ securityfs_remove(policyfs);
return rc;
}
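
Both IPE write handlers above funnel failures into ipe_audit_policy_load() by encoding the errno as an error pointer. A small sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom that lets a single callee handle both cases; struct thing and the report function are hypothetical:

#include <linux/err.h>
#include <linux/printk.h>

struct thing {
	const char *name;
};

static void example_report(const struct thing *t)
{
	if (IS_ERR(t)) {
		pr_info("load failed: %ld\n", PTR_ERR(t));
		return;
	}
	pr_info("loaded %s\n", t->name);
}

/* Callers pass either a real object or ERR_PTR(-errno), e.g.:
 *	example_report(ERR_PTR(-EINVAL));
 */
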
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index abb03a1b2a5c..d4f5fc1e7263 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -60,7 +60,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- depends on CRYPTO_LIB_CHACHA20POLY1305 = y
+ select CRYPTO_LIB_CHACHA20POLY1305
help
This option provides support for holding large keys within the kernel
(for example Kerberos ticket caches). The data may be stored out to
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 7d687b0962b1..748e83818a76 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -218,8 +218,10 @@ continue_scanning:
key = rb_entry(cursor, struct key, serial_node);
cursor = rb_next(cursor);
- if (refcount_read(&key->usage) == 0)
+ if (!test_bit_acquire(KEY_FLAG_USER_ALIVE, &key->flags)) {
+ /* Clobber key->user after final put seen. */
goto found_unreferenced_key;
+ }
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
if (key->type == key_gc_dead_keytype) {
diff --git a/security/keys/key.c b/security/keys/key.c
index 3d7d185019d3..3bbdde778631 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -298,6 +298,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->restrict_link = restrict_link;
key->last_used_at = ktime_get_real_seconds();
+ key->flags |= 1 << KEY_FLAG_USER_ALIVE;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_BUILT_IN)
@@ -658,6 +659,8 @@ void key_put(struct key *key)
key->user->qnbytes -= key->quotalen;
spin_unlock_irqrestore(&key->user->lock, flags);
}
+ /* Mark key as safe for GC after key->user done. */
+ clear_bit_unlock(KEY_FLAG_USER_ALIVE, &key->flags);
schedule_work(&key_gc_work);
}
}
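
The KEY_FLAG_USER_ALIVE handling above pairs clear_bit_unlock() on the final key_put() with test_bit_acquire() in the garbage collector, so that once the GC observes the flag clear, the preceding quota/user bookkeeping is guaranteed to be visible. A generic sketch of that publish/observe pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_OBJ_ALIVE 0

struct ex_obj {
	unsigned long flags;
	int resource;
};

static void ex_obj_final_put(struct ex_obj *o)
{
	o->resource = 0;				/* finish bookkeeping first */
	clear_bit_unlock(EX_OBJ_ALIVE, &o->flags);	/* then publish "dead" */
}

static bool ex_obj_can_reap(struct ex_obj *o)
{
	/* Acquire pairs with the unlock above: a clear bit implies the
	 * bookkeeping writes are visible to this CPU as well. */
	return !test_bit_acquire(EX_OBJ_ALIVE, &o->flags);
}
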
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index 91f000eef3ad..cde08c478f32 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -9,7 +9,7 @@
#include <linux/sysctl.h>
#include "internal.h"
-static struct ctl_table key_sysctls[] = {
+static const struct ctl_table key_sysctls[] = {
{
.procname = "maxkeys",
.data = &key_quota_maxkeys,
diff --git a/security/keys/trusted-keys/trusted_dcp.c b/security/keys/trusted-keys/trusted_dcp.c
index e908c53a803c..7b6eb655df0c 100644
--- a/security/keys/trusted-keys/trusted_dcp.c
+++ b/security/keys/trusted-keys/trusted_dcp.c
@@ -201,12 +201,16 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
{
struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
int blen, ret;
- u8 plain_blob_key[AES_KEYSIZE_128];
+ u8 *plain_blob_key;
blen = calc_blob_len(p->key_len);
if (blen > MAX_BLOB_SIZE)
return -E2BIG;
+ plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
+ if (!plain_blob_key)
+ return -ENOMEM;
+
b->fmt_version = DCP_BLOB_VERSION;
get_random_bytes(b->nonce, AES_KEYSIZE_128);
get_random_bytes(plain_blob_key, AES_KEYSIZE_128);
@@ -229,7 +233,8 @@ static int trusted_dcp_seal(struct trusted_key_payload *p, char *datablob)
ret = 0;
out:
- memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
+ memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
+ kfree(plain_blob_key);
return ret;
}
@@ -238,7 +243,7 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
{
struct dcp_blob_fmt *b = (struct dcp_blob_fmt *)p->blob;
int blen, ret;
- u8 plain_blob_key[AES_KEYSIZE_128];
+ u8 *plain_blob_key = NULL;
if (b->fmt_version != DCP_BLOB_VERSION) {
pr_err("DCP blob has bad version: %i, expected %i\n",
@@ -256,6 +261,12 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
goto out;
}
+ plain_blob_key = kmalloc(AES_KEYSIZE_128, GFP_KERNEL);
+ if (!plain_blob_key) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = decrypt_blob_key(b->blob_key, plain_blob_key);
if (ret) {
pr_err("Unable to decrypt blob key: %i\n", ret);
@@ -271,7 +282,10 @@ static int trusted_dcp_unseal(struct trusted_key_payload *p, char *datablob)
ret = 0;
out:
- memzero_explicit(plain_blob_key, sizeof(plain_blob_key));
+ if (plain_blob_key) {
+ memzero_explicit(plain_blob_key, AES_KEYSIZE_128);
+ kfree(plain_blob_key);
+ }
return ret;
}
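
The trusted_dcp hunks move the 128-bit plain blob key from the stack to the heap and wipe it with memzero_explicit() before kfree(). A small userspace sketch of the same allocate/use/zeroize/free pattern, assuming glibc's explicit_bzero() as the wipe primitive; use_key() and seal_example() are made-up stand-ins for the real sealing work:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEYSIZE 16	/* AES_KEYSIZE_128 */

/* Illustrative stand-in for the real cryptographic work. */
static int use_key(const unsigned char *key, size_t len)
{
	return key[0] ^ key[len - 1];
}

static int seal_example(void)
{
	unsigned char *key;
	int ret;

	key = malloc(KEYSIZE);
	if (!key)
		return -1;			/* -ENOMEM analogue */

	memset(key, 0xA5, KEYSIZE);		/* get_random_bytes() analogue */
	ret = use_key(key, KEYSIZE);

	/* Wipe before free so the secret does not linger on the heap. */
	explicit_bzero(key, KEYSIZE);
	free(key);
	return ret;
}

int main(void)
{
	printf("ret=%d\n", seal_example());
	return 0;
}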
diff --git a/security/landlock/.kunitconfig b/security/landlock/.kunitconfig
index 03e119466604..f9423f01ac5b 100644
--- a/security/landlock/.kunitconfig
+++ b/security/landlock/.kunitconfig
@@ -1,4 +1,6 @@
+CONFIG_AUDIT=y
CONFIG_KUNIT=y
+CONFIG_NET=y
CONFIG_SECURITY=y
CONFIG_SECURITY_LANDLOCK=y
CONFIG_SECURITY_LANDLOCK_KUNIT_TEST=y
diff --git a/security/landlock/Makefile b/security/landlock/Makefile
index b4538b7cf7d2..3160c2bdac1d 100644
--- a/security/landlock/Makefile
+++ b/security/landlock/Makefile
@@ -4,3 +4,8 @@ landlock-y := setup.o syscalls.o object.o ruleset.o \
cred.o task.o fs.o
landlock-$(CONFIG_INET) += net.o
+
+landlock-$(CONFIG_AUDIT) += \
+ id.o \
+ audit.o \
+ domain.o
diff --git a/security/landlock/access.h b/security/landlock/access.h
new file mode 100644
index 000000000000..7961c6630a2d
--- /dev/null
+++ b/security/landlock/access.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock - Access types and helpers
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_ACCESS_H
+#define _SECURITY_LANDLOCK_ACCESS_H
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <uapi/linux/landlock.h>
+
+#include "limits.h"
+
+/*
+ * All access rights that are denied by default, whether or not they are handled
+ * by a ruleset/layer. This must be ORed with all ruleset->access_masks[]
+ * entries when we need to get the absolute handled access masks, see
+ * landlock_upgrade_handled_access_masks().
+ */
+/* clang-format off */
+#define _LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \
+ LANDLOCK_ACCESS_FS_REFER)
+/* clang-format on */
+
+/* clang-format off */
+#define _LANDLOCK_ACCESS_FS_OPTIONAL ( \
+ LANDLOCK_ACCESS_FS_TRUNCATE | \
+ LANDLOCK_ACCESS_FS_IOCTL_DEV)
+/* clang-format on */
+
+typedef u16 access_mask_t;
+
+/* Makes sure all filesystem access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
+/* Makes sure all network access rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
+/* Makes sure all scoped rights can be stored. */
+static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE);
+/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
+static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
+
+/* Ruleset access masks. */
+struct access_masks {
+ access_mask_t fs : LANDLOCK_NUM_ACCESS_FS;
+ access_mask_t net : LANDLOCK_NUM_ACCESS_NET;
+ access_mask_t scope : LANDLOCK_NUM_SCOPE;
+};
+
+union access_masks_all {
+ struct access_masks masks;
+ u32 all;
+};
+
+/* Makes sure all fields are covered. */
+static_assert(sizeof(typeof_member(union access_masks_all, masks)) ==
+ sizeof(typeof_member(union access_masks_all, all)));
+
+typedef u16 layer_mask_t;
+
+/* Makes sure all layers can be checked. */
+static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
+
+/*
+ * Tracks domains responsible for a denied access. This is required to avoid
+ * storing in each object the full layer_masks[] required by update_request().
+ */
+typedef u8 deny_masks_t;
+
+/*
+ * Makes sure all optional access rights can be tied to a layer index (cf.
+ * get_deny_mask).
+ */
+static_assert(BITS_PER_TYPE(deny_masks_t) >=
+ (HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1) *
+ HWEIGHT(_LANDLOCK_ACCESS_FS_OPTIONAL)));
+
+/* LANDLOCK_MAX_NUM_LAYERS must be a power of two (cf. deny_masks_t assert). */
+static_assert(HWEIGHT(LANDLOCK_MAX_NUM_LAYERS) == 1);
+
+/* Upgrades with all initially denied by default access rights. */
+static inline struct access_masks
+landlock_upgrade_handled_access_masks(struct access_masks access_masks)
+{
+ /*
+ * All access rights that are denied by default, whether or not they are
+ * explicitly handled.
+ */
+ if (access_masks.fs)
+ access_masks.fs |= _LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+
+ return access_masks;
+}
+
+#endif /* _SECURITY_LANDLOCK_ACCESS_H */
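
access.h depends on struct access_masks fitting into 32 bits so that union access_masks_all can compare a whole set of handled rights against a request with a single integer operation. A standalone sketch of the same union trick; the widths mirror LANDLOCK_NUM_ACCESS_FS/NET/SCOPE but the types and names are illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the Landlock constants (illustrative values). */
#define NUM_FS    16
#define NUM_NET   2
#define NUM_SCOPE 2

struct masks {
	uint32_t fs : NUM_FS;
	uint32_t net : NUM_NET;
	uint32_t scope : NUM_SCOPE;
};

union masks_all {
	struct masks masks;
	uint32_t all;
};

int main(void)
{
	const union masks_all handled = { .masks = { .fs = 0x00ff } };
	const union masks_all wanted = { .masks = { .net = 0x3 } };

	static_assert(sizeof(struct masks) == sizeof(uint32_t), "packing");

	/* One integer AND answers "does this layer handle any requested right?" */
	printf("overlap=%s\n", (handled.all & wanted.all) ? "yes" : "no");
	return 0;
}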
diff --git a/security/landlock/audit.c b/security/landlock/audit.c
new file mode 100644
index 000000000000..c52d079cdb77
--- /dev/null
+++ b/security/landlock/audit.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock - Audit helpers
+ *
+ * Copyright © 2023-2025 Microsoft Corporation
+ */
+
+#include <kunit/test.h>
+#include <linux/audit.h>
+#include <linux/bitops.h>
+#include <linux/lsm_audit.h>
+#include <linux/pid.h>
+#include <uapi/linux/landlock.h>
+
+#include "access.h"
+#include "audit.h"
+#include "common.h"
+#include "cred.h"
+#include "domain.h"
+#include "limits.h"
+#include "ruleset.h"
+
+static const char *const fs_access_strings[] = {
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = "fs.execute",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_WRITE_FILE)] = "fs.write_file",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = "fs.read_file",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = "fs.read_dir",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = "fs.remove_dir",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_FILE)] = "fs.remove_file",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_CHAR)] = "fs.make_char",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_DIR)] = "fs.make_dir",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_REG)] = "fs.make_reg",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SOCK)] = "fs.make_sock",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_FIFO)] = "fs.make_fifo",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_BLOCK)] = "fs.make_block",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_MAKE_SYM)] = "fs.make_sym",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_REFER)] = "fs.refer",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = "fs.truncate",
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = "fs.ioctl_dev",
+};
+
+static_assert(ARRAY_SIZE(fs_access_strings) == LANDLOCK_NUM_ACCESS_FS);
+
+static const char *const net_access_strings[] = {
+ [BIT_INDEX(LANDLOCK_ACCESS_NET_BIND_TCP)] = "net.bind_tcp",
+ [BIT_INDEX(LANDLOCK_ACCESS_NET_CONNECT_TCP)] = "net.connect_tcp",
+};
+
+static_assert(ARRAY_SIZE(net_access_strings) == LANDLOCK_NUM_ACCESS_NET);
+
+static __attribute_const__ const char *
+get_blocker(const enum landlock_request_type type,
+ const unsigned long access_bit)
+{
+ switch (type) {
+ case LANDLOCK_REQUEST_PTRACE:
+ WARN_ON_ONCE(access_bit != -1);
+ return "ptrace";
+
+ case LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY:
+ WARN_ON_ONCE(access_bit != -1);
+ return "fs.change_topology";
+
+ case LANDLOCK_REQUEST_FS_ACCESS:
+ if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(fs_access_strings)))
+ return "unknown";
+ return fs_access_strings[access_bit];
+
+ case LANDLOCK_REQUEST_NET_ACCESS:
+ if (WARN_ON_ONCE(access_bit >= ARRAY_SIZE(net_access_strings)))
+ return "unknown";
+ return net_access_strings[access_bit];
+
+ case LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET:
+ WARN_ON_ONCE(access_bit != -1);
+ return "scope.abstract_unix_socket";
+
+ case LANDLOCK_REQUEST_SCOPE_SIGNAL:
+ WARN_ON_ONCE(access_bit != -1);
+ return "scope.signal";
+ }
+
+ WARN_ON_ONCE(1);
+ return "unknown";
+}
+
+static void log_blockers(struct audit_buffer *const ab,
+ const enum landlock_request_type type,
+ const access_mask_t access)
+{
+ const unsigned long access_mask = access;
+ unsigned long access_bit;
+ bool is_first = true;
+
+ for_each_set_bit(access_bit, &access_mask, BITS_PER_TYPE(access)) {
+ audit_log_format(ab, "%s%s", is_first ? "" : ",",
+ get_blocker(type, access_bit));
+ is_first = false;
+ }
+ if (is_first)
+ audit_log_format(ab, "%s", get_blocker(type, -1));
+}
+
+static void log_domain(struct landlock_hierarchy *const hierarchy)
+{
+ struct audit_buffer *ab;
+
+ /* Ignores already logged domains. */
+ if (READ_ONCE(hierarchy->log_status) == LANDLOCK_LOG_RECORDED)
+ return;
+
+ /* Uses consistent allocation flags wrt common_lsm_audit(). */
+ ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_LANDLOCK_DOMAIN);
+ if (!ab)
+ return;
+
+ WARN_ON_ONCE(hierarchy->id == 0);
+ audit_log_format(
+ ab,
+ "domain=%llx status=allocated mode=enforcing pid=%d uid=%u exe=",
+ hierarchy->id, pid_nr(hierarchy->details->pid),
+ hierarchy->details->uid);
+ audit_log_untrustedstring(ab, hierarchy->details->exe_path);
+ audit_log_format(ab, " comm=");
+ audit_log_untrustedstring(ab, hierarchy->details->comm);
+ audit_log_end(ab);
+
+ /*
+ * There may be a race condition leading to logging the same domain
+ * several times, but that is OK.
+ */
+ WRITE_ONCE(hierarchy->log_status, LANDLOCK_LOG_RECORDED);
+}
+
+static struct landlock_hierarchy *
+get_hierarchy(const struct landlock_ruleset *const domain, const size_t layer)
+{
+ struct landlock_hierarchy *hierarchy = domain->hierarchy;
+ ssize_t i;
+
+ if (WARN_ON_ONCE(layer >= domain->num_layers))
+ return hierarchy;
+
+ for (i = domain->num_layers - 1; i > layer; i--) {
+ if (WARN_ON_ONCE(!hierarchy->parent))
+ break;
+
+ hierarchy = hierarchy->parent;
+ }
+
+ return hierarchy;
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void test_get_hierarchy(struct kunit *const test)
+{
+ struct landlock_hierarchy dom0_hierarchy = {
+ .id = 10,
+ };
+ struct landlock_hierarchy dom1_hierarchy = {
+ .parent = &dom0_hierarchy,
+ .id = 20,
+ };
+ struct landlock_hierarchy dom2_hierarchy = {
+ .parent = &dom1_hierarchy,
+ .id = 30,
+ };
+ struct landlock_ruleset dom2 = {
+ .hierarchy = &dom2_hierarchy,
+ .num_layers = 3,
+ };
+
+ KUNIT_EXPECT_EQ(test, 10, get_hierarchy(&dom2, 0)->id);
+ KUNIT_EXPECT_EQ(test, 20, get_hierarchy(&dom2, 1)->id);
+ KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, 2)->id);
+ /* KUNIT_EXPECT_EQ(test, 30, get_hierarchy(&dom2, -1)->id); */
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+static size_t get_denied_layer(const struct landlock_ruleset *const domain,
+ access_mask_t *const access_request,
+ const layer_mask_t (*const layer_masks)[],
+ const size_t layer_masks_size)
+{
+ const unsigned long access_req = *access_request;
+ unsigned long access_bit;
+ access_mask_t missing = 0;
+ long youngest_layer = -1;
+
+ for_each_set_bit(access_bit, &access_req, layer_masks_size) {
+ const access_mask_t mask = (*layer_masks)[access_bit];
+ long layer;
+
+ if (!mask)
+ continue;
+
+ /* __fls(1) == 0 */
+ layer = __fls(mask);
+ if (layer > youngest_layer) {
+ youngest_layer = layer;
+ missing = BIT(access_bit);
+ } else if (layer == youngest_layer) {
+ missing |= BIT(access_bit);
+ }
+ }
+
+ *access_request = missing;
+ if (youngest_layer == -1)
+ return domain->num_layers - 1;
+
+ return youngest_layer;
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void test_get_denied_layer(struct kunit *const test)
+{
+ const struct landlock_ruleset dom = {
+ .num_layers = 5,
+ };
+ const layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT(0),
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_FILE)] = BIT(1),
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_READ_DIR)] = BIT(1) | BIT(0),
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_REMOVE_DIR)] = BIT(2),
+ };
+ access_mask_t access;
+
+ access = LANDLOCK_ACCESS_FS_EXECUTE;
+ KUNIT_EXPECT_EQ(test, 0,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_EXECUTE);
+
+ access = LANDLOCK_ACCESS_FS_READ_FILE;
+ KUNIT_EXPECT_EQ(test, 1,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_FILE);
+
+ access = LANDLOCK_ACCESS_FS_READ_DIR;
+ KUNIT_EXPECT_EQ(test, 1,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR);
+
+ access = LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_READ_DIR;
+ KUNIT_EXPECT_EQ(test, 1,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access,
+ LANDLOCK_ACCESS_FS_READ_FILE |
+ LANDLOCK_ACCESS_FS_READ_DIR);
+
+ access = LANDLOCK_ACCESS_FS_EXECUTE | LANDLOCK_ACCESS_FS_READ_DIR;
+ KUNIT_EXPECT_EQ(test, 1,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_READ_DIR);
+
+ access = LANDLOCK_ACCESS_FS_WRITE_FILE;
+ KUNIT_EXPECT_EQ(test, 4,
+ get_denied_layer(&dom, &access, &layer_masks,
+ sizeof(layer_masks)));
+ KUNIT_EXPECT_EQ(test, access, 0);
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+static size_t
+get_layer_from_deny_masks(access_mask_t *const access_request,
+ const access_mask_t all_existing_optional_access,
+ const deny_masks_t deny_masks)
+{
+ const unsigned long access_opt = all_existing_optional_access;
+ const unsigned long access_req = *access_request;
+ access_mask_t missing = 0;
+ size_t youngest_layer = 0;
+ size_t access_index = 0;
+ unsigned long access_bit;
+
+ /* This will require changes for new object types. */
+ WARN_ON_ONCE(access_opt != _LANDLOCK_ACCESS_FS_OPTIONAL);
+
+ for_each_set_bit(access_bit, &access_opt,
+ BITS_PER_TYPE(access_mask_t)) {
+ if (access_req & BIT(access_bit)) {
+ const size_t layer =
+ (deny_masks >> (access_index * 4)) &
+ (LANDLOCK_MAX_NUM_LAYERS - 1);
+
+ if (layer > youngest_layer) {
+ youngest_layer = layer;
+ missing = BIT(access_bit);
+ } else if (layer == youngest_layer) {
+ missing |= BIT(access_bit);
+ }
+ }
+ access_index++;
+ }
+
+ *access_request = missing;
+ return youngest_layer;
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void test_get_layer_from_deny_masks(struct kunit *const test)
+{
+ deny_masks_t deny_mask;
+ access_mask_t access;
+
+ /* truncate:0 ioctl_dev:2 */
+ deny_mask = 0x20;
+
+ access = LANDLOCK_ACCESS_FS_TRUNCATE;
+ KUNIT_EXPECT_EQ(test, 0,
+ get_layer_from_deny_masks(&access,
+ _LANDLOCK_ACCESS_FS_OPTIONAL,
+ deny_mask));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE);
+
+ access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV;
+ KUNIT_EXPECT_EQ(test, 2,
+ get_layer_from_deny_masks(&access,
+ _LANDLOCK_ACCESS_FS_OPTIONAL,
+ deny_mask));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_IOCTL_DEV);
+
+ /* truncate:15 ioctl_dev:15 */
+ deny_mask = 0xff;
+
+ access = LANDLOCK_ACCESS_FS_TRUNCATE;
+ KUNIT_EXPECT_EQ(test, 15,
+ get_layer_from_deny_masks(&access,
+ _LANDLOCK_ACCESS_FS_OPTIONAL,
+ deny_mask));
+ KUNIT_EXPECT_EQ(test, access, LANDLOCK_ACCESS_FS_TRUNCATE);
+
+ access = LANDLOCK_ACCESS_FS_TRUNCATE | LANDLOCK_ACCESS_FS_IOCTL_DEV;
+ KUNIT_EXPECT_EQ(test, 15,
+ get_layer_from_deny_masks(&access,
+ _LANDLOCK_ACCESS_FS_OPTIONAL,
+ deny_mask));
+ KUNIT_EXPECT_EQ(test, access,
+ LANDLOCK_ACCESS_FS_TRUNCATE |
+ LANDLOCK_ACCESS_FS_IOCTL_DEV);
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+static bool is_valid_request(const struct landlock_request *const request)
+{
+ if (WARN_ON_ONCE(request->layer_plus_one > LANDLOCK_MAX_NUM_LAYERS))
+ return false;
+
+ if (WARN_ON_ONCE(!(!!request->layer_plus_one ^ !!request->access)))
+ return false;
+
+ if (request->access) {
+ if (WARN_ON_ONCE(!(!!request->layer_masks ^
+ !!request->all_existing_optional_access)))
+ return false;
+ } else {
+ if (WARN_ON_ONCE(request->layer_masks ||
+ request->all_existing_optional_access))
+ return false;
+ }
+
+ if (WARN_ON_ONCE(!!request->layer_masks ^ !!request->layer_masks_size))
+ return false;
+
+ if (request->deny_masks) {
+ if (WARN_ON_ONCE(!request->all_existing_optional_access))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * landlock_log_denial - Create audit records related to a denial
+ *
+ * @subject: The Landlock subject's credential denying an action.
+ * @request: Detail of the user space request.
+ */
+void landlock_log_denial(const struct landlock_cred_security *const subject,
+ const struct landlock_request *const request)
+{
+ struct audit_buffer *ab;
+ struct landlock_hierarchy *youngest_denied;
+ size_t youngest_layer;
+ access_mask_t missing;
+
+ if (WARN_ON_ONCE(!subject || !subject->domain ||
+ !subject->domain->hierarchy || !request))
+ return;
+
+ if (!is_valid_request(request))
+ return;
+
+ missing = request->access;
+ if (missing) {
+ /* Gets the nearest domain that denies the request. */
+ if (request->layer_masks) {
+ youngest_layer = get_denied_layer(
+ subject->domain, &missing, request->layer_masks,
+ request->layer_masks_size);
+ } else {
+ youngest_layer = get_layer_from_deny_masks(
+ &missing, request->all_existing_optional_access,
+ request->deny_masks);
+ }
+ youngest_denied =
+ get_hierarchy(subject->domain, youngest_layer);
+ } else {
+ youngest_layer = request->layer_plus_one - 1;
+ youngest_denied =
+ get_hierarchy(subject->domain, youngest_layer);
+ }
+
+ if (READ_ONCE(youngest_denied->log_status) == LANDLOCK_LOG_DISABLED)
+ return;
+
+ /*
+ * Consistently keeps track of the number of denied access requests
+ * even if audit is currently disabled, or if audit rules currently
+ * exclude this record type, or if landlock_restrict_self(2)'s flags
+ * quiet logs.
+ */
+ atomic64_inc(&youngest_denied->num_denials);
+
+ if (!audit_enabled)
+ return;
+
+ /* Checks if the current exec was restricting itself. */
+ if (subject->domain_exec & BIT(youngest_layer)) {
+ /* Ignores denials for the same execution. */
+ if (!youngest_denied->log_same_exec)
+ return;
+ } else {
+ /* Ignores denials after a new execution. */
+ if (!youngest_denied->log_new_exec)
+ return;
+ }
+
+ /* Uses consistent allocation flags wrt common_lsm_audit(). */
+ ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_LANDLOCK_ACCESS);
+ if (!ab)
+ return;
+
+ audit_log_format(ab, "domain=%llx blockers=", youngest_denied->id);
+ log_blockers(ab, request->type, missing);
+ audit_log_lsm_data(ab, &request->audit);
+ audit_log_end(ab);
+
+ /* Logs this domain the first time it shows up in the log. */
+ log_domain(youngest_denied);
+}
+
+/**
+ * landlock_log_drop_domain - Create an audit record on domain deallocation
+ *
+ * @hierarchy: The domain's hierarchy being deallocated.
+ *
+ * Only domains which previously appeared in the audit logs are logged again.
+ * This is useful to know when a domain will never show again in the audit log.
+ *
+ * Called in a work queue scheduled by landlock_put_ruleset_deferred() called
+ * by hook_cred_free().
+ */
+void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy)
+{
+ struct audit_buffer *ab;
+
+ if (WARN_ON_ONCE(!hierarchy))
+ return;
+
+ if (!audit_enabled)
+ return;
+
+ /* Ignores domains that were not logged. */
+ if (READ_ONCE(hierarchy->log_status) != LANDLOCK_LOG_RECORDED)
+ return;
+
+ /*
+ * If logging of domain allocation succeeded, warns about failure to log
+ * domain deallocation to highlight unbalanced domain lifetime logs.
+ */
+ ab = audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_LANDLOCK_DOMAIN);
+ if (!ab)
+ return;
+
+ audit_log_format(ab, "domain=%llx status=deallocated denials=%llu",
+ hierarchy->id, atomic64_read(&hierarchy->num_denials));
+ audit_log_end(ab);
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static struct kunit_case test_cases[] = {
+ /* clang-format off */
+ KUNIT_CASE(test_get_hierarchy),
+ KUNIT_CASE(test_get_denied_layer),
+ KUNIT_CASE(test_get_layer_from_deny_masks),
+ {}
+ /* clang-format on */
+};
+
+static struct kunit_suite test_suite = {
+ .name = "landlock_audit",
+ .test_cases = test_cases,
+};
+
+kunit_test_suite(test_suite);
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
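
get_layer_from_deny_masks() above reads deny_masks as an array of 4-bit layer indexes, one slot per optional access right: truncate in bits 0-3 and ioctl_dev in bits 4-7, which is exactly what the 0x20 and 0xff KUnit cases encode. A standalone sketch of that decoding; the constants and names are illustrative copies, not the kernel ones:

#include <stdint.h>
#include <stdio.h>

#define LAYER_BITS 4	/* HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1) */

/* Optional access slots, in bit order: truncate first, then ioctl_dev. */
enum { SLOT_TRUNCATE = 0, SLOT_IOCTL_DEV = 1 };

static unsigned int deny_layer(uint8_t deny_masks, unsigned int slot)
{
	return (deny_masks >> (slot * LAYER_BITS)) & ((1u << LAYER_BITS) - 1);
}

int main(void)
{
	const uint8_t deny_masks = 0x20;	/* truncate:0 ioctl_dev:2 */

	printf("truncate denied by layer %u\n",
	       deny_layer(deny_masks, SLOT_TRUNCATE));		/* prints 0 */
	printf("ioctl_dev denied by layer %u\n",
	       deny_layer(deny_masks, SLOT_IOCTL_DEV));		/* prints 2 */
	return 0;
}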
diff --git a/security/landlock/audit.h b/security/landlock/audit.h
new file mode 100644
index 000000000000..92428b7fc4d8
--- /dev/null
+++ b/security/landlock/audit.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock - Audit helpers
+ *
+ * Copyright © 2023-2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_AUDIT_H
+#define _SECURITY_LANDLOCK_AUDIT_H
+
+#include <linux/audit.h>
+#include <linux/lsm_audit.h>
+
+#include "access.h"
+#include "cred.h"
+
+enum landlock_request_type {
+ LANDLOCK_REQUEST_PTRACE = 1,
+ LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
+ LANDLOCK_REQUEST_FS_ACCESS,
+ LANDLOCK_REQUEST_NET_ACCESS,
+ LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET,
+ LANDLOCK_REQUEST_SCOPE_SIGNAL,
+};
+
+/*
+ * We should be careful to only use a variable of this type for
+ * landlock_log_denial(). This way, the compiler can remove it entirely if
+ * CONFIG_AUDIT is not set.
+ */
+struct landlock_request {
+ /* Mandatory fields. */
+ enum landlock_request_type type;
+ struct common_audit_data audit;
+
+ /**
+ * @layer_plus_one: First layer level that denies the request + 1. The
+ * extra one is useful to detect an uninitialized field.
+ */
+ size_t layer_plus_one;
+
+ /* Required field for configurable access control. */
+ access_mask_t access;
+
+ /* Required fields for requests with layer masks. */
+ const layer_mask_t (*layer_masks)[];
+ size_t layer_masks_size;
+
+ /* Required fields for requests with deny masks. */
+ const access_mask_t all_existing_optional_access;
+ deny_masks_t deny_masks;
+};
+
+#ifdef CONFIG_AUDIT
+
+void landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy);
+
+void landlock_log_denial(const struct landlock_cred_security *const subject,
+ const struct landlock_request *const request);
+
+#else /* CONFIG_AUDIT */
+
+static inline void
+landlock_log_drop_domain(const struct landlock_hierarchy *const hierarchy)
+{
+}
+
+static inline void
+landlock_log_denial(const struct landlock_cred_security *const subject,
+ const struct landlock_request *const request)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
+#endif /* _SECURITY_LANDLOCK_AUDIT_H */
diff --git a/security/landlock/cred.c b/security/landlock/cred.c
index db9fe7d906ba..0cb3edde4d18 100644
--- a/security/landlock/cred.c
+++ b/security/landlock/cred.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Landlock LSM - Credential hooks
+ * Landlock - Credential hooks
*
* Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
*/
+#include <linux/binfmts.h>
#include <linux/cred.h>
#include <linux/lsm_hooks.h>
@@ -17,11 +19,12 @@
static void hook_cred_transfer(struct cred *const new,
const struct cred *const old)
{
- struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
+ const struct landlock_cred_security *const old_llcred =
+ landlock_cred(old);
- if (old_dom) {
- landlock_get_ruleset(old_dom);
- landlock_cred(new)->domain = old_dom;
+ if (old_llcred->domain) {
+ landlock_get_ruleset(old_llcred->domain);
+ *landlock_cred(new) = *old_llcred;
}
}
@@ -40,10 +43,25 @@ static void hook_cred_free(struct cred *const cred)
landlock_put_ruleset_deferred(dom);
}
+#ifdef CONFIG_AUDIT
+
+static int hook_bprm_creds_for_exec(struct linux_binprm *const bprm)
+{
+ /* Resets for each execution. */
+ landlock_cred(bprm->cred)->domain_exec = 0;
+ return 0;
+}
+
+#endif /* CONFIG_AUDIT */
+
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
LSM_HOOK_INIT(cred_transfer, hook_cred_transfer),
LSM_HOOK_INIT(cred_free, hook_cred_free),
+
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(bprm_creds_for_exec, hook_bprm_creds_for_exec),
+#endif /* CONFIG_AUDIT */
};
__init void landlock_add_cred_hooks(void)
diff --git a/security/landlock/cred.h b/security/landlock/cred.h
index bf755459838a..c82fe63ec598 100644
--- a/security/landlock/cred.h
+++ b/security/landlock/cred.h
@@ -1,24 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Landlock LSM - Credential hooks
+ * Landlock - Credential hooks
*
* Copyright © 2019-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2019-2020 ANSSI
+ * Copyright © 2021-2025 Microsoft Corporation
*/
#ifndef _SECURITY_LANDLOCK_CRED_H
#define _SECURITY_LANDLOCK_CRED_H
+#include <linux/container_of.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
+#include "access.h"
+#include "limits.h"
#include "ruleset.h"
#include "setup.h"
+/**
+ * struct landlock_cred_security - Credential security blob
+ *
+ * This structure is packed to minimize the size of struct
+ * landlock_file_security. However, it is always aligned in the LSM cred blob,
+ * see lsm_set_blob_size().
+ */
struct landlock_cred_security {
+ /**
+ * @domain: Immutable ruleset enforced on a task.
+ */
struct landlock_ruleset *domain;
-};
+
+#ifdef CONFIG_AUDIT
+ /**
+ * @domain_exec: Bitmask identifying the domain layers that were enforced by
+ * the current task's executed file (i.e. no new execve(2) since
+ * landlock_restrict_self(2)).
+ */
+ u16 domain_exec;
+ /**
+ * @log_subdomains_off: Set if the domain descendants' log_status should be
+ * set to %LANDLOCK_LOG_DISABLED. This is not a landlock_hierarchy
+ * configuration because it applies to future descendant domains and it does
+ * not require a current domain.
+ */
+ u8 log_subdomains_off : 1;
+#endif /* CONFIG_AUDIT */
+} __packed;
+
+#ifdef CONFIG_AUDIT
+
+/* Makes sure all layer executions can be stored. */
+static_assert(BITS_PER_TYPE(typeof_member(struct landlock_cred_security,
+ domain_exec)) >=
+ LANDLOCK_MAX_NUM_LAYERS);
+
+#endif /* CONFIG_AUDIT */
static inline struct landlock_cred_security *
landlock_cred(const struct cred *cred)
@@ -53,6 +92,55 @@ static inline bool landlocked(const struct task_struct *const task)
return has_dom;
}
+/**
+ * landlock_get_applicable_subject - Return the subject's Landlock credential
+ * if its enforced domain applies to (i.e.
+ * handles) at least one of the access rights
+ * specified in @masks
+ *
+ * @cred: credential
+ * @masks: access masks
+ * @handle_layer: returned youngest layer handling a subset of @masks. Not set
+ * if the function returns NULL.
+ *
+ * Returns: landlock_cred(@cred) if any access right specified in @masks is
+ * handled, or NULL otherwise.
+ */
+static inline const struct landlock_cred_security *
+landlock_get_applicable_subject(const struct cred *const cred,
+ const struct access_masks masks,
+ size_t *const handle_layer)
+{
+ const union access_masks_all masks_all = {
+ .masks = masks,
+ };
+ const struct landlock_ruleset *domain;
+ ssize_t layer_level;
+
+ if (!cred)
+ return NULL;
+
+ domain = landlock_cred(cred)->domain;
+ if (!domain)
+ return NULL;
+
+ for (layer_level = domain->num_layers - 1; layer_level >= 0;
+ layer_level--) {
+ union access_masks_all layer = {
+ .masks = domain->access_masks[layer_level],
+ };
+
+ if (layer.all & masks_all.all) {
+ if (handle_layer)
+ *handle_layer = layer_level;
+
+ return landlock_cred(cred);
+ }
+ }
+
+ return NULL;
+}
+
__init void landlock_add_cred_hooks(void);
#endif /* _SECURITY_LANDLOCK_CRED_H */
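
landlock_get_applicable_subject() walks the domain's layers from the youngest to the oldest and returns as soon as one layer's handled masks overlap the requested ones, optionally reporting that layer's index. A userspace model of the same search over a plain array of 32-bit masks; the types and values are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the youngest layer index handling any bit of @request, or -1. */
static long find_handling_layer(const uint32_t *layers, size_t num_layers,
				uint32_t request)
{
	long i;

	for (i = (long)num_layers - 1; i >= 0; i--) {
		if (layers[i] & request)
			return i;
	}
	return -1;
}

int main(void)
{
	/* Layer 0 handles four "fs" bits, layer 1 handles one "net" bit. */
	const uint32_t layers[] = { 0x0000000f, 0x00010000 };

	printf("fs request: layer %ld\n", find_handling_layer(layers, 2, 0x1));
	printf("net request: layer %ld\n",
	       find_handling_layer(layers, 2, 0x10000));
	printf("unhandled request: %ld\n",
	       find_handling_layer(layers, 2, 0x100));
	return 0;
}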
diff --git a/security/landlock/domain.c b/security/landlock/domain.c
new file mode 100644
index 000000000000..a647b68e8d06
--- /dev/null
+++ b/security/landlock/domain.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock - Domain management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#include <kunit/test.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/path.h>
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/uidgid.h>
+
+#include "access.h"
+#include "common.h"
+#include "domain.h"
+#include "id.h"
+
+#ifdef CONFIG_AUDIT
+
+/**
+ * get_current_exe - Get the current's executable path, if any
+ *
+ * @exe_str: Returned pointer to a path string with a lifetime tied to the
+ * returned buffer, if any.
+ * @exe_size: Returned size of @exe_str (including the trailing null
+ * character), if any.
+ *
+ * Returns: A pointer to an allocated buffer where @exe_str points to, %NULL if
+ * there is no executable path, or an error otherwise.
+ */
+static const void *get_current_exe(const char **const exe_str,
+ size_t *const exe_size)
+{
+ const size_t buffer_size = LANDLOCK_PATH_MAX_SIZE;
+ struct mm_struct *mm = current->mm;
+ struct file *file __free(fput) = NULL;
+ char *buffer __free(kfree) = NULL;
+ const char *exe;
+ ssize_t size;
+
+ if (!mm)
+ return NULL;
+
+ file = get_mm_exe_file(mm);
+ if (!file)
+ return NULL;
+
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ exe = d_path(&file->f_path, buffer, buffer_size);
+ if (WARN_ON_ONCE(IS_ERR(exe)))
+ /* Should never happen according to LANDLOCK_PATH_MAX_SIZE. */
+ return ERR_CAST(exe);
+
+ size = buffer + buffer_size - exe;
+ if (WARN_ON_ONCE(size <= 0))
+ return ERR_PTR(-ENAMETOOLONG);
+
+ *exe_size = size;
+ *exe_str = exe;
+ return no_free_ptr(buffer);
+}
+
+/*
+ * Returns: A newly allocated object describing a domain, or an error
+ * otherwise.
+ */
+static struct landlock_details *get_current_details(void)
+{
+ /* Cf. audit_log_d_path_exe() */
+ static const char null_path[] = "(null)";
+ const char *path_str = null_path;
+ size_t path_size = sizeof(null_path);
+ const void *buffer __free(kfree) = NULL;
+ struct landlock_details *details;
+
+ buffer = get_current_exe(&path_str, &path_size);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ /*
+ * Create the new details according to the path's length. Do not
+ * allocate with GFP_KERNEL_ACCOUNT because it is independent from the
+ * caller.
+ */
+ details =
+ kzalloc(struct_size(details, exe_path, path_size), GFP_KERNEL);
+ if (!details)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(details->exe_path, path_str, path_size);
+ details->pid = get_pid(task_tgid(current));
+ details->uid = from_kuid(&init_user_ns, current_uid());
+ get_task_comm(details->comm, current);
+ return details;
+}
+
+/**
+ * landlock_init_hierarchy_log - Partially initialize landlock_hierarchy
+ *
+ * @hierarchy: The hierarchy to initialize.
+ *
+ * The current task is referenced as the domain that is enforcing the
+ * restriction. The subjective credentials must not be in an overridden state.
+ *
+ * @hierarchy->parent and @hierarchy->usage should already be set.
+ */
+int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy)
+{
+ struct landlock_details *details;
+
+ details = get_current_details();
+ if (IS_ERR(details))
+ return PTR_ERR(details);
+
+ hierarchy->details = details;
+ hierarchy->id = landlock_get_id_range(1);
+ hierarchy->log_status = LANDLOCK_LOG_PENDING;
+ hierarchy->log_same_exec = true;
+ hierarchy->log_new_exec = false;
+ atomic64_set(&hierarchy->num_denials, 0);
+ return 0;
+}
+
+static deny_masks_t
+get_layer_deny_mask(const access_mask_t all_existing_optional_access,
+ const unsigned long access_bit, const size_t layer)
+{
+ unsigned long access_weight;
+
+ /* This may require changes for new object types. */
+ WARN_ON_ONCE(all_existing_optional_access !=
+ _LANDLOCK_ACCESS_FS_OPTIONAL);
+
+ if (WARN_ON_ONCE(layer >= LANDLOCK_MAX_NUM_LAYERS))
+ return 0;
+
+ access_weight = hweight_long(all_existing_optional_access &
+ GENMASK(access_bit, 0));
+ if (WARN_ON_ONCE(access_weight < 1))
+ return 0;
+
+ return layer
+ << ((access_weight - 1) * HWEIGHT(LANDLOCK_MAX_NUM_LAYERS - 1));
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void test_get_layer_deny_mask(struct kunit *const test)
+{
+ const unsigned long truncate = BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE);
+ const unsigned long ioctl_dev = BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV);
+
+ KUNIT_EXPECT_EQ(test, 0,
+ get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ truncate, 0));
+ KUNIT_EXPECT_EQ(test, 0x3,
+ get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ truncate, 3));
+
+ KUNIT_EXPECT_EQ(test, 0,
+ get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ ioctl_dev, 0));
+ KUNIT_EXPECT_EQ(test, 0xf0,
+ get_layer_deny_mask(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ ioctl_dev, 15));
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+deny_masks_t
+landlock_get_deny_masks(const access_mask_t all_existing_optional_access,
+ const access_mask_t optional_access,
+ const layer_mask_t (*const layer_masks)[],
+ const size_t layer_masks_size)
+{
+ const unsigned long access_opt = optional_access;
+ unsigned long access_bit;
+ deny_masks_t deny_masks = 0;
+
+ /* This may require changes for new object types. */
+ WARN_ON_ONCE(access_opt !=
+ (optional_access & all_existing_optional_access));
+
+ if (WARN_ON_ONCE(!layer_masks))
+ return 0;
+
+ if (WARN_ON_ONCE(!access_opt))
+ return 0;
+
+ for_each_set_bit(access_bit, &access_opt, layer_masks_size) {
+ const layer_mask_t mask = (*layer_masks)[access_bit];
+
+ if (!mask)
+ continue;
+
+ /* __fls(1) == 0 */
+ deny_masks |= get_layer_deny_mask(all_existing_optional_access,
+ access_bit, __fls(mask));
+ }
+ return deny_masks;
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void test_landlock_get_deny_masks(struct kunit *const test)
+{
+ const layer_mask_t layers1[BITS_PER_TYPE(access_mask_t)] = {
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_EXECUTE)] = BIT_ULL(0) |
+ BIT_ULL(9),
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_TRUNCATE)] = BIT_ULL(1),
+ [BIT_INDEX(LANDLOCK_ACCESS_FS_IOCTL_DEV)] = BIT_ULL(2) |
+ BIT_ULL(0),
+ };
+
+ KUNIT_EXPECT_EQ(test, 0x1,
+ landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ LANDLOCK_ACCESS_FS_TRUNCATE,
+ &layers1, ARRAY_SIZE(layers1)));
+ KUNIT_EXPECT_EQ(test, 0x20,
+ landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ LANDLOCK_ACCESS_FS_IOCTL_DEV,
+ &layers1, ARRAY_SIZE(layers1)));
+ KUNIT_EXPECT_EQ(
+ test, 0x21,
+ landlock_get_deny_masks(_LANDLOCK_ACCESS_FS_OPTIONAL,
+ LANDLOCK_ACCESS_FS_TRUNCATE |
+ LANDLOCK_ACCESS_FS_IOCTL_DEV,
+ &layers1, ARRAY_SIZE(layers1)));
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static struct kunit_case test_cases[] = {
+ /* clang-format off */
+ KUNIT_CASE(test_get_layer_deny_mask),
+ KUNIT_CASE(test_landlock_get_deny_masks),
+ {}
+ /* clang-format on */
+};
+
+static struct kunit_suite test_suite = {
+ .name = "landlock_domain",
+ .test_cases = test_cases,
+};
+
+kunit_test_suite(test_suite);
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+#endif /* CONFIG_AUDIT */
diff --git a/security/landlock/domain.h b/security/landlock/domain.h
new file mode 100644
index 000000000000..7fb70b25f85a
--- /dev/null
+++ b/security/landlock/domain.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock - Domain management
+ *
+ * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_DOMAIN_H
+#define _SECURITY_LANDLOCK_DOMAIN_H
+
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/path.h>
+#include <linux/pid.h>
+#include <linux/refcount.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "access.h"
+#include "audit.h"
+
+enum landlock_log_status {
+ LANDLOCK_LOG_PENDING = 0,
+ LANDLOCK_LOG_RECORDED,
+ LANDLOCK_LOG_DISABLED,
+};
+
+/**
+ * struct landlock_details - Domain's creation information
+ *
+ * Rarely accessed, mainly when logging the first domain's denial.
+ *
+ * The contained pointers are initialized at the domain creation time and never
+ * changed again. Contrary to most other Landlock object types, this one is
+ * not allocated with GFP_KERNEL_ACCOUNT because its size may not be under the
+ * caller's control (e.g. unknown exe_path) and the data is not explicitly
+ * requested nor used by tasks.
+ */
+struct landlock_details {
+ /**
+ * @pid: PID of the task that initially restricted itself. It still
+ * identifies the same task. Keeping a reference to this PID ensures that
+ * it will not be recycled.
+ */
+ struct pid *pid;
+ /**
+ * @uid: UID of the task that initially restricted itself, at creation time.
+ */
+ uid_t uid;
+ /**
+ * @comm: Command name of the task that initially restricted itself, at
+ * creation time. Always NULL terminated.
+ */
+ char comm[TASK_COMM_LEN];
+ /**
+ * @exe_path: Executable path of the task that initially restricted
+ * itself, at creation time. Always NULL terminated, and never greater
+ * than LANDLOCK_PATH_MAX_SIZE.
+ */
+ char exe_path[];
+};
+
+/* Adds 11 extra characters for the potential " (deleted)" suffix. */
+#define LANDLOCK_PATH_MAX_SIZE (PATH_MAX + 11)
+
+/* Makes sure the greatest landlock_details can be allocated. */
+static_assert(struct_size_t(struct landlock_details, exe_path,
+ LANDLOCK_PATH_MAX_SIZE) <= KMALLOC_MAX_SIZE);
+
+/**
+ * struct landlock_hierarchy - Node in a domain hierarchy
+ */
+struct landlock_hierarchy {
+ /**
+ * @parent: Pointer to the parent node, or NULL if it is a root
+ * Landlock domain.
+ */
+ struct landlock_hierarchy *parent;
+ /**
+ * @usage: Number of potential children domains plus their parent
+ * domain.
+ */
+ refcount_t usage;
+
+#ifdef CONFIG_AUDIT
+ /**
+ * @log_status: Whether this domain should be logged or not. Because
+ * concurrent log entries may be created at the same time, it is still
+ * possible to have several domain records of the same domain.
+ */
+ enum landlock_log_status log_status;
+ /**
+ * @num_denials: Number of access requests denied by this domain.
+ * Masked (i.e. never logged) denials are still counted.
+ */
+ atomic64_t num_denials;
+ /**
+ * @id: Landlock domain ID, set once at domain creation time.
+ */
+ u64 id;
+ /**
+ * @details: Information about the related domain.
+ */
+ const struct landlock_details *details;
+ /**
+ * @log_same_exec: Set if the domain is *not* configured with
+ * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF. Set to true by default.
+ */
+ u32 log_same_exec : 1,
+ /**
+ * @log_new_exec: Set if the domain is configured with
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON. Set to false by default.
+ */
+ log_new_exec : 1;
+#endif /* CONFIG_AUDIT */
+};
+
+#ifdef CONFIG_AUDIT
+
+deny_masks_t
+landlock_get_deny_masks(const access_mask_t all_existing_optional_access,
+ const access_mask_t optional_access,
+ const layer_mask_t (*const layer_masks)[],
+ size_t layer_masks_size);
+
+int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy);
+
+static inline void
+landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy)
+{
+ if (!hierarchy || !hierarchy->details)
+ return;
+
+ put_pid(hierarchy->details->pid);
+ kfree(hierarchy->details);
+}
+
+#else /* CONFIG_AUDIT */
+
+static inline int
+landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy)
+{
+ return 0;
+}
+
+static inline void
+landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
+static inline void
+landlock_get_hierarchy(struct landlock_hierarchy *const hierarchy)
+{
+ if (hierarchy)
+ refcount_inc(&hierarchy->usage);
+}
+
+static inline void landlock_put_hierarchy(struct landlock_hierarchy *hierarchy)
+{
+ while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
+ const struct landlock_hierarchy *const freeme = hierarchy;
+
+ landlock_log_drop_domain(hierarchy);
+ landlock_free_hierarchy_details(hierarchy);
+ hierarchy = hierarchy->parent;
+ kfree(freeme);
+ }
+}
+
+#endif /* _SECURITY_LANDLOCK_DOMAIN_H */
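
landlock_put_hierarchy() releases a whole parent chain iteratively rather than recursively, so dropping a deeply nested domain cannot grow the kernel stack. A userspace sketch of the same loop over a reference-counted parent chain; the node type and helpers are illustrative, with plain integers standing in for refcount_t:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *parent;
	unsigned int usage;	/* stands in for refcount_t */
};

/* Drops one reference on @node, then keeps walking up while counts hit zero. */
static void put_node(struct node *node)
{
	while (node && --node->usage == 0) {
		struct node *freeme = node;

		node = node->parent;	/* read the parent before freeing */
		printf("freeing node %p\n", (void *)freeme);
		free(freeme);
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *child = calloc(1, sizeof(*child));

	if (!root || !child)
		return 1;

	root->usage = 2;	/* itself plus one child */
	child->usage = 1;
	child->parent = root;

	put_node(child);	/* frees child, drops root to 1 */
	put_node(root);		/* frees root */
	return 0;
}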
diff --git a/security/landlock/errata.h b/security/landlock/errata.h
new file mode 100644
index 000000000000..8e626accac10
--- /dev/null
+++ b/security/landlock/errata.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock - Errata information
+ *
+ * Copyright © 2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_ERRATA_H
+#define _SECURITY_LANDLOCK_ERRATA_H
+
+#include <linux/init.h>
+
+struct landlock_erratum {
+ const int abi;
+ const u8 number;
+};
+
+/* clang-format off */
+#define LANDLOCK_ERRATUM(NUMBER) \
+ { \
+ .abi = LANDLOCK_ERRATA_ABI, \
+ .number = NUMBER, \
+ },
+/* clang-format on */
+
+/*
+ * Some fixes may require user space to check if they are applied on the running
+ * kernel before using a specific feature. For instance, this applies when a
+ * restriction was previously too restrictive and is now getting relaxed (for
+ * compatibility or semantic reasons). However, non-visible changes for
+ * legitimate use (e.g. security fixes) do not require an erratum.
+ */
+static const struct landlock_erratum landlock_errata_init[] __initconst = {
+
+/*
+ * Only Sparse may not implement __has_include. If a compiler does not
+ * implement __has_include, a warning will be printed at boot time (see
+ * setup.c).
+ */
+#ifdef __has_include
+
+#define LANDLOCK_ERRATA_ABI 1
+#if __has_include("errata/abi-1.h")
+#include "errata/abi-1.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+#define LANDLOCK_ERRATA_ABI 2
+#if __has_include("errata/abi-2.h")
+#include "errata/abi-2.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+#define LANDLOCK_ERRATA_ABI 3
+#if __has_include("errata/abi-3.h")
+#include "errata/abi-3.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+#define LANDLOCK_ERRATA_ABI 4
+#if __has_include("errata/abi-4.h")
+#include "errata/abi-4.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+#define LANDLOCK_ERRATA_ABI 5
+#if __has_include("errata/abi-5.h")
+#include "errata/abi-5.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+#define LANDLOCK_ERRATA_ABI 6
+#if __has_include("errata/abi-6.h")
+#include "errata/abi-6.h"
+#endif
+#undef LANDLOCK_ERRATA_ABI
+
+/*
+ * For each new erratum, we need to include all the ABI files up to the impacted
+ * ABI to make all potential future intermediate errata easy to backport.
+ *
+ * If such change involves more than one ABI addition, then it must be in a
+ * dedicated commit with the same Fixes tag as used for the actual fix.
+ *
+ * Each commit creating a new security/landlock/errata/abi-*.h file must have a
+ * Depends-on tag to reference the commit that previously added the line to
+ * include this new file, except if the original Fixes tag is enough.
+ *
+ * Each erratum must be documented in its related ABI file, and a dedicated
+ * commit must update Documentation/userspace-api/landlock.rst to include this
+ * erratum. This commit will not be backported.
+ */
+
+#endif
+
+ {}
+};
+
+#endif /* _SECURITY_LANDLOCK_ERRATA_H */
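
The errata table relies on __has_include so that backporting a fix only requires adding its errata/abi-N.h file; the central list itself never changes. The same guarded-include pattern works in plain C, as in this sketch where "optional_feature.h" is a hypothetical header:

#include <stdio.h>

/*
 * Pull in an optional header only if it exists in the include path.
 * "optional_feature.h" is a made-up file for this sketch; when it is
 * absent, the fallback below keeps the build working unchanged.
 */
#ifdef __has_include
# if __has_include("optional_feature.h")
#  include "optional_feature.h"
# endif
#endif

#ifndef OPTIONAL_FEATURE_LEVEL
# define OPTIONAL_FEATURE_LEVEL 0	/* fallback when the header is missing */
#endif

int main(void)
{
	printf("optional feature level: %d\n", OPTIONAL_FEATURE_LEVEL);
	return 0;
}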
diff --git a/security/landlock/errata/abi-4.h b/security/landlock/errata/abi-4.h
new file mode 100644
index 000000000000..c052ee54f89f
--- /dev/null
+++ b/security/landlock/errata/abi-4.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/**
+ * DOC: erratum_1
+ *
+ * Erratum 1: TCP socket identification
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC,
+ * MPTCP, or SCTP) were incorrectly restricted by TCP access rights during
+ * :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures
+ * that only TCP sockets are subject to TCP access rights, allowing other
+ * protocols to operate without unnecessary restrictions.
+ */
+LANDLOCK_ERRATUM(1)
diff --git a/security/landlock/errata/abi-6.h b/security/landlock/errata/abi-6.h
new file mode 100644
index 000000000000..df7bc0e1fdf4
--- /dev/null
+++ b/security/landlock/errata/abi-6.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/**
+ * DOC: erratum_2
+ *
+ * Erratum 2: Scoped signal handling
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This fix addresses an issue where signal scoping was overly restrictive,
+ * preventing sandboxed threads from signaling other threads within the same
+ * process if they belonged to different domains. Because threads are not
+ * security boundaries, user space might assume that threads within the same
+ * process can send signals to each other (see :manpage:`nptl(7)` and
+ * :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct
+ * interaction between threads of the same process should always be allowed.
+ * This change ensures that any thread is allowed to send signals to any other
+ * thread within the same process, regardless of their domain.
+ */
+LANDLOCK_ERRATUM(2)
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index e31b97a9f175..c04f8879ad03 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Landlock LSM - Filesystem management and hooks
+ * Landlock - Filesystem management and hooks
*
* Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2018-2020 ANSSI
- * Copyright © 2021-2022 Microsoft Corporation
+ * Copyright © 2021-2025 Microsoft Corporation
* Copyright © 2022 Günther Noack <gnoack3000@gmail.com>
* Copyright © 2023-2024 Google LLC
*/
@@ -23,11 +23,14 @@
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
+#include <linux/lsm_audit.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
+#include <linux/pid.h>
#include <linux/rcupdate.h>
+#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
@@ -36,8 +39,11 @@
#include <uapi/linux/fiemap.h>
#include <uapi/linux/landlock.h>
+#include "access.h"
+#include "audit.h"
#include "common.h"
#include "cred.h"
+#include "domain.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
@@ -388,24 +394,10 @@ static bool is_nouser_or_private(const struct dentry *dentry)
unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}
-static access_mask_t
-get_handled_fs_accesses(const struct landlock_ruleset *const domain)
-{
- /* Handles all initially denied by default access rights. */
- return landlock_union_access_masks(domain).fs |
- LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
-}
-
static const struct access_masks any_fs = {
.fs = ~0,
};
-static const struct landlock_ruleset *get_current_fs_domain(void)
-{
- return landlock_get_applicable_domain(landlock_get_current_domain(),
- any_fs);
-}
-
/*
* Check that a destination file hierarchy has more restrictions than a source
* file hierarchy. This is only used for link and rename actions.
@@ -572,6 +564,12 @@ static void test_no_more_access(struct kunit *const test)
#undef NMA_TRUE
#undef NMA_FALSE
+static bool is_layer_masks_allowed(
+ layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
+{
+ return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
+}
+
/*
* Removes @layer_masks accesses that are not requested.
*
@@ -589,7 +587,8 @@ scope_to_request(const access_mask_t access_request,
for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
(*layer_masks)[access_bit] = 0;
- return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
+
+ return is_layer_masks_allowed(layer_masks);
}
#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
@@ -728,6 +727,7 @@ static void test_is_eacces_with_write(struct kunit *const test)
* those identified by @access_request_parent1). This matrix can
* initially refer to domain layer masks and, when the accesses for the
* destination and source are the same, to requested layer masks.
+ * @log_request_parent1: Audit request to fill if the related access is denied.
* @dentry_child1: Dentry to the initial child of the parent1 path. This
* pointer must be NULL for non-refer actions (i.e. not link nor rename).
* @access_request_parent2: Similar to @access_request_parent1 but for a
@@ -736,6 +736,7 @@ static void test_is_eacces_with_write(struct kunit *const test)
* the source. Must be set to 0 when using a simple path request.
* @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
* action. This must be NULL otherwise.
+ * @log_request_parent2: Audit request to fill if the related access is denied.
* @dentry_child2: Dentry to the initial child of the parent2 path. This
* pointer is only set for RENAME_EXCHANGE actions and must be NULL
* otherwise.
@@ -755,10 +756,12 @@ static bool is_access_to_paths_allowed(
const struct path *const path,
const access_mask_t access_request_parent1,
layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
- const struct dentry *const dentry_child1,
+ struct landlock_request *const log_request_parent1,
+ struct dentry *const dentry_child1,
const access_mask_t access_request_parent2,
layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
- const struct dentry *const dentry_child2)
+ struct landlock_request *const log_request_parent2,
+ struct dentry *const dentry_child2)
{
bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
child1_is_directory = true, child2_is_directory = true;
@@ -771,23 +774,31 @@ static bool is_access_to_paths_allowed(
if (!access_request_parent1 && !access_request_parent2)
return true;
- if (WARN_ON_ONCE(!domain || !path))
+
+ if (WARN_ON_ONCE(!path))
return true;
+
if (is_nouser_or_private(path->dentry))
return true;
- if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
+
+ if (WARN_ON_ONCE(!layer_masks_parent1))
return false;
+ allowed_parent1 = is_layer_masks_allowed(layer_masks_parent1);
+
if (unlikely(layer_masks_parent2)) {
if (WARN_ON_ONCE(!dentry_child1))
return false;
+
+ allowed_parent2 = is_layer_masks_allowed(layer_masks_parent2);
+
/*
* For a double request, first check for potential privilege
* escalation by looking at domain handled accesses (which are
* a superset of the meaningful requested accesses).
*/
access_masked_parent1 = access_masked_parent2 =
- get_handled_fs_accesses(domain);
+ landlock_union_access_masks(domain).fs;
is_dom_check = true;
} else {
if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
@@ -847,15 +858,6 @@ static bool is_access_to_paths_allowed(
child1_is_directory, layer_masks_parent2,
layer_masks_child2,
child2_is_directory))) {
- allowed_parent1 = scope_to_request(
- access_request_parent1, layer_masks_parent1);
- allowed_parent2 = scope_to_request(
- access_request_parent2, layer_masks_parent2);
-
- /* Stops when all accesses are granted. */
- if (allowed_parent1 && allowed_parent2)
- break;
-
/*
* Now, downgrades the remaining checks from domain
* handled accesses to requested accesses.
@@ -863,19 +865,37 @@ static bool is_access_to_paths_allowed(
is_dom_check = false;
access_masked_parent1 = access_request_parent1;
access_masked_parent2 = access_request_parent2;
+
+ allowed_parent1 =
+ allowed_parent1 ||
+ scope_to_request(access_masked_parent1,
+ layer_masks_parent1);
+ allowed_parent2 =
+ allowed_parent2 ||
+ scope_to_request(access_masked_parent2,
+ layer_masks_parent2);
+
+ /* Stops when all accesses are granted. */
+ if (allowed_parent1 && allowed_parent2)
+ break;
}
rule = find_rule(domain, walker_path.dentry);
- allowed_parent1 = landlock_unmask_layers(
- rule, access_masked_parent1, layer_masks_parent1,
- ARRAY_SIZE(*layer_masks_parent1));
- allowed_parent2 = landlock_unmask_layers(
- rule, access_masked_parent2, layer_masks_parent2,
- ARRAY_SIZE(*layer_masks_parent2));
+ allowed_parent1 = allowed_parent1 ||
+ landlock_unmask_layers(
+ rule, access_masked_parent1,
+ layer_masks_parent1,
+ ARRAY_SIZE(*layer_masks_parent1));
+ allowed_parent2 = allowed_parent2 ||
+ landlock_unmask_layers(
+ rule, access_masked_parent2,
+ layer_masks_parent2,
+ ARRAY_SIZE(*layer_masks_parent2));
/* Stops when a rule from each layer grants access. */
if (allowed_parent1 && allowed_parent2)
break;
+
jump_up:
if (walker_path.dentry == walker_path.mnt->mnt_root) {
if (follow_up(&walker_path)) {
@@ -895,8 +915,10 @@ jump_up:
* access to internal filesystems (e.g. nsfs, which is
* reachable through /proc/<pid>/ns/<namespace>).
*/
- allowed_parent1 = allowed_parent2 =
- !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
+ if (walker_path.mnt->mnt_flags & MNT_INTERNAL) {
+ allowed_parent1 = true;
+ allowed_parent2 = true;
+ }
break;
}
parent_dentry = dget_parent(walker_path.dentry);
@@ -905,42 +927,59 @@ jump_up:
}
path_put(&walker_path);
+ if (!allowed_parent1) {
+ log_request_parent1->type = LANDLOCK_REQUEST_FS_ACCESS;
+ log_request_parent1->audit.type = LSM_AUDIT_DATA_PATH;
+ log_request_parent1->audit.u.path = *path;
+ log_request_parent1->access = access_masked_parent1;
+ log_request_parent1->layer_masks = layer_masks_parent1;
+ log_request_parent1->layer_masks_size =
+ ARRAY_SIZE(*layer_masks_parent1);
+ }
+
+ if (!allowed_parent2) {
+ log_request_parent2->type = LANDLOCK_REQUEST_FS_ACCESS;
+ log_request_parent2->audit.type = LSM_AUDIT_DATA_PATH;
+ log_request_parent2->audit.u.path = *path;
+ log_request_parent2->access = access_masked_parent2;
+ log_request_parent2->layer_masks = layer_masks_parent2;
+ log_request_parent2->layer_masks_size =
+ ARRAY_SIZE(*layer_masks_parent2);
+ }
return allowed_parent1 && allowed_parent2;
}
-static int check_access_path(const struct landlock_ruleset *const domain,
- const struct path *const path,
- access_mask_t access_request)
+static int current_check_access_path(const struct path *const path,
+ access_mask_t access_request)
{
+ const struct access_masks masks = {
+ .fs = access_request,
+ };
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), masks, NULL);
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
+ struct landlock_request request = {};
- access_request = landlock_init_layer_masks(
- domain, access_request, &layer_masks, LANDLOCK_KEY_INODE);
- if (is_access_to_paths_allowed(domain, path, access_request,
- &layer_masks, NULL, 0, NULL, NULL))
+ if (!subject)
return 0;
- return -EACCES;
-}
-static int current_check_access_path(const struct path *const path,
- const access_mask_t access_request)
-{
- const struct landlock_ruleset *const dom = get_current_fs_domain();
-
- if (!dom)
+ access_request = landlock_init_layer_masks(subject->domain,
+ access_request, &layer_masks,
+ LANDLOCK_KEY_INODE);
+ if (is_access_to_paths_allowed(subject->domain, path, access_request,
+ &layer_masks, &request, NULL, 0, NULL,
+ NULL, NULL))
return 0;
- return check_access_path(dom, path, access_request);
+
+ landlock_log_denial(subject, &request);
+ return -EACCES;
}
-static access_mask_t get_mode_access(const umode_t mode)
+static __attribute_const__ access_mask_t get_mode_access(const umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFLNK:
return LANDLOCK_ACCESS_FS_MAKE_SYM;
- case 0:
- /* A zero mode translates to S_IFREG. */
- case S_IFREG:
- return LANDLOCK_ACCESS_FS_MAKE_REG;
case S_IFDIR:
return LANDLOCK_ACCESS_FS_MAKE_DIR;
case S_IFCHR:
@@ -951,9 +990,12 @@ static access_mask_t get_mode_access(const umode_t mode)
return LANDLOCK_ACCESS_FS_MAKE_FIFO;
case S_IFSOCK:
return LANDLOCK_ACCESS_FS_MAKE_SOCK;
+ case S_IFREG:
+ case 0:
+ /* A zero mode translates to S_IFREG. */
default:
- WARN_ON_ONCE(1);
- return 0;
+ /* Treats weird files as regular files. */
+ return LANDLOCK_ACCESS_FS_MAKE_REG;
}
}
@@ -1090,18 +1132,19 @@ static int current_check_refer_path(struct dentry *const old_dentry,
struct dentry *const new_dentry,
const bool removable, const bool exchange)
{
- const struct landlock_ruleset *const dom = get_current_fs_domain();
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs, NULL);
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
struct dentry *old_parent;
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
+ struct landlock_request request1 = {}, request2 = {};
- if (!dom)
+ if (!subject)
return 0;
- if (WARN_ON_ONCE(dom->num_layers < 1))
- return -EACCES;
+
if (unlikely(d_is_negative(old_dentry)))
return -ENOENT;
if (exchange) {
@@ -1126,12 +1169,16 @@ static int current_check_refer_path(struct dentry *const old_dentry,
* for same-directory referer (i.e. no reparenting).
*/
access_request_parent1 = landlock_init_layer_masks(
- dom, access_request_parent1 | access_request_parent2,
+ subject->domain,
+ access_request_parent1 | access_request_parent2,
&layer_masks_parent1, LANDLOCK_KEY_INODE);
- if (is_access_to_paths_allowed(
- dom, new_dir, access_request_parent1,
- &layer_masks_parent1, NULL, 0, NULL, NULL))
+ if (is_access_to_paths_allowed(subject->domain, new_dir,
+ access_request_parent1,
+ &layer_masks_parent1, &request1,
+ NULL, 0, NULL, NULL, NULL))
return 0;
+
+ landlock_log_denial(subject, &request1);
return -EACCES;
}
@@ -1152,10 +1199,12 @@ static int current_check_refer_path(struct dentry *const old_dentry,
old_dentry->d_parent;
/* new_dir->dentry is equal to new_dentry->d_parent */
- allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
+ allow_parent1 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
+ old_parent,
&layer_masks_parent1);
- allow_parent2 = collect_domain_accesses(
- dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
+ allow_parent2 = collect_domain_accesses(subject->domain, mnt_dir.dentry,
+ new_dir->dentry,
+ &layer_masks_parent2);
if (allow_parent1 && allow_parent2)
return 0;
@@ -1167,11 +1216,21 @@ static int current_check_refer_path(struct dentry *const old_dentry,
* destination parent access rights.
*/
if (is_access_to_paths_allowed(
- dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
- old_dentry, access_request_parent2, &layer_masks_parent2,
+ subject->domain, &mnt_dir, access_request_parent1,
+ &layer_masks_parent1, &request1, old_dentry,
+ access_request_parent2, &layer_masks_parent2, &request2,
exchange ? new_dentry : NULL))
return 0;
+ if (request1.access) {
+ request1.audit.u.path.dentry = old_parent;
+ landlock_log_denial(subject, &request1);
+ }
+ if (request2.access) {
+ request2.audit.u.path.dentry = new_dir->dentry;
+ landlock_log_denial(subject, &request2);
+ }
+
/*
* This prioritizes EACCES over EXDEV for all actions, including
* renames with RENAME_EXCHANGE.
@@ -1208,7 +1267,7 @@ static void hook_inode_free_security_rcu(void *inode_security)
/*
* Release the inodes used in a security policy.
*
- * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
+ * Cf. fsnotify_unmount_inodes() and evict_inodes()
*/
static void hook_sb_delete(struct super_block *const sb)
{
@@ -1314,6 +1373,34 @@ static void hook_sb_delete(struct super_block *const sb)
!atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
+static void
+log_fs_change_topology_path(const struct landlock_cred_security *const subject,
+ size_t handle_layer, const struct path *const path)
+{
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
+ .audit = {
+ .type = LSM_AUDIT_DATA_PATH,
+ .u.path = *path,
+ },
+ .layer_plus_one = handle_layer + 1,
+ });
+}
+
+static void log_fs_change_topology_dentry(
+ const struct landlock_cred_security *const subject, size_t handle_layer,
+ struct dentry *const dentry)
+{
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_FS_CHANGE_TOPOLOGY,
+ .audit = {
+ .type = LSM_AUDIT_DATA_DENTRY,
+ .u.dentry = dentry,
+ },
+ .layer_plus_one = handle_layer + 1,
+ });
+}
+
/*
* Because a Landlock security policy is defined according to the filesystem
* topology (i.e. the mount namespace), changing it may grant access to files
@@ -1336,16 +1423,30 @@ static int hook_sb_mount(const char *const dev_name,
const struct path *const path, const char *const type,
const unsigned long flags, void *const data)
{
- if (!get_current_fs_domain())
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs,
+ &handle_layer);
+
+ if (!subject)
return 0;
+
+ log_fs_change_topology_path(subject, handle_layer, path);
return -EPERM;
}
static int hook_move_mount(const struct path *const from_path,
const struct path *const to_path)
{
- if (!get_current_fs_domain())
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs,
+ &handle_layer);
+
+ if (!subject)
return 0;
+
+ log_fs_change_topology_path(subject, handle_layer, to_path);
return -EPERM;
}
@@ -1355,15 +1456,29 @@ static int hook_move_mount(const struct path *const from_path,
*/
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
- if (!get_current_fs_domain())
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs,
+ &handle_layer);
+
+ if (!subject)
return 0;
+
+ log_fs_change_topology_dentry(subject, handle_layer, mnt->mnt_root);
return -EPERM;
}
static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
- if (!get_current_fs_domain())
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs,
+ &handle_layer);
+
+ if (!subject)
return 0;
+
+ log_fs_change_topology_dentry(subject, handle_layer, sb->s_root);
return -EPERM;
}
@@ -1378,8 +1493,15 @@ static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
static int hook_sb_pivotroot(const struct path *const old_path,
const struct path *const new_path)
{
- if (!get_current_fs_domain())
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), any_fs,
+ &handle_layer);
+
+ if (!subject)
return 0;
+
+ log_fs_change_topology_path(subject, handle_layer, new_path);
return -EPERM;
}
@@ -1414,11 +1536,7 @@ static int hook_path_mknod(const struct path *const dir,
struct dentry *const dentry, const umode_t mode,
const unsigned int dev)
{
- const struct landlock_ruleset *const dom = get_current_fs_domain();
-
- if (!dom)
- return 0;
- return check_access_path(dom, dir, get_mode_access(mode));
+ return current_check_access_path(dir, get_mode_access(mode));
}
static int hook_path_symlink(const struct path *const dir,
@@ -1500,11 +1618,11 @@ static int hook_file_open(struct file *const file)
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
access_mask_t open_access_request, full_access_request, allowed_access,
optional_access;
- const struct landlock_ruleset *const dom =
- landlock_get_applicable_domain(
- landlock_cred(file->f_cred)->domain, any_fs);
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(file->f_cred, any_fs, NULL);
+ struct landlock_request request = {};
- if (!dom)
+ if (!subject)
return 0;
/*
@@ -1525,10 +1643,11 @@ static int hook_file_open(struct file *const file)
full_access_request = open_access_request | optional_access;
if (is_access_to_paths_allowed(
- dom, &file->f_path,
- landlock_init_layer_masks(dom, full_access_request,
- &layer_masks, LANDLOCK_KEY_INODE),
- &layer_masks, NULL, 0, NULL, NULL)) {
+ subject->domain, &file->f_path,
+ landlock_init_layer_masks(subject->domain,
+ full_access_request, &layer_masks,
+ LANDLOCK_KEY_INODE),
+ &layer_masks, &request, NULL, 0, NULL, NULL, NULL)) {
allowed_access = full_access_request;
} else {
unsigned long access_bit;
@@ -1554,10 +1673,18 @@ static int hook_file_open(struct file *const file)
* file access rights in the opened struct file.
*/
landlock_file(file)->allowed_access = allowed_access;
+#ifdef CONFIG_AUDIT
+ landlock_file(file)->deny_masks = landlock_get_deny_masks(
+ _LANDLOCK_ACCESS_FS_OPTIONAL, optional_access, &layer_masks,
+ ARRAY_SIZE(layer_masks));
+#endif /* CONFIG_AUDIT */
if ((open_access_request & allowed_access) == open_access_request)
return 0;
+ /* Sets access to reflect the actual request. */
+ request.access = open_access_request;
+ landlock_log_denial(subject, &request);
return -EACCES;
}
@@ -1575,11 +1702,24 @@ static int hook_file_truncate(struct file *const file)
*/
if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
return 0;
+
+ landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_FS_ACCESS,
+ .audit = {
+ .type = LSM_AUDIT_DATA_FILE,
+ .u.file = file,
+ },
+ .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
+ .access = LANDLOCK_ACCESS_FS_TRUNCATE,
+#ifdef CONFIG_AUDIT
+ .deny_masks = landlock_file(file)->deny_masks,
+#endif /* CONFIG_AUDIT */
+ });
return -EACCES;
}
-static int hook_file_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static int hook_file_ioctl_common(const struct file *const file,
+ const unsigned int cmd, const bool is_compat)
{
access_mask_t allowed_access = landlock_file(file)->allowed_access;
@@ -1595,56 +1735,98 @@ static int hook_file_ioctl(struct file *file, unsigned int cmd,
if (!is_device(file))
return 0;
- if (is_masked_device_ioctl(cmd))
+ if (unlikely(is_compat) ? is_masked_device_ioctl_compat(cmd) :
+ is_masked_device_ioctl(cmd))
return 0;
+ landlock_log_denial(landlock_cred(file->f_cred), &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_FS_ACCESS,
+ .audit = {
+ .type = LSM_AUDIT_DATA_IOCTL_OP,
+ .u.op = &(struct lsm_ioctlop_audit) {
+ .path = file->f_path,
+ .cmd = cmd,
+ },
+ },
+ .all_existing_optional_access = _LANDLOCK_ACCESS_FS_OPTIONAL,
+ .access = LANDLOCK_ACCESS_FS_IOCTL_DEV,
+#ifdef CONFIG_AUDIT
+ .deny_masks = landlock_file(file)->deny_masks,
+#endif /* CONFIG_AUDIT */
+ });
return -EACCES;
}
+static int hook_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return hook_file_ioctl_common(file, cmd, false);
+}
+
static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
- access_mask_t allowed_access = landlock_file(file)->allowed_access;
+ return hook_file_ioctl_common(file, cmd, true);
+}
+
+/*
+ * Always allow sending signals between threads of the same process. This
+ * ensures consistency with hook_task_kill().
+ */
+static bool control_current_fowner(struct fown_struct *const fown)
+{
+ struct task_struct *p;
/*
- * It is the access rights at the time of opening the file which
- * determine whether IOCTL can be used on the opened file later.
- *
- * The access right is attached to the opened file in hook_file_open().
+ * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
+ * file_set_fowner LSM hook inconsistencies").
*/
- if (allowed_access & LANDLOCK_ACCESS_FS_IOCTL_DEV)
- return 0;
+ lockdep_assert_held(&fown->lock);
- if (!is_device(file))
- return 0;
-
- if (is_masked_device_ioctl_compat(cmd))
- return 0;
+ /*
+ * Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
+ * critical section.
+ */
+ guard(rcu)();
+ p = pid_task(fown->pid, fown->pid_type);
+ if (!p)
+ return true;
- return -EACCES;
+ return !same_thread_group(p, current);
}
static void hook_file_set_fowner(struct file *file)
{
- struct landlock_ruleset *new_dom, *prev_dom;
+ struct landlock_ruleset *prev_dom;
+ struct landlock_cred_security fown_subject = {};
+ size_t fown_layer = 0;
+
+ if (control_current_fowner(file_f_owner(file))) {
+ static const struct access_masks signal_scope = {
+ .scope = LANDLOCK_SCOPE_SIGNAL,
+ };
+ const struct landlock_cred_security *new_subject =
+ landlock_get_applicable_subject(
+ current_cred(), signal_scope, &fown_layer);
+ if (new_subject) {
+ landlock_get_ruleset(new_subject->domain);
+ fown_subject = *new_subject;
+ }
+ }
- /*
- * Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
- * file_set_fowner LSM hook inconsistencies").
- */
- lockdep_assert_held(&file_f_owner(file)->lock);
- new_dom = landlock_get_current_domain();
- landlock_get_ruleset(new_dom);
- prev_dom = landlock_file(file)->fown_domain;
- landlock_file(file)->fown_domain = new_dom;
+ prev_dom = landlock_file(file)->fown_subject.domain;
+ landlock_file(file)->fown_subject = fown_subject;
+#ifdef CONFIG_AUDIT
+ landlock_file(file)->fown_layer = fown_layer;
+#endif /* CONFIG_AUDIT */
- /* Called in an RCU read-side critical section. */
+ /* May be called in an RCU read-side critical section. */
landlock_put_ruleset_deferred(prev_dom);
}
static void hook_file_free_security(struct file *file)
{
- landlock_put_ruleset_deferred(landlock_file(file)->fown_domain);
+ landlock_put_ruleset_deferred(landlock_file(file)->fown_subject.domain);
}
static struct security_hook_list landlock_hooks[] __ro_after_init = {
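
The truncate and ioctl hooks above check the rights recorded in the file's security blob at open time, and the new deny_masks field lets a later denial be attributed to the layer that refused the optional right. From user space this means LANDLOCK_ACCESS_FS_TRUNCATE must already be allowed on the path when the file is opened; a minimal sketch (hypothetical helper, error handling simplified):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: allows writes and truncation below @dir. */
static int allow_write_and_truncate(const int ruleset_fd, const char *const dir)
{
        /*
         * Without LANDLOCK_ACCESS_FS_TRUNCATE here, a later ftruncate() on a
         * file opened below @dir fails with EACCES even though open()
         * succeeded, because the decision relies on the rights recorded at
         * open time.
         */
        struct landlock_path_beneath_attr path_beneath = {
                .allowed_access = LANDLOCK_ACCESS_FS_WRITE_FILE |
                                  LANDLOCK_ACCESS_FS_TRUNCATE,
        };
        int err;

        path_beneath.parent_fd = open(dir, O_PATH | O_CLOEXEC);
        if (path_beneath.parent_fd < 0)
                return -1;

        err = syscall(__NR_landlock_add_rule, ruleset_fd,
                      LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
        close(path_beneath.parent_fd);
        return err;
}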
diff --git a/security/landlock/fs.h b/security/landlock/fs.h
index 1487e1f023a1..bf9948941f2f 100644
--- a/security/landlock/fs.h
+++ b/security/landlock/fs.h
@@ -1,18 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Landlock LSM - Filesystem management and hooks
+ * Landlock - Filesystem management and hooks
*
* Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2018-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
*/
#ifndef _SECURITY_LANDLOCK_FS_H
#define _SECURITY_LANDLOCK_FS_H
+#include <linux/build_bug.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
+#include "access.h"
+#include "cred.h"
#include "ruleset.h"
#include "setup.h"
@@ -52,15 +56,40 @@ struct landlock_file_security {
* needed to authorize later operations on the open file.
*/
access_mask_t allowed_access;
+
+#ifdef CONFIG_AUDIT
+ /**
+ * @deny_masks: Domain layer levels that deny an optional access (see
+ * _LANDLOCK_ACCESS_FS_OPTIONAL).
+ */
+ deny_masks_t deny_masks;
/**
- * @fown_domain: Domain of the task that set the PID that may receive a
- * signal e.g., SIGURG when writing MSG_OOB to the related socket.
- * This pointer is protected by the related file->f_owner->lock, as for
- * fown_struct's members: pid, uid, and euid.
+ * @fown_layer: Layer level of @fown_subject->domain with
+ * LANDLOCK_SCOPE_SIGNAL.
*/
- struct landlock_ruleset *fown_domain;
+ u8 fown_layer;
+#endif /* CONFIG_AUDIT */
+
+ /**
+ * @fown_subject: Landlock credential of the task that set the PID that
+ * may receive a signal e.g., SIGURG when writing MSG_OOB to the
+ * related socket. This pointer is protected by the related
+ * file->f_owner->lock, as for fown_struct's members: pid, uid, and
+ * euid.
+ */
+ struct landlock_cred_security fown_subject;
};
+#ifdef CONFIG_AUDIT
+
+/* Makes sure all layers can be identified. */
+/* clang-format off */
+static_assert((typeof_member(struct landlock_file_security, fown_layer))~0 >=
+ LANDLOCK_MAX_NUM_LAYERS);
+/* clang-format on */
+
+#endif /* CONFIG_AUDIT */
+
/**
* struct landlock_superblock_security - Superblock security blob
*
diff --git a/security/landlock/id.c b/security/landlock/id.c
new file mode 100644
index 000000000000..838c3ed7bb82
--- /dev/null
+++ b/security/landlock/id.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Landlock - Unique identification number generator
+ *
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#include <kunit/test.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+
+#include "common.h"
+#include "id.h"
+
+#define COUNTER_PRE_INIT 0
+
+static atomic64_t next_id = ATOMIC64_INIT(COUNTER_PRE_INIT);
+
+static void __init init_id(atomic64_t *const counter, const u32 random_32bits)
+{
+ u64 init;
+
+ /*
+ * Ensures that 64-bit values are always used by user space (or may
+ * fail with -EOVERFLOW), and makes this testable.
+ */
+ init = BIT_ULL(32);
+
+ /*
+ * Adds a large (up to 2^32) random boot-time offset to limit ID collisions
+ * in logs from different boots, and to limit the information leaked about
+ * the number of elements (e.g. domains) created before the reader's own.
+ */
+ init += random_32bits;
+
+ /* Sets first or ignores. This will be the first ID. */
+ atomic64_cmpxchg(counter, COUNTER_PRE_INIT, init);
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static void __init test_init_min(struct kunit *const test)
+{
+ atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);
+
+ init_id(&counter, 0);
+ KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1ULL + U32_MAX);
+}
+
+static void __init test_init_max(struct kunit *const test)
+{
+ atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);
+
+ init_id(&counter, ~0);
+ KUNIT_EXPECT_EQ(test, atomic64_read(&counter), 1 + (2ULL * U32_MAX));
+}
+
+static void __init test_init_once(struct kunit *const test)
+{
+ const u64 first_init = 1ULL + U32_MAX;
+ atomic64_t counter = ATOMIC64_INIT(COUNTER_PRE_INIT);
+
+ init_id(&counter, 0);
+ KUNIT_EXPECT_EQ(test, atomic64_read(&counter), first_init);
+
+ init_id(&counter, ~0);
+ KUNIT_EXPECT_EQ_MSG(
+ test, atomic64_read(&counter), first_init,
+ "Should still have the same value after the subsequent init_id()");
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+void __init landlock_init_id(void)
+{
+ return init_id(&next_id, get_random_u32());
+}
+
+/*
+ * It is not worth trying to hide the monotonic counter: it can still be
+ * inferred (with N counter ranges), a reader allowed to see an inode number
+ * should also be allowed to see its creation time anyway, and it is handy
+ * for user space to be able to store and sort domain IDs.
+ *
+ * Returns the current value of @counter and increments it to leave room for
+ * the next range.
+ */
+static u64 get_id_range(size_t number_of_ids, atomic64_t *const counter,
+ u8 random_4bits)
+{
+ u64 id, step;
+
+ /*
+ * We should return at least 1 ID, and we may need a set of consecutive
+ * ones (e.g. to generate a set of inodes).
+ */
+ if (WARN_ON_ONCE(number_of_ids <= 0))
+ number_of_ids = 1;
+
+ /*
+ * Blurs the next ID guess with a 1/16 ratio. We get 2^(64 - 4) -
+ * (2 * 2^32), so a bit less than 2^60 available IDs, which should be
+ * much more than enough considering the number of CPU cycles required
+ * to get a new ID (e.g. a full landlock_restrict_self() call), and the
+ * cost of draining all available IDs during the system's uptime.
+ */
+ random_4bits &= 0b1111;
+ step = number_of_ids + random_4bits;
+
+ /* It is safe to cast a signed atomic to an unsigned value. */
+ id = atomic64_fetch_add(step, counter);
+
+ /* Warns if landlock_init_id() was not called. */
+ WARN_ON_ONCE(id == COUNTER_PRE_INIT);
+ return id;
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static u8 get_random_u8_positive(void)
+{
+ /* max() evaluates its arguments once. */
+ return max(1, get_random_u8());
+}
+
+static void test_range1_rand0(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 0), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 1);
+}
+
+static void test_range1_rand1(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 1), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 2);
+}
+
+static void test_range1_rand15(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 15), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 16);
+}
+
+static void test_range1_rand16(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(1, &counter, 16), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 1);
+}
+
+static void test_range2_rand0(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 0), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 2);
+}
+
+static void test_range2_rand1(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 1), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 3);
+}
+
+static void test_range2_rand2(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 2), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 4);
+}
+
+static void test_range2_rand15(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 15), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 17);
+}
+
+static void test_range2_rand16(struct kunit *const test)
+{
+ atomic64_t counter;
+ u64 init;
+
+ init = get_random_u32();
+ atomic64_set(&counter, init);
+ KUNIT_EXPECT_EQ(test, get_id_range(2, &counter, 16), init);
+ KUNIT_EXPECT_EQ(test,
+ get_id_range(get_random_u8_positive(), &counter,
+ get_random_u8()),
+ init + 2);
+}
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
+
+/**
+ * landlock_get_id_range - Get a range of unique IDs
+ *
+ * @number_of_ids: Number of IDs to hold. Must be greater than zero.
+ *
+ * Returns: The first ID in the range.
+ */
+u64 landlock_get_id_range(size_t number_of_ids)
+{
+ return get_id_range(number_of_ids, &next_id, get_random_u8());
+}
+
+#ifdef CONFIG_SECURITY_LANDLOCK_KUNIT_TEST
+
+static struct kunit_case __refdata test_cases[] = {
+ /* clang-format off */
+ KUNIT_CASE(test_init_min),
+ KUNIT_CASE(test_init_max),
+ KUNIT_CASE(test_init_once),
+ KUNIT_CASE(test_range1_rand0),
+ KUNIT_CASE(test_range1_rand1),
+ KUNIT_CASE(test_range1_rand15),
+ KUNIT_CASE(test_range1_rand16),
+ KUNIT_CASE(test_range2_rand0),
+ KUNIT_CASE(test_range2_rand1),
+ KUNIT_CASE(test_range2_rand2),
+ KUNIT_CASE(test_range2_rand15),
+ KUNIT_CASE(test_range2_rand16),
+ {}
+ /* clang-format on */
+};
+
+static struct kunit_suite test_suite = {
+ .name = "landlock_id",
+ .test_cases = test_cases,
+};
+
+kunit_test_init_section_suite(test_suite);
+
+#endif /* CONFIG_SECURITY_LANDLOCK_KUNIT_TEST */
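
The KUnit cases above pin down the allocator's arithmetic: a call reserves number_of_ids slots plus a 0-15 random pad, so IDs stay mostly sequential while the exact next value is blurred. A standalone illustration of that step computation (plain C, no atomics, hypothetical names, not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors get_id_range()'s step computation. */
static uint64_t counter;

static uint64_t sketch_get_id_range(size_t number_of_ids, uint8_t random_4bits)
{
        uint64_t id = counter;

        /* Reserves the requested IDs plus a 0-15 random gap. */
        counter += number_of_ids + (random_4bits & 0x0f);
        return id;
}

int main(void)
{
        counter = (1ULL << 32) + 12345;	/* as after init_id() */
        printf("%llu\n", (unsigned long long)sketch_get_id_range(2, 15));
        printf("%llu\n", (unsigned long long)sketch_get_id_range(1, 0));
        /* The second ID is first + 2 + 15, matching test_range2_rand15(). */
        return 0;
}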
diff --git a/security/landlock/id.h b/security/landlock/id.h
new file mode 100644
index 000000000000..45dcfb9e9a8b
--- /dev/null
+++ b/security/landlock/id.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Landlock - Unique identification number generator
+ *
+ * Copyright © 2024-2025 Microsoft Corporation
+ */
+
+#ifndef _SECURITY_LANDLOCK_ID_H
+#define _SECURITY_LANDLOCK_ID_H
+
+#ifdef CONFIG_AUDIT
+
+void __init landlock_init_id(void);
+
+u64 landlock_get_id_range(size_t number_of_ids);
+
+#else /* CONFIG_AUDIT */
+
+static inline void __init landlock_init_id(void)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
+#endif /* _SECURITY_LANDLOCK_ID_H */
diff --git a/security/landlock/limits.h b/security/landlock/limits.h
index 15f7606066c8..65b5ff051674 100644
--- a/security/landlock/limits.h
+++ b/security/landlock/limits.h
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Landlock LSM - Limits for different components
+ * Landlock - Limits for different components
*
* Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2018-2020 ANSSI
+ * Copyright © 2021-2025 Microsoft Corporation
*/
#ifndef _SECURITY_LANDLOCK_LIMITS_H
@@ -29,6 +30,10 @@
#define LANDLOCK_LAST_SCOPE LANDLOCK_SCOPE_SIGNAL
#define LANDLOCK_MASK_SCOPE ((LANDLOCK_LAST_SCOPE << 1) - 1)
#define LANDLOCK_NUM_SCOPE __const_hweight64(LANDLOCK_MASK_SCOPE)
+
+#define LANDLOCK_LAST_RESTRICT_SELF LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+#define LANDLOCK_MASK_RESTRICT_SELF ((LANDLOCK_LAST_RESTRICT_SELF << 1) - 1)
+
/* clang-format on */
#endif /* _SECURITY_LANDLOCK_LIMITS_H */
diff --git a/security/landlock/net.c b/security/landlock/net.c
index d5dcc4407a19..1f3915a90a80 100644
--- a/security/landlock/net.c
+++ b/security/landlock/net.c
@@ -1,16 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Landlock LSM - Network management and hooks
+ * Landlock - Network management and hooks
*
* Copyright © 2022-2023 Huawei Tech. Co., Ltd.
- * Copyright © 2022-2023 Microsoft Corporation
+ * Copyright © 2022-2025 Microsoft Corporation
*/
#include <linux/in.h>
+#include <linux/lsm_audit.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <net/ipv6.h>
+#include "audit.h"
#include "common.h"
#include "cred.h"
#include "limits.h"
@@ -39,10 +41,6 @@ int landlock_append_net_rule(struct landlock_ruleset *const ruleset,
return err;
}
-static const struct access_masks any_net = {
- .net = ~0,
-};
-
static int current_check_access_socket(struct socket *const sock,
struct sockaddr *const address,
const int addrlen,
@@ -54,17 +52,17 @@ static int current_check_access_socket(struct socket *const sock,
struct landlock_id id = {
.type = LANDLOCK_KEY_NET_PORT,
};
- const struct landlock_ruleset *const dom =
- landlock_get_applicable_domain(landlock_get_current_domain(),
- any_net);
+ const struct access_masks masks = {
+ .net = access_request,
+ };
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), masks, NULL);
+ struct lsm_network_audit audit_net = {};
- if (!dom)
+ if (!subject)
return 0;
- if (WARN_ON_ONCE(dom->num_layers < 1))
- return -EACCES;
- /* Checks if it's a (potential) TCP socket. */
- if (sock->type != SOCK_STREAM)
+ if (!sk_is_tcp(sock->sk))
return 0;
/* Checks for minimal header length to safely read sa_family. */
@@ -73,18 +71,48 @@ static int current_check_access_socket(struct socket *const sock,
switch (address->sa_family) {
case AF_UNSPEC:
- case AF_INET:
+ case AF_INET: {
+ const struct sockaddr_in *addr4;
+
if (addrlen < sizeof(struct sockaddr_in))
return -EINVAL;
- port = ((struct sockaddr_in *)address)->sin_port;
+
+ addr4 = (struct sockaddr_in *)address;
+ port = addr4->sin_port;
+
+ if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) {
+ audit_net.dport = port;
+ audit_net.v4info.daddr = addr4->sin_addr.s_addr;
+ } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) {
+ audit_net.sport = port;
+ audit_net.v4info.saddr = addr4->sin_addr.s_addr;
+ } else {
+ WARN_ON_ONCE(1);
+ }
break;
+ }
#if IS_ENABLED(CONFIG_IPV6)
- case AF_INET6:
+ case AF_INET6: {
+ const struct sockaddr_in6 *addr6;
+
if (addrlen < SIN6_LEN_RFC2133)
return -EINVAL;
- port = ((struct sockaddr_in6 *)address)->sin6_port;
+
+ addr6 = (struct sockaddr_in6 *)address;
+ port = addr6->sin6_port;
+
+ if (access_request == LANDLOCK_ACCESS_NET_CONNECT_TCP) {
+ audit_net.dport = port;
+ audit_net.v6info.daddr = addr6->sin6_addr;
+ } else if (access_request == LANDLOCK_ACCESS_NET_BIND_TCP) {
+ audit_net.sport = port;
+ audit_net.v6info.saddr = addr6->sin6_addr;
+ } else {
+ WARN_ON_ONCE(1);
+ }
break;
+ }
#endif /* IS_ENABLED(CONFIG_IPV6) */
default:
@@ -146,13 +174,24 @@ static int current_check_access_socket(struct socket *const sock,
id.key.data = (__force uintptr_t)port;
BUILD_BUG_ON(sizeof(port) > sizeof(id.key.data));
- rule = landlock_find_rule(dom, id);
- access_request = landlock_init_layer_masks(
- dom, access_request, &layer_masks, LANDLOCK_KEY_NET_PORT);
+ rule = landlock_find_rule(subject->domain, id);
+ access_request = landlock_init_layer_masks(subject->domain,
+ access_request, &layer_masks,
+ LANDLOCK_KEY_NET_PORT);
if (landlock_unmask_layers(rule, access_request, &layer_masks,
ARRAY_SIZE(layer_masks)))
return 0;
+ audit_net.family = address->sa_family;
+ landlock_log_denial(subject,
+ &(struct landlock_request){
+ .type = LANDLOCK_REQUEST_NET_ACCESS,
+ .audit.type = LSM_AUDIT_DATA_NET,
+ .audit.u.net = &audit_net,
+ .access = access_request,
+ .layer_masks = &layer_masks,
+ .layer_masks_size = ARRAY_SIZE(layer_masks),
+ });
return -EACCES;
}
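
The denial path above now records the TCP endpoint (source for bind, destination for connect) in a struct lsm_network_audit before logging. For context, a hedged user-space sketch of a ruleset that only allows connecting to TCP port 443; with auditing enabled, a connect(2) to any other port would be denied and produce such a record (hypothetical helper, error handling simplified):

#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

static int allow_connect_443_only(void)
{
        struct landlock_ruleset_attr ruleset_attr = {
                .handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
                                      LANDLOCK_ACCESS_NET_CONNECT_TCP,
        };
        struct landlock_net_port_attr net_port = {
                .allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
                .port = 443,
        };
        int ruleset_fd;

        ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
                             sizeof(ruleset_attr), 0);
        if (ruleset_fd < 0)
                return -1;
        if (syscall(__NR_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_NET_PORT,
                    &net_port, 0)) {
                close(ruleset_fd);
                return -1;
        }
        return ruleset_fd;	/* to be enforced with landlock_restrict_self() */
}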
diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c
index a93bdbf52fff..ce7940efea51 100644
--- a/security/landlock/ruleset.c
+++ b/security/landlock/ruleset.c
@@ -8,11 +8,13 @@
#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
@@ -20,6 +22,9 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include "access.h"
+#include "audit.h"
+#include "domain.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
@@ -121,7 +126,7 @@ create_rule(const struct landlock_id id,
return ERR_PTR(-ENOMEM);
RB_CLEAR_NODE(&new_rule->node);
if (is_object_pointer(id.type)) {
- /* This should be catched by insert_rule(). */
+ /* This should have been caught by insert_rule(). */
WARN_ON_ONCE(!id.key.object);
landlock_get_object(id.key.object);
}
@@ -304,22 +309,6 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset,
return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
}
-static void get_hierarchy(struct landlock_hierarchy *const hierarchy)
-{
- if (hierarchy)
- refcount_inc(&hierarchy->usage);
-}
-
-static void put_hierarchy(struct landlock_hierarchy *hierarchy)
-{
- while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
- const struct landlock_hierarchy *const freeme = hierarchy;
-
- hierarchy = hierarchy->parent;
- kfree(freeme);
- }
-}
-
static int merge_tree(struct landlock_ruleset *const dst,
struct landlock_ruleset *const src,
const enum landlock_key_type key_type)
@@ -384,7 +373,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
err = -EINVAL;
goto out_unlock;
}
- dst->access_masks[dst->num_layers - 1] = src->access_masks[0];
+ dst->access_masks[dst->num_layers - 1] =
+ landlock_upgrade_handled_access_masks(src->access_masks[0]);
/* Merges the @src inode tree. */
err = merge_tree(dst, src, LANDLOCK_KEY_INODE);
@@ -473,7 +463,7 @@ static int inherit_ruleset(struct landlock_ruleset *const parent,
err = -EINVAL;
goto out_unlock;
}
- get_hierarchy(parent->hierarchy);
+ landlock_get_hierarchy(parent->hierarchy);
child->hierarchy->parent = parent->hierarchy;
out_unlock:
@@ -497,7 +487,7 @@ static void free_ruleset(struct landlock_ruleset *const ruleset)
free_rule(freeme, LANDLOCK_KEY_NET_PORT);
#endif /* IS_ENABLED(CONFIG_INET) */
- put_hierarchy(ruleset->hierarchy);
+ landlock_put_hierarchy(ruleset->hierarchy);
kfree(ruleset);
}
@@ -516,6 +506,7 @@ static void free_ruleset_work(struct work_struct *const work)
free_ruleset(ruleset);
}
+/* Only called by hook_cred_free(). */
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
@@ -530,6 +521,9 @@ void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
* @parent: Parent domain.
* @ruleset: New ruleset to be merged.
*
+ * The current task is requesting to be restricted. The subjective credentials
+ * must not be in an overridden state. cf. landlock_init_hierarchy_log().
+ *
* Returns the intersection of @parent and @ruleset, or returns @parent if
* @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
*/
@@ -537,7 +531,7 @@ struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
struct landlock_ruleset *const ruleset)
{
- struct landlock_ruleset *new_dom;
+ struct landlock_ruleset *new_dom __free(landlock_put_ruleset) = NULL;
u32 num_layers;
int err;
@@ -557,29 +551,29 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent,
new_dom = create_ruleset(num_layers);
if (IS_ERR(new_dom))
return new_dom;
+
new_dom->hierarchy =
kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
- if (!new_dom->hierarchy) {
- err = -ENOMEM;
- goto out_put_dom;
- }
+ if (!new_dom->hierarchy)
+ return ERR_PTR(-ENOMEM);
+
refcount_set(&new_dom->hierarchy->usage, 1);
/* ...as a child of @parent... */
err = inherit_ruleset(parent, new_dom);
if (err)
- goto out_put_dom;
+ return ERR_PTR(err);
/* ...and including @ruleset. */
err = merge_ruleset(new_dom, ruleset);
if (err)
- goto out_put_dom;
+ return ERR_PTR(err);
- return new_dom;
+ err = landlock_init_hierarchy_log(new_dom->hierarchy);
+ if (err)
+ return ERR_PTR(err);
-out_put_dom:
- landlock_put_ruleset(new_dom);
- return ERR_PTR(err);
+ return no_free_ptr(new_dom);
}
/*
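
landlock_merge_ruleset() above now relies on scope-based cleanup: the __free(landlock_put_ruleset) annotation drops the reference on every early return, and no_free_ptr() hands ownership to the caller on success, which is what allowed the goto-based error paths to be removed. A minimal sketch of the same <linux/cleanup.h> idiom with a hypothetical type (not Landlock code):

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical type, only to illustrate the pattern. */
struct foo { int x; };

static void put_foo(struct foo *f)
{
        kfree(f);
}

DEFINE_FREE(put_foo, struct foo *, if (!IS_ERR_OR_NULL(_T)) put_foo(_T))

static struct foo *make_foo(int x)
{
        struct foo *f __free(put_foo) = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return ERR_PTR(-ENOMEM);
        if (x < 0)
                return ERR_PTR(-EINVAL);	/* f is freed automatically */

        f->x = x;
        return no_free_ptr(f);			/* ownership passes to the caller */
}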
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index 631e24d4ffe9..5da9a64f5af7 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -9,57 +9,18 @@
#ifndef _SECURITY_LANDLOCK_RULESET_H
#define _SECURITY_LANDLOCK_RULESET_H
-#include <linux/bitops.h>
-#include <linux/build_bug.h>
-#include <linux/kernel.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
-#include <uapi/linux/landlock.h>
+#include "access.h"
#include "limits.h"
#include "object.h"
-/*
- * All access rights that are denied by default whether they are handled or not
- * by a ruleset/layer. This must be ORed with all ruleset->access_masks[]
- * entries when we need to get the absolute handled access masks.
- */
-/* clang-format off */
-#define LANDLOCK_ACCESS_FS_INITIALLY_DENIED ( \
- LANDLOCK_ACCESS_FS_REFER)
-/* clang-format on */
-
-typedef u16 access_mask_t;
-/* Makes sure all filesystem access rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_FS);
-/* Makes sure all network access rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_ACCESS_NET);
-/* Makes sure all scoped rights can be stored. */
-static_assert(BITS_PER_TYPE(access_mask_t) >= LANDLOCK_NUM_SCOPE);
-/* Makes sure for_each_set_bit() and for_each_clear_bit() calls are OK. */
-static_assert(sizeof(unsigned long) >= sizeof(access_mask_t));
-
-/* Ruleset access masks. */
-struct access_masks {
- access_mask_t fs : LANDLOCK_NUM_ACCESS_FS;
- access_mask_t net : LANDLOCK_NUM_ACCESS_NET;
- access_mask_t scope : LANDLOCK_NUM_SCOPE;
-};
-
-union access_masks_all {
- struct access_masks masks;
- u32 all;
-};
-
-/* Makes sure all fields are covered. */
-static_assert(sizeof(typeof_member(union access_masks_all, masks)) ==
- sizeof(typeof_member(union access_masks_all, all)));
-
-typedef u16 layer_mask_t;
-/* Makes sure all layers can be checked. */
-static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS);
+struct landlock_hierarchy;
/**
* struct landlock_layer - Access rights for a given layer
@@ -150,22 +111,6 @@ struct landlock_rule {
};
/**
- * struct landlock_hierarchy - Node in a ruleset hierarchy
- */
-struct landlock_hierarchy {
- /**
- * @parent: Pointer to the parent node, or NULL if it is a root
- * Landlock domain.
- */
- struct landlock_hierarchy *parent;
- /**
- * @usage: Number of potential children domains plus their parent
- * domain.
- */
- refcount_t usage;
-};
-
-/**
* struct landlock_ruleset - Landlock ruleset
*
* This data structure must contain unique entries, be updatable, and quick to
@@ -252,6 +197,9 @@ landlock_create_ruleset(const access_mask_t access_mask_fs,
void landlock_put_ruleset(struct landlock_ruleset *const ruleset);
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset);
+DEFINE_FREE(landlock_put_ruleset, struct landlock_ruleset *,
+ if (!IS_ERR_OR_NULL(_T)) landlock_put_ruleset(_T))
+
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
const struct landlock_id id,
const access_mask_t access);
@@ -295,36 +243,6 @@ landlock_union_access_masks(const struct landlock_ruleset *const domain)
return matches.masks;
}
-/**
- * landlock_get_applicable_domain - Return @domain if it applies to (handles)
- * at least one of the access rights specified
- * in @masks
- *
- * @domain: Landlock ruleset (used as a domain)
- * @masks: access masks
- *
- * Returns: @domain if any access rights specified in @masks is handled, or
- * NULL otherwise.
- */
-static inline const struct landlock_ruleset *
-landlock_get_applicable_domain(const struct landlock_ruleset *const domain,
- const struct access_masks masks)
-{
- const union access_masks_all masks_all = {
- .masks = masks,
- };
- union access_masks_all merge = {};
-
- if (!domain)
- return NULL;
-
- merge.masks = landlock_union_access_masks(domain);
- if (merge.all & masks_all.all)
- return domain;
-
- return NULL;
-}
-
static inline void
landlock_add_fs_access_mask(struct landlock_ruleset *const ruleset,
const access_mask_t fs_access_mask,
@@ -366,7 +284,7 @@ landlock_get_fs_access_mask(const struct landlock_ruleset *const ruleset,
{
/* Handles all initially denied by default access rights. */
return ruleset->access_masks[layer_level].fs |
- LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
+ _LANDLOCK_ACCESS_FS_INITIALLY_DENIED;
}
static inline access_mask_t
diff --git a/security/landlock/setup.c b/security/landlock/setup.c
index 28519a45b11f..bd53c7a56ab9 100644
--- a/security/landlock/setup.c
+++ b/security/landlock/setup.c
@@ -6,19 +6,27 @@
* Copyright © 2018-2020 ANSSI
*/
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>
#include "common.h"
#include "cred.h"
+#include "errata.h"
#include "fs.h"
+#include "id.h"
#include "net.h"
#include "setup.h"
#include "task.h"
bool landlock_initialized __ro_after_init = false;
+const struct lsm_id landlock_lsmid = {
+ .name = LANDLOCK_NAME,
+ .id = LSM_ID_LANDLOCK,
+};
+
struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_cred = sizeof(struct landlock_cred_security),
.lbs_file = sizeof(struct landlock_file_security),
@@ -26,17 +34,41 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_superblock = sizeof(struct landlock_superblock_security),
};
-const struct lsm_id landlock_lsmid = {
- .name = LANDLOCK_NAME,
- .id = LSM_ID_LANDLOCK,
-};
+int landlock_errata __ro_after_init;
+
+static void __init compute_errata(void)
+{
+ size_t i;
+
+#ifndef __has_include
+ /*
+ * This is a safeguard to make sure the compiler implements
+ * __has_include (see errata.h).
+ */
+ WARN_ON_ONCE(1);
+ return;
+#endif
+
+ for (i = 0; landlock_errata_init[i].number; i++) {
+ const int prev_errata = landlock_errata;
+
+ if (WARN_ON_ONCE(landlock_errata_init[i].abi >
+ landlock_abi_version))
+ continue;
+
+ landlock_errata |= BIT(landlock_errata_init[i].number - 1);
+ WARN_ON_ONCE(prev_errata == landlock_errata);
+ }
+}
static int __init landlock_init(void)
{
+ compute_errata();
landlock_add_cred_hooks();
landlock_add_task_hooks();
landlock_add_fs_hooks();
landlock_add_net_hooks();
+ landlock_init_id();
landlock_initialized = true;
pr_info("Up and running.\n");
return 0;
diff --git a/security/landlock/setup.h b/security/landlock/setup.h
index c4252d46d49d..fca307c35fee 100644
--- a/security/landlock/setup.h
+++ b/security/landlock/setup.h
@@ -11,7 +11,10 @@
#include <linux/lsm_hooks.h>
+extern const int landlock_abi_version;
+
extern bool landlock_initialized;
+extern int landlock_errata;
extern struct lsm_blob_sizes landlock_blob_sizes;
extern const struct lsm_id landlock_lsmid;
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 4ed8e70c25ed..0116e9f93ffe 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Landlock LSM - System call implementations and user space interfaces
+ * Landlock - System call implementations and user space interfaces
*
* Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2018-2020 ANSSI
+ * Copyright © 2021-2025 Microsoft Corporation
*/
#include <asm/current.h>
#include <linux/anon_inodes.h>
+#include <linux/bitops.h>
#include <linux/build_bug.h>
#include <linux/capability.h>
+#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
@@ -27,6 +30,7 @@
#include <uapi/linux/landlock.h>
#include "cred.h"
+#include "domain.h"
#include "fs.h"
#include "limits.h"
#include "net.h"
@@ -150,7 +154,14 @@ static const struct file_operations ruleset_fops = {
.write = fop_dummy_write,
};
-#define LANDLOCK_ABI_VERSION 6
+/*
+ * The Landlock ABI version should be incremented for each new Landlock-related
+ * user space visible change (e.g. Landlock syscalls). This version should
+ * only be incremented once per Linux release, and the date in
+ * Documentation/userspace-api/landlock.rst should be updated to reflect the
+ * UAPI change.
+ */
+const int landlock_abi_version = 7;
/**
* sys_landlock_create_ruleset - Create a new ruleset
@@ -159,14 +170,16 @@ static const struct file_operations ruleset_fops = {
* the new ruleset.
* @size: Size of the pointed &struct landlock_ruleset_attr (needed for
* backward and forward compatibility).
- * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
+ * @flags: Supported values:
+ *
+ * - %LANDLOCK_CREATE_RULESET_VERSION
+ * - %LANDLOCK_CREATE_RULESET_ERRATA
*
* This system call enables to create a new Landlock ruleset, and returns the
* related file descriptor on success.
*
- * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
- * 0, then the returned value is the highest supported Landlock ABI version
- * (starting at 1).
+ * If %LANDLOCK_CREATE_RULESET_VERSION or %LANDLOCK_CREATE_RULESET_ERRATA is
+ * set, then @attr must be NULL and @size must be 0.
*
* Possible returned errors are:
*
@@ -175,6 +188,9 @@ static const struct file_operations ruleset_fops = {
* - %E2BIG: @attr or @size inconsistencies;
* - %EFAULT: @attr or @size inconsistencies;
* - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_create_ruleset_flags
*/
SYSCALL_DEFINE3(landlock_create_ruleset,
const struct landlock_ruleset_attr __user *const, attr,
@@ -191,9 +207,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
return -EOPNOTSUPP;
if (flags) {
- if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr &&
- !size)
- return LANDLOCK_ABI_VERSION;
+ if (attr || size)
+ return -EINVAL;
+
+ if (flags == LANDLOCK_CREATE_RULESET_VERSION)
+ return landlock_abi_version;
+
+ if (flags == LANDLOCK_CREATE_RULESET_ERRATA)
+ return landlock_errata;
+
return -EINVAL;
}
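
With this hunk, both probing flags require @attr to be NULL and @size to be 0, and any other combination fails with -EINVAL. A hedged user-space sketch of probing the ABI version and decoding the errata bitmask (per compute_errata() above, a set bit N-1 means the fix for erratum N is built in); it assumes UAPI headers that already define LANDLOCK_CREATE_RULESET_ERRATA:

#include <linux/landlock.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        int abi, errata, i;

        abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
                      LANDLOCK_CREATE_RULESET_VERSION);
        if (abi < 0)
                return 1;	/* Landlock disabled or kernel too old */
        printf("Landlock ABI version: %d\n", abi);

        errata = syscall(__NR_landlock_create_ruleset, NULL, 0,
                         LANDLOCK_CREATE_RULESET_ERRATA);
        if (errata >= 0) {
                for (i = 0; i < 32; i++)
                        if (errata & (1 << i))
                                printf("erratum %d fixed\n", i + 1);
        }
        return 0;
}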
@@ -281,7 +303,6 @@ static int get_path_from_fd(const s32 fd, struct path *const path)
if ((fd_file(f)->f_op == &ruleset_fops) ||
(fd_file(f)->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
(fd_file(f)->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
- d_is_negative(fd_file(f)->f_path.dentry) ||
IS_PRIVATE(d_backing_inode(fd_file(f)->f_path.dentry)))
return -EBADFD;
@@ -398,8 +419,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
const enum landlock_rule_type, rule_type,
const void __user *const, rule_attr, const __u32, flags)
{
- struct landlock_ruleset *ruleset;
- int err;
+ struct landlock_ruleset *ruleset __free(landlock_put_ruleset) = NULL;
if (!is_initialized())
return -EOPNOTSUPP;
@@ -415,17 +435,12 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
switch (rule_type) {
case LANDLOCK_RULE_PATH_BENEATH:
- err = add_rule_path_beneath(ruleset, rule_attr);
- break;
+ return add_rule_path_beneath(ruleset, rule_attr);
case LANDLOCK_RULE_NET_PORT:
- err = add_rule_net_port(ruleset, rule_attr);
- break;
+ return add_rule_net_port(ruleset, rule_attr);
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
- landlock_put_ruleset(ruleset);
- return err;
}
/* Enforcement */
@@ -434,7 +449,11 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* sys_landlock_restrict_self - Enforce a ruleset on the calling thread
*
* @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
- * @flags: Must be 0.
+ * @flags: Supported values:
+ *
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
*
* This system call enables to enforce a Landlock ruleset on the current
* thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its
@@ -444,7 +463,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
- * - %EINVAL: @flags is not 0.
+ * - %EINVAL: @flags contains an unknown bit.
* - %EBADF: @ruleset_fd is not a file descriptor for the current thread;
* - %EBADFD: @ruleset_fd is not a ruleset file descriptor;
* - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the
@@ -452,14 +471,19 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* %CAP_SYS_ADMIN in its namespace.
* - %E2BIG: The maximum number of stacked rulesets is reached for the current
* thread.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_restrict_self_flags
*/
SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
flags)
{
- struct landlock_ruleset *new_dom, *ruleset;
+ struct landlock_ruleset *new_dom,
+ *ruleset __free(landlock_put_ruleset) = NULL;
struct cred *new_cred;
struct landlock_cred_security *new_llcred;
- int err;
+ bool __maybe_unused log_same_exec, log_new_exec, log_subdomains,
+ prev_log_subdomains;
if (!is_initialized())
return -EOPNOTSUPP;
@@ -472,44 +496,75 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
- /* No flag for now. */
- if (flags)
+ if ((flags | LANDLOCK_MASK_RESTRICT_SELF) !=
+ LANDLOCK_MASK_RESTRICT_SELF)
return -EINVAL;
- /* Gets and checks the ruleset. */
- ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ);
- if (IS_ERR(ruleset))
- return PTR_ERR(ruleset);
+ /* Translates "off" flag to boolean. */
+ log_same_exec = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF);
+ /* Translates "on" flag to boolean. */
+ log_new_exec = !!(flags & LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON);
+ /* Translates "off" flag to boolean. */
+ log_subdomains = !(flags & LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
+
+ /*
+ * It is allowed to set LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF with
+ * -1 as ruleset_fd, provided no other flag is set.
+ */
+ if (!(ruleset_fd == -1 &&
+ flags == LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF)) {
+ /* Gets and checks the ruleset. */
+ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+ }
/* Prepares new credentials. */
new_cred = prepare_creds();
- if (!new_cred) {
- err = -ENOMEM;
- goto out_put_ruleset;
- }
+ if (!new_cred)
+ return -ENOMEM;
+
new_llcred = landlock_cred(new_cred);
+#ifdef CONFIG_AUDIT
+ prev_log_subdomains = !new_llcred->log_subdomains_off;
+ new_llcred->log_subdomains_off = !prev_log_subdomains ||
+ !log_subdomains;
+#endif /* CONFIG_AUDIT */
+
+ /*
+ * The only case when a ruleset may not be set is if
+ * LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF is set and ruleset_fd is -1.
+ * We could optimize this case by not calling commit_creds() if this flag
+ * was already set, but it is not worth the complexity.
+ */
+ if (!ruleset)
+ return commit_creds(new_cred);
+
/*
* There is no possible race condition while copying and manipulating
* the current credentials because they are dedicated per thread.
*/
new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset);
if (IS_ERR(new_dom)) {
- err = PTR_ERR(new_dom);
- goto out_put_creds;
+ abort_creds(new_cred);
+ return PTR_ERR(new_dom);
}
+#ifdef CONFIG_AUDIT
+ new_dom->hierarchy->log_same_exec = log_same_exec;
+ new_dom->hierarchy->log_new_exec = log_new_exec;
+ if ((!log_same_exec && !log_new_exec) || !prev_log_subdomains)
+ new_dom->hierarchy->log_status = LANDLOCK_LOG_DISABLED;
+#endif /* CONFIG_AUDIT */
+
/* Replaces the old (prepared) domain. */
landlock_put_ruleset(new_llcred->domain);
new_llcred->domain = new_dom;
- landlock_put_ruleset(ruleset);
- return commit_creds(new_cred);
+#ifdef CONFIG_AUDIT
+ new_llcred->domain_exec |= BIT(new_dom->num_layers - 1);
+#endif /* CONFIG_AUDIT */
-out_put_creds:
- abort_creds(new_cred);
-
-out_put_ruleset:
- landlock_put_ruleset(ruleset);
- return err;
+ return commit_creds(new_cred);
}
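
The new flags only affect audit logging, not access control: they can be combined with a ruleset, and LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF can also be applied on its own with ruleset_fd set to -1, as handled above. A hedged user-space sketch (hypothetical helper, ruleset_fd obtained elsewhere, error handling elided):

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int enforce_quietly(const int ruleset_fd)
{
        /* Required once per thread before landlock_restrict_self(). */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;

        /* Enforces the ruleset but does not log denials for this execution. */
        if (syscall(__NR_landlock_restrict_self, ruleset_fd,
                    LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF))
                return -1;

        /* Also asks future nested domains not to log, without a new ruleset. */
        return syscall(__NR_landlock_restrict_self, -1,
                       LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF);
}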
diff --git a/security/landlock/task.c b/security/landlock/task.c
index dc7dab78392e..2385017418ca 100644
--- a/security/landlock/task.c
+++ b/security/landlock/task.c
@@ -1,23 +1,29 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Landlock LSM - Ptrace hooks
+ * Landlock - Ptrace and scope hooks
*
* Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2019-2020 ANSSI
+ * Copyright © 2024-2025 Microsoft Corporation
*/
#include <asm/current.h>
+#include <linux/cleanup.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/lsm_audit.h>
#include <linux/lsm_hooks.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <net/af_unix.h>
#include <net/sock.h>
+#include "audit.h"
#include "common.h"
#include "cred.h"
+#include "domain.h"
#include "fs.h"
#include "ruleset.h"
#include "setup.h"
@@ -37,41 +43,29 @@ static bool domain_scope_le(const struct landlock_ruleset *const parent,
{
const struct landlock_hierarchy *walker;
+ /* Quick return for non-landlocked tasks. */
if (!parent)
return true;
+
if (!child)
return false;
+
for (walker = child->hierarchy; walker; walker = walker->parent) {
if (walker == parent->hierarchy)
/* @parent is in the scoped hierarchy of @child. */
return true;
}
+
/* There is no relationship between @parent and @child. */
return false;
}
-static bool task_is_scoped(const struct task_struct *const parent,
- const struct task_struct *const child)
-{
- bool is_scoped;
- const struct landlock_ruleset *dom_parent, *dom_child;
-
- rcu_read_lock();
- dom_parent = landlock_get_task_domain(parent);
- dom_child = landlock_get_task_domain(child);
- is_scoped = domain_scope_le(dom_parent, dom_child);
- rcu_read_unlock();
- return is_scoped;
-}
-
-static int task_ptrace(const struct task_struct *const parent,
- const struct task_struct *const child)
+static int domain_ptrace(const struct landlock_ruleset *const parent,
+ const struct landlock_ruleset *const child)
{
- /* Quick return for non-landlocked tasks. */
- if (!landlocked(parent))
- return 0;
- if (task_is_scoped(parent, child))
+ if (domain_scope_le(parent, child))
return 0;
+
return -EPERM;
}
@@ -91,7 +85,39 @@ static int task_ptrace(const struct task_struct *const parent,
static int hook_ptrace_access_check(struct task_struct *const child,
const unsigned int mode)
{
- return task_ptrace(current, child);
+ const struct landlock_cred_security *parent_subject;
+ const struct landlock_ruleset *child_dom;
+ int err;
+
+ /* Quick return for non-landlocked tasks. */
+ parent_subject = landlock_cred(current_cred());
+ if (!parent_subject)
+ return 0;
+
+ scoped_guard(rcu)
+ {
+ child_dom = landlock_get_task_domain(child);
+ err = domain_ptrace(parent_subject->domain, child_dom);
+ }
+
+ if (!err)
+ return 0;
+
+ /*
+ * For the ptrace_access_check case, we log the current/parent domain
+ * and the child task.
+ */
+ if (!(mode & PTRACE_MODE_NOAUDIT))
+ landlock_log_denial(parent_subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_PTRACE,
+ .audit = {
+ .type = LSM_AUDIT_DATA_TASK,
+ .u.tsk = child,
+ },
+ .layer_plus_one = parent_subject->domain->num_layers,
+ });
+
+ return err;
}
/**
@@ -108,7 +134,35 @@ static int hook_ptrace_access_check(struct task_struct *const child,
*/
static int hook_ptrace_traceme(struct task_struct *const parent)
{
- return task_ptrace(parent, current);
+ const struct landlock_cred_security *parent_subject;
+ const struct landlock_ruleset *child_dom;
+ int err;
+
+ child_dom = landlock_get_current_domain();
+
+ guard(rcu)();
+ parent_subject = landlock_cred(__task_cred(parent));
+ err = domain_ptrace(parent_subject->domain, child_dom);
+
+ if (!err)
+ return 0;
+
+ /*
+ * For the ptrace_traceme case, we log the domain which is the cause of
+ * the denial, which means the parent domain instead of the current
+ * domain. This may look unusual because the ptrace_traceme action is a
+ * request to be traced, but the semantics are consistent with
+ * hook_ptrace_access_check().
+ */
+ landlock_log_denial(parent_subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_PTRACE,
+ .audit = {
+ .type = LSM_AUDIT_DATA_TASK,
+ .u.tsk = current,
+ },
+ .layer_plus_one = parent_subject->domain->num_layers,
+ });
+ return err;
}
/**
@@ -127,7 +181,7 @@ static bool domain_is_scoped(const struct landlock_ruleset *const client,
access_mask_t scope)
{
int client_layer, server_layer;
- struct landlock_hierarchy *client_walker, *server_walker;
+ const struct landlock_hierarchy *client_walker, *server_walker;
/* Quick return if client has no domain */
if (WARN_ON_ONCE(!client))
@@ -212,28 +266,43 @@ static int hook_unix_stream_connect(struct sock *const sock,
struct sock *const other,
struct sock *const newsk)
{
- const struct landlock_ruleset *const dom =
- landlock_get_applicable_domain(landlock_get_current_domain(),
- unix_scope);
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), unix_scope,
+ &handle_layer);
/* Quick return for non-landlocked tasks. */
- if (!dom)
+ if (!subject)
+ return 0;
+
+ if (!is_abstract_socket(other))
return 0;
- if (is_abstract_socket(other) && sock_is_scoped(other, dom))
- return -EPERM;
+ if (!sock_is_scoped(other, subject->domain))
+ return 0;
- return 0;
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET,
+ .audit = {
+ .type = LSM_AUDIT_DATA_NET,
+ .u.net = &(struct lsm_network_audit) {
+ .sk = other,
+ },
+ },
+ .layer_plus_one = handle_layer + 1,
+ });
+ return -EPERM;
}
static int hook_unix_may_send(struct socket *const sock,
struct socket *const other)
{
- const struct landlock_ruleset *const dom =
- landlock_get_applicable_domain(landlock_get_current_domain(),
- unix_scope);
+ size_t handle_layer;
+ const struct landlock_cred_security *const subject =
+ landlock_get_applicable_subject(current_cred(), unix_scope,
+ &handle_layer);
- if (!dom)
+ if (!subject)
return 0;
/*
@@ -243,10 +312,23 @@ static int hook_unix_may_send(struct socket *const sock,
if (unix_peer(sock->sk) == other->sk)
return 0;
- if (is_abstract_socket(other->sk) && sock_is_scoped(other->sk, dom))
- return -EPERM;
+ if (!is_abstract_socket(other->sk))
+ return 0;
+
+ if (!sock_is_scoped(other->sk, subject->domain))
+ return 0;
- return 0;
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_SCOPE_ABSTRACT_UNIX_SOCKET,
+ .audit = {
+ .type = LSM_AUDIT_DATA_NET,
+ .u.net = &(struct lsm_network_audit) {
+ .sk = other->sk,
+ },
+ },
+ .layer_plus_one = handle_layer + 1,
+ });
+ return -EPERM;
}
static const struct access_masks signal_scope = {
@@ -255,56 +337,97 @@ static const struct access_masks signal_scope = {
static int hook_task_kill(struct task_struct *const p,
struct kernel_siginfo *const info, const int sig,
- const struct cred *const cred)
+ const struct cred *cred)
{
bool is_scoped;
- const struct landlock_ruleset *dom;
-
- if (cred) {
- /* Dealing with USB IO. */
- dom = landlock_cred(cred)->domain;
- } else {
- dom = landlock_get_current_domain();
+ size_t handle_layer;
+ const struct landlock_cred_security *subject;
+
+ if (!cred) {
+ /*
+ * Always allow sending signals between threads of the same process.
+ * This is required for process credential changes by the Native POSIX
+ * Threads Library and implemented by the set*id(2) wrappers and
+ * libcap(3) with tgkill(2). See nptl(7) and libpsx(3).
+ *
+ * This exception is similar to the __ptrace_may_access() one.
+ */
+ if (same_thread_group(p, current))
+ return 0;
+
+ /* Not dealing with USB IO. */
+ cred = current_cred();
}
- dom = landlock_get_applicable_domain(dom, signal_scope);
+
+ subject = landlock_get_applicable_subject(cred, signal_scope,
+ &handle_layer);
/* Quick return for non-landlocked tasks. */
- if (!dom)
+ if (!subject)
return 0;
- rcu_read_lock();
- is_scoped = domain_is_scoped(dom, landlock_get_task_domain(p),
- LANDLOCK_SCOPE_SIGNAL);
- rcu_read_unlock();
- if (is_scoped)
- return -EPERM;
+ scoped_guard(rcu)
+ {
+ is_scoped = domain_is_scoped(subject->domain,
+ landlock_get_task_domain(p),
+ signal_scope.scope);
+ }
+
+ if (!is_scoped)
+ return 0;
- return 0;
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_SCOPE_SIGNAL,
+ .audit = {
+ .type = LSM_AUDIT_DATA_TASK,
+ .u.tsk = p,
+ },
+ .layer_plus_one = handle_layer + 1,
+ });
+ return -EPERM;
}
static int hook_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
- const struct landlock_ruleset *dom;
+ const struct landlock_cred_security *subject;
bool is_scoped = false;
/* Lock already held by send_sigio() and send_sigurg(). */
lockdep_assert_held(&fown->lock);
- dom = landlock_get_applicable_domain(
- landlock_file(fown->file)->fown_domain, signal_scope);
+ subject = &landlock_file(fown->file)->fown_subject;
- /* Quick return for unowned socket. */
- if (!dom)
+ /*
+ * Quick return for unowned socket.
+ *
+ * subject->domain has already been filtered when saved by
+ * hook_file_set_fowner(), so there is no need to call
+ * landlock_get_applicable_subject() here.
+ */
+ if (!subject->domain)
return 0;
- rcu_read_lock();
- is_scoped = domain_is_scoped(dom, landlock_get_task_domain(tsk),
- LANDLOCK_SCOPE_SIGNAL);
- rcu_read_unlock();
- if (is_scoped)
- return -EPERM;
+ scoped_guard(rcu)
+ {
+ is_scoped = domain_is_scoped(subject->domain,
+ landlock_get_task_domain(tsk),
+ signal_scope.scope);
+ }
+
+ if (!is_scoped)
+ return 0;
- return 0;
+ landlock_log_denial(subject, &(struct landlock_request) {
+ .type = LANDLOCK_REQUEST_SCOPE_SIGNAL,
+ .audit = {
+ .type = LSM_AUDIT_DATA_TASK,
+ .u.tsk = tsk,
+ },
+#ifdef CONFIG_AUDIT
+ .layer_plus_one = landlock_file(fown->file)->fown_layer + 1,
+#endif /* CONFIG_AUDIT */
+ });
+ return -EPERM;
}
static struct security_hook_list landlock_hooks[] __ro_after_init = {
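
Both signal hooks above deny delivery only when the target task lives outside the sender's scoped domain, and the abstract UNIX socket hooks earlier in this file apply the same scoping to connect and send. A hedged user-space sketch of creating such a scope, which a process enforces on itself (hypothetical helper, error handling simplified):

#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int scope_signals_and_unix(void)
{
        struct landlock_ruleset_attr ruleset_attr = {
                .scoped = LANDLOCK_SCOPE_SIGNAL |
                          LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET,
        };
        int ruleset_fd, err;

        ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
                             sizeof(ruleset_attr), 0);
        if (ruleset_fd < 0)
                return -1;

        err = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
              syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
        close(ruleset_fd);
        return err ? -1 : 0;
}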
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index 848f8b4a6019..aef63d3e30df 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -16,7 +16,7 @@ config SECURITY_LOADPIN_ENFORCE
depends on SECURITY_LOADPIN
# Module compression breaks LoadPin unless modules are decompressed in
# the kernel.
- depends on !MODULES || (MODULE_COMPRESS_NONE || MODULE_DECOMPRESS)
+ depends on !MODULE_COMPRESS || MODULE_DECOMPRESS
help
If selected, LoadPin will enforce pinning at boot. If not
selected, it can be enabled at boot with the kernel parameter
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index f2bdbd55aa2b..cf83afa1d879 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -96,7 +96,7 @@ static int __init lockdown_lsm_init(void)
static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count,
loff_t *ppos)
{
- char temp[80];
+ char temp[80] = "";
int i, offset = 0;
for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 9a8352972086..7d623b00495c 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -3,7 +3,7 @@
* common LSM auditing functions
*
* Based on code written for SELinux by :
- * Stephen Smalley, <sds@tycho.nsa.gov>
+ * Stephen Smalley
* James Morris <jmorris@redhat.com>
* Author : Etienne Basset, <etienne.basset@ensta.org>
*/
@@ -24,7 +24,6 @@
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/dccp.h>
#include <linux/sctp.h>
#include <linux/lsm_audit.h>
#include <linux/security.h>
@@ -68,13 +67,6 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
ad->u.net->dport = uh->dest;
break;
}
- case IPPROTO_DCCP: {
- struct dccp_hdr *dh = dccp_hdr(skb);
-
- ad->u.net->sport = dh->dccph_sport;
- ad->u.net->dport = dh->dccph_dport;
- break;
- }
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
@@ -140,17 +132,6 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
ad->u.net->dport = uh->dest;
break;
}
- case IPPROTO_DCCP: {
- struct dccp_hdr _dccph, *dh;
-
- dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
- if (dh == NULL)
- break;
-
- ad->u.net->sport = dh->dccph_sport;
- ad->u.net->dport = dh->dccph_dport;
- break;
- }
case IPPROTO_SCTP: {
struct sctphdr _sctph, *sh;
@@ -171,7 +152,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
static inline void print_ipv6_addr(struct audit_buffer *ab,
const struct in6_addr *addr, __be16 port,
- char *name1, char *name2)
+ const char *name1, const char *name2)
{
if (!ipv6_addr_any(addr))
audit_log_format(ab, " %s=%pI6c", name1, addr);
@@ -180,7 +161,7 @@ static inline void print_ipv6_addr(struct audit_buffer *ab,
}
static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
- __be16 port, char *name1, char *name2)
+ __be16 port, const char *name1, const char *name2)
{
if (addr)
audit_log_format(ab, " %s=%pI4", name1, &addr);
@@ -189,16 +170,13 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
}
/**
- * dump_common_audit_data - helper to dump common audit data
+ * audit_log_lsm_data - helper to log common LSM audit data
* @ab : the audit buffer
* @a : common audit data
- *
*/
-static void dump_common_audit_data(struct audit_buffer *ab,
- struct common_audit_data *a)
+void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a)
{
- char comm[sizeof(current->comm)];
-
/*
* To keep stack sizes in check force programmers to notice if they
* start making this union too large! See struct lsm_network_audit
@@ -206,9 +184,6 @@ static void dump_common_audit_data(struct audit_buffer *ab,
*/
BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
- audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
- audit_log_untrustedstring(ab, get_task_comm(comm, current));
-
switch (a->type) {
case LSM_AUDIT_DATA_NONE:
return;
@@ -299,10 +274,10 @@ static void dump_common_audit_data(struct audit_buffer *ab,
if (tsk) {
pid_t pid = task_tgid_nr(tsk);
if (pid) {
- char comm[sizeof(tsk->comm)];
+ char tskcomm[sizeof(tsk->comm)];
audit_log_format(ab, " opid=%d ocomm=", pid);
audit_log_untrustedstring(ab,
- get_task_comm(comm, tsk));
+ get_task_comm(tskcomm, tsk));
}
}
break;
@@ -425,10 +400,28 @@ static void dump_common_audit_data(struct audit_buffer *ab,
case LSM_AUDIT_DATA_ANONINODE:
audit_log_format(ab, " anonclass=%s", a->u.anonclass);
break;
+ case LSM_AUDIT_DATA_NLMSGTYPE:
+ audit_log_format(ab, " nl-msgtype=%hu", a->u.nlmsg_type);
+ break;
} /* switch (a->type) */
}
/**
+ * dump_common_audit_data - helper to dump common audit data
+ * @ab : the audit buffer
+ * @a : common audit data
+ */
+static void dump_common_audit_data(struct audit_buffer *ab,
+ const struct common_audit_data *a)
+{
+ char comm[sizeof(current->comm)];
+
+ audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
+ audit_log_untrustedstring(ab, get_task_comm(comm, current));
+ audit_log_lsm_data(ab, a);
+}
+
+/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
* @pre_audit: lsm-specific pre-audit callback
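
The hunk above splits audit_log_lsm_data() out of dump_common_audit_data(), so an LSM can append the common object description to an audit record it opens itself, without the pid=/comm= prefix that dump_common_audit_data() still emits for common_lsm_audit() callers. A hedged sketch of such a caller; the record type and message text are illustrative only:

        #include <linux/audit.h>
        #include <linux/gfp.h>
        #include <linux/lsm_audit.h>

        static void example_log_denial(const struct common_audit_data *ad)
        {
                struct audit_buffer *ab;

                ab = audit_log_start(audit_context(), GFP_ATOMIC, AUDIT_AVC);
                if (!ab)
                        return;
                audit_log_format(ab, "example-denial");
                /* Appends the type-specific object data (path, task, net, ...). */
                audit_log_lsm_data(ab, ad);
                audit_log_end(ab);
        }
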
diff --git a/security/min_addr.c b/security/min_addr.c
index 0ce267c041ab..df1bc643d886 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -44,8 +44,19 @@ int mmap_min_addr_handler(const struct ctl_table *table, int write,
return ret;
}
+static const struct ctl_table min_addr_sysctl_table[] = {
+ {
+ .procname = "mmap_min_addr",
+ .data = &dac_mmap_min_addr,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = mmap_min_addr_handler,
+ },
+};
+
static int __init init_mmap_min_addr(void)
{
+ register_sysctl_init("vm", min_addr_sysctl_table);
update_mmap_min_addr();
return 0;
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index 25310468bcdd..8e1ffd70b18a 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -143,6 +143,9 @@ static ssize_t handle_policy_update(struct file *file,
char *buf, *p, *end;
int err;
+ if (len >= KMALLOC_MAX_SIZE)
+ return -EINVAL;
+
pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL);
if (!pol)
return -ENOMEM;
diff --git a/security/security.c b/security/security.c
index 09664e09fec9..ad163f06bf7a 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1248,6 +1248,12 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
* to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm
* contains the linux_binprm structure.
*
+ * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is
+ * set. The result must be the same as without this flag even if the execution
+ * will never really happen and @bprm will always be dropped.
+ *
+ * This hook must not change current->cred, only @bprm->cred.
+ *
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -1735,8 +1741,7 @@ void security_inode_free(struct inode *inode)
* @mode: mode used to determine resource type
* @name: name of the last path component
* @xattr_name: name of the security/LSM xattr
- * @ctx: pointer to the resulting LSM context
- * @ctxlen: length of @ctx
+ * @lsmctx: pointer to the resulting LSM context
*
* Compute a context for a dentry as the inode is not yet available since NFSv4
* has no label backed by an EA anyway. It is important to note that
@@ -1746,11 +1751,11 @@ void security_inode_free(struct inode *inode)
*/
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
- const char **xattr_name, void **ctx,
- u32 *ctxlen)
+ const char **xattr_name,
+ struct lsm_context *lsmctx)
{
return call_int_hook(dentry_init_security, dentry, mode, name,
- xattr_name, ctx, ctxlen);
+ xattr_name, lsmctx);
}
EXPORT_SYMBOL(security_dentry_init_security);
@@ -2176,7 +2181,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
}
/**
- * security_inode_mkdir() - Check if creation a new director is allowed
+ * security_inode_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
@@ -2618,6 +2623,36 @@ void security_inode_post_removexattr(struct dentry *dentry, const char *name)
}
/**
+ * security_inode_file_setattr() - check if setting fsxattr is allowed
+ * @dentry: file to set filesystem extended attributes on
+ * @fa: extended attributes to set on the inode
+ *
+ * Called when the file_setattr() syscall or the FS_IOC_FSSETXATTR ioctl() is
+ * called on an inode.
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa)
+{
+ return call_int_hook(inode_file_setattr, dentry, fa);
+}
+
+/**
+ * security_inode_file_getattr() - check if retrieving fsxattr is allowed
+ * @dentry: file to retrieve filesystem extended attributes from
+ * @fa: extended attributes to get
+ *
+ * Called when the file_getattr() syscall or the FS_IOC_FSGETXATTR ioctl() is
+ * called on an inode.
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa)
+{
+ return call_int_hook(inode_file_getattr, dentry, fa);
+}
+
+/**
* security_inode_need_killpriv() - Check if security_inode_killpriv() required
* @dentry: associated dentry
*
@@ -3098,6 +3133,10 @@ int security_file_receive(struct file *file)
* Save open-time permission checking state for later use upon file_permission,
* and recheck access if anything has changed since inode_permission.
*
+ * We can check if a file is opened for execution (e.g. an execve(2) call),
+ * either directly or indirectly (e.g. ELF's ld.so), by checking
+ * file->f_flags & __FMODE_EXEC.
+ *
* Return: Returns 0 if permission is granted.
*/
int security_file_open(struct file *file)
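
As the new comment notes, a file_open hook can tell execution opens apart from ordinary ones by testing __FMODE_EXEC in file->f_flags. A minimal sketch of such a check inside a hypothetical hook (not one of the hooks touched by this patch):

        #include <linux/fs.h>
        #include <linux/printk.h>

        static int example_file_open(struct file *file)
        {
                /* Set for execve(2) and for indirect execution, e.g. ld.so mapping the ELF. */
                if (file->f_flags & __FMODE_EXEC)
                        pr_debug("example: file opened for execution\n");

                return 0;
        }
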
@@ -4139,10 +4178,8 @@ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx,
if (base)
uctx = (struct lsm_ctx __user *)(base + total);
rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags);
- if (rc == -EOPNOTSUPP) {
- rc = 0;
+ if (rc == -EOPNOTSUPP)
continue;
- }
if (rc == -E2BIG) {
rc = 0;
left = 0;
@@ -4270,24 +4307,6 @@ int security_setprocattr(int lsmid, const char *name, void *value, size_t size)
}
/**
- * security_netlink_send() - Save info and check if netlink sending is allowed
- * @sk: sending socket
- * @skb: netlink message
- *
- * Save security information for a netlink message so that permission checking
- * can be performed when the message is processed. The security information
- * can be saved using the eff_cap field of the netlink_skb_parms structure.
- * Also may be used to provide fine grained control over message transmission.
- *
- * Return: Returns 0 if the information was successfully saved and message is
- * allowed to be transmitted.
- */
-int security_netlink_send(struct sock *sk, struct sk_buff *skb)
-{
- return call_int_hook(netlink_send, sk, skb);
-}
-
-/**
* security_ismaclabel() - Check if the named attribute is a MAC label
* @name: full extended attribute name
*
@@ -4304,40 +4323,36 @@ EXPORT_SYMBOL(security_ismaclabel);
/**
* security_secid_to_secctx() - Convert a secid to a secctx
* @secid: secid
- * @secdata: secctx
- * @seclen: secctx length
+ * @cp: the LSM context
*
- * Convert secid to security context. If @secdata is NULL the length of the
- * result will be returned in @seclen, but no @secdata will be returned. This
+ * Convert secid to security context. If @cp is NULL the length of the
+ * result will be returned, but no data will be returned. This
* does mean that the length could change between calls to check the length and
- * the next call which actually allocates and returns the @secdata.
+ * the next call which actually allocates and returns the data.
*
- * Return: Return 0 on success, error on failure.
+ * Return: Return length of data on success, error on failure.
*/
-int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
- return call_int_hook(secid_to_secctx, secid, secdata, seclen);
+ return call_int_hook(secid_to_secctx, secid, cp);
}
EXPORT_SYMBOL(security_secid_to_secctx);
/**
* security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx
* @prop: lsm specific information
- * @secdata: secctx
- * @seclen: secctx length
+ * @cp: the LSM context
*
- * Convert a @prop entry to security context. If @secdata is NULL the
- * length of the result will be returned in @seclen, but no @secdata
- * will be returned. This does mean that the length could change between
- * calls to check the length and the next call which actually allocates
- * and returns the @secdata.
+ * Convert a @prop entry to security context. If @cp is NULL the
+ * length of the result will be returned. This does mean that the
+ * length could change between calls to check the length and the
+ * next call which actually allocates and returns the @cp.
*
- * Return: Return 0 on success, error on failure.
+ * Return: Return length of data on success, error on failure.
*/
-int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
- u32 *seclen)
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp)
{
- return call_int_hook(lsmprop_to_secctx, prop, secdata, seclen);
+ return call_int_hook(lsmprop_to_secctx, prop, cp);
}
EXPORT_SYMBOL(security_lsmprop_to_secctx);
@@ -4360,14 +4375,14 @@ EXPORT_SYMBOL(security_secctx_to_secid);
/**
* security_release_secctx() - Free a secctx buffer
- * @secdata: secctx
- * @seclen: length of secctx
+ * @cp: the security context
*
* Release the security context.
*/
-void security_release_secctx(char *secdata, u32 seclen)
+void security_release_secctx(struct lsm_context *cp)
{
- call_void_hook(release_secctx, secdata, seclen);
+ call_void_hook(release_secctx, cp);
+ memset(cp, 0, sizeof(*cp));
}
EXPORT_SYMBOL(security_release_secctx);
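
Taken together, the changes above replace the old (char **secdata, u32 *seclen) pairs with a single struct lsm_context carrying the context string, its length and the owning LSM id, and make the conversion helpers return the length on success. A hedged sketch of the resulting caller pattern; the wrapper function itself is hypothetical:

        #include <linux/printk.h>
        #include <linux/security.h>

        static int example_print_secctx(u32 secid)
        {
                struct lsm_context ctx;
                int len;

                len = security_secid_to_secctx(secid, &ctx);
                if (len < 0)
                        return len;             /* no LSM provided a context */
                pr_info("secctx=%.*s\n", (int)ctx.len, ctx.context);
                security_release_secctx(&ctx); /* frees the context and zeroes ctx */
                return 0;
        }
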
@@ -4430,17 +4445,17 @@ EXPORT_SYMBOL(security_inode_setsecctx);
/**
* security_inode_getsecctx() - Get the security label of an inode
* @inode: inode
- * @ctx: secctx
- * @ctxlen: length of secctx
+ * @cp: security context
*
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
+ * On success, returns 0 and fills out @cp with the security context
+ * for the given @inode.
*
* Return: Returns 0 on success, error on failure.
*/
-int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
{
- return call_int_hook(inode_getsecctx, inode, ctx, ctxlen);
+ memset(cp, 0, sizeof(*cp));
+ return call_int_hook(inode_getsecctx, inode, cp);
}
EXPORT_SYMBOL(security_inode_getsecctx);
@@ -4481,6 +4496,24 @@ int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
/**
+ * security_netlink_send() - Save info and check if netlink sending is allowed
+ * @sk: sending socket
+ * @skb: netlink message
+ *
+ * Save security information for a netlink message so that permission checking
+ * can be performed when the message is processed. The security information
+ * can be saved using the eff_cap field of the netlink_skb_parms structure.
+ * Also may be used to provide fine grained control over message transmission.
+ *
+ * Return: Returns 0 if the information was successfully saved and message is
+ * allowed to be transmitted.
+ */
+int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return call_int_hook(netlink_send, sk, skb);
+}
+
+/**
* security_unix_stream_connect() - Check if a AF_UNIX stream is allowed
* @sock: originating sock
* @other: peer sock
@@ -5624,6 +5657,7 @@ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
* @cmd: command
* @attr: bpf attribute
* @size: size
+ * @kernel: whether or not the call originated from the kernel
*
* Do a initial check for all bpf syscalls after the attribute is copied into
* the kernel. The actual security module can implement their own rules to
@@ -5631,9 +5665,9 @@ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
*
* Return: Returns 0 if permission is granted.
*/
-int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
+int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
{
- return call_int_hook(bpf, cmd, attr, size);
+ return call_int_hook(bpf, cmd, attr, size, kernel);
}
/**
@@ -5670,6 +5704,7 @@ int security_bpf_prog(struct bpf_prog *prog)
* @map: BPF map object
* @attr: BPF syscall attributes used to create BPF map
* @token: BPF token used to grant user access
+ * @kernel: whether or not the call originated from the kernel
*
* Do a check when the kernel creates a new BPF map. This is also the
* point where LSM blob is allocated for LSMs that need them.
@@ -5677,9 +5712,9 @@ int security_bpf_prog(struct bpf_prog *prog)
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_map_create, map, attr, token);
+ return call_int_hook(bpf_map_create, map, attr, token, kernel);
}
/**
@@ -5687,6 +5722,7 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
* @prog: BPF program object
* @attr: BPF syscall attributes used to create BPF program
* @token: BPF token used to grant user access to BPF subsystem
+ * @kernel: whether or not the call originated from the kernel
*
* Perform an access control check when the kernel loads a BPF program and
* allocates associated BPF program object. This hook is also responsible for
@@ -5695,9 +5731,9 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
- return call_int_hook(bpf_prog_load, prog, attr, token);
+ return call_int_hook(bpf_prog_load, prog, attr, token, kernel);
}
/**
@@ -5880,16 +5916,15 @@ EXPORT_SYMBOL(security_bdev_setintegrity);
#ifdef CONFIG_PERF_EVENTS
/**
* security_perf_event_open() - Check if a perf event open is allowed
- * @attr: perf event attribute
* @type: type of event
*
* Check whether the @type of perf_event_open syscall is allowed.
*
* Return: Returns 0 if permission is granted.
*/
-int security_perf_event_open(struct perf_event_attr *attr, int type)
+int security_perf_event_open(int type)
{
- return call_int_hook(perf_event_open, attr, type);
+ return call_int_hook(perf_event_open, type);
}
/**
@@ -5996,6 +6031,18 @@ int security_uring_cmd(struct io_uring_cmd *ioucmd)
{
return call_int_hook(uring_cmd, ioucmd);
}
+
+/**
+ * security_uring_allowed() - Check if io_uring_setup() is allowed
+ *
+ * Check whether the current task is allowed to call io_uring_setup().
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_uring_allowed(void)
+{
+ return call_int_hook(uring_allowed);
+}
#endif /* CONFIG_IO_URING */
/**
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 86f0575f670d..66e56e9011df 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -33,11 +33,10 @@ $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_genhdrs = GEN $(addprefix $(obj)/,$(genhdrs))
cmd_genhdrs = $< $(addprefix $(obj)/,$(genhdrs))
-# see the note above, replace the $targets and 'flask.h' rule with the lines
-# below:
-# targets += $(genhdrs)
+targets += $(genhdrs)
+
+# see the note above, replace the 'flask.h' rule with the line below:
# $(addprefix $(obj)/,$(genhdrs)) &: $(obj)/genheaders FORCE
-targets += flask.h
$(obj)/flask.h: $(obj)/genheaders FORCE
$(call if_changed,genhdrs)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index cc0b0af20296..4b4837a20225 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -174,13 +174,15 @@ int avc_get_hash_stats(char *page)
* using a linked list for extended_perms_decision lookup because the list is
* always small. i.e. less than 5, typically 1
*/
-static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver,
- struct avc_xperms_node *xp_node)
+static struct extended_perms_decision *
+avc_xperms_decision_lookup(u8 driver, u8 base_perm,
+ struct avc_xperms_node *xp_node)
{
struct avc_xperms_decision_node *xpd_node;
list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) {
- if (xpd_node->xpd.driver == driver)
+ if (xpd_node->xpd.driver == driver &&
+ xpd_node->xpd.base_perm == base_perm)
return &xpd_node->xpd;
}
return NULL;
@@ -205,11 +207,12 @@ avc_xperms_has_perm(struct extended_perms_decision *xpd,
}
static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node,
- u8 driver, u8 perm)
+ u8 driver, u8 base_perm, u8 perm)
{
struct extended_perms_decision *xpd;
security_xperm_set(xp_node->xp.drivers.p, driver);
- xpd = avc_xperms_decision_lookup(driver, xp_node);
+ xp_node->xp.base_perms |= base_perm;
+ xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node);
if (xpd && xpd->allowed)
security_xperm_set(xpd->allowed->p, perm);
}
@@ -245,6 +248,7 @@ static void avc_xperms_free(struct avc_xperms_node *xp_node)
static void avc_copy_xperms_decision(struct extended_perms_decision *dest,
struct extended_perms_decision *src)
{
+ dest->base_perm = src->base_perm;
dest->driver = src->driver;
dest->used = src->used;
if (dest->used & XPERMS_ALLOWED)
@@ -272,6 +276,7 @@ static inline void avc_quick_copy_xperms_decision(u8 perm,
*/
u8 i = perm >> 5;
+ dest->base_perm = src->base_perm;
dest->used = src->used;
if (dest->used & XPERMS_ALLOWED)
dest->allowed->p[i] = src->allowed->p[i];
@@ -357,6 +362,7 @@ static int avc_xperms_populate(struct avc_node *node,
memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p));
dest->xp.len = src->xp.len;
+ dest->xp.base_perms = src->xp.base_perms;
/* for each source xpd allocate a destination xpd and copy */
list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) {
@@ -807,6 +813,7 @@ out:
* @event : Updating event
* @perms : Permission mask bits
* @driver: xperm driver information
+ * @base_perm: the base permission associated with the extended permission
* @xperm: xperm permissions
* @ssid: AVC entry source sid
* @tsid: AVC entry target sid
@@ -820,10 +827,9 @@ out:
* otherwise, this function updates the AVC entry. The original AVC-entry object
* will release later by RCU.
*/
-static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
- u32 tsid, u16 tclass, u32 seqno,
- struct extended_perms_decision *xpd,
- u32 flags)
+static int avc_update_node(u32 event, u32 perms, u8 driver, u8 base_perm,
+ u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno,
+ struct extended_perms_decision *xpd, u32 flags)
{
u32 hvalue;
int rc = 0;
@@ -880,7 +886,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
case AVC_CALLBACK_GRANT:
node->ae.avd.allowed |= perms;
if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS))
- avc_xperms_allow_perm(node->ae.xp_node, driver, xperm);
+ avc_xperms_allow_perm(node->ae.xp_node, driver, base_perm, xperm);
break;
case AVC_CALLBACK_TRY_REVOKE:
case AVC_CALLBACK_REVOKE:
@@ -930,7 +936,7 @@ static void avc_flush(void)
spin_lock_irqsave(lock, flag);
/*
- * With preemptable RCU, the outer spinlock does not
+ * With preemptible RCU, the outer spinlock does not
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
@@ -987,10 +993,9 @@ static noinline void avc_compute_av(u32 ssid, u32 tsid, u16 tclass,
avc_insert(ssid, tsid, tclass, avd, xp_node);
}
-static noinline int avc_denied(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- u8 driver, u8 xperm, unsigned int flags,
- struct av_decision *avd)
+static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ u8 driver, u8 base_perm, u8 xperm,
+ unsigned int flags, struct av_decision *avd)
{
if (flags & AVC_STRICT)
return -EACCES;
@@ -999,7 +1004,7 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
!(avd->flags & AVD_FLAGS_PERMISSIVE))
return -EACCES;
- avc_update_node(AVC_CALLBACK_GRANT, requested, driver,
+ avc_update_node(AVC_CALLBACK_GRANT, requested, driver, base_perm,
xperm, ssid, tsid, tclass, avd->seqno, NULL, flags);
return 0;
}
@@ -1012,7 +1017,8 @@ static noinline int avc_denied(u32 ssid, u32 tsid,
* driver field is used to specify which set contains the permission.
*/
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 xperm, struct common_audit_data *ad)
+ u8 driver, u8 base_perm, u8 xperm,
+ struct common_audit_data *ad)
{
struct avc_node *node;
struct av_decision avd;
@@ -1047,22 +1053,23 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
local_xpd.auditallow = &auditallow;
local_xpd.dontaudit = &dontaudit;
- xpd = avc_xperms_decision_lookup(driver, xp_node);
+ xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node);
if (unlikely(!xpd)) {
/*
* Compute the extended_perms_decision only if the driver
- * is flagged
+ * is flagged and the base permission is known.
*/
- if (!security_xperm_test(xp_node->xp.drivers.p, driver)) {
+ if (!security_xperm_test(xp_node->xp.drivers.p, driver) ||
+ !(xp_node->xp.base_perms & base_perm)) {
avd.allowed &= ~requested;
goto decision;
}
rcu_read_unlock();
- security_compute_xperms_decision(ssid, tsid, tclass,
- driver, &local_xpd);
+ security_compute_xperms_decision(ssid, tsid, tclass, driver,
+ base_perm, &local_xpd);
rcu_read_lock();
- avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested,
- driver, xperm, ssid, tsid, tclass, avd.seqno,
+ avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver,
+ base_perm, xperm, ssid, tsid, tclass, avd.seqno,
&local_xpd, 0);
} else {
avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd);
@@ -1075,8 +1082,8 @@ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
decision:
denied = requested & ~(avd.allowed);
if (unlikely(denied))
- rc = avc_denied(ssid, tsid, tclass, requested,
- driver, xperm, AVC_EXTENDED_PERMS, &avd);
+ rc = avc_denied(ssid, tsid, tclass, requested, driver,
+ base_perm, xperm, AVC_EXTENDED_PERMS, &avd);
rcu_read_unlock();
@@ -1110,7 +1117,7 @@ static noinline int avc_perm_nonode(u32 ssid, u32 tsid, u16 tclass,
avc_compute_av(ssid, tsid, tclass, avd, &xp_node);
denied = requested & ~(avd->allowed);
if (unlikely(denied))
- return avc_denied(ssid, tsid, tclass, requested, 0, 0,
+ return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0,
flags, avd);
return 0;
}
@@ -1158,7 +1165,7 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_unlock();
if (unlikely(denied))
- return avc_denied(ssid, tsid, tclass, requested, 0, 0,
+ return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0,
flags, avd);
return 0;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f5a08f94e094..c95a5874bf7d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -65,7 +65,6 @@
#include <net/netlink.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/dccp.h>
#include <linux/sctp.h>
#include <net/sctp/structs.h>
#include <linux/quota.h>
@@ -213,8 +212,10 @@ static void cred_init_security(void)
{
struct task_security_struct *tsec;
+ /* NOTE: the lsm framework zeros out the buffer on allocation */
+
tsec = selinux_cred(unrcu_pointer(current->real_cred));
- tsec->osid = tsec->sid = SECINITSID_KERNEL;
+ tsec->osid = tsec->sid = tsec->avdcache.sid = SECINITSID_KERNEL;
}
/*
@@ -278,27 +279,21 @@ static int __inode_security_revalidate(struct inode *inode,
struct dentry *dentry,
bool may_sleep)
{
- struct inode_security_struct *isec = selinux_inode(inode);
+ if (!selinux_initialized())
+ return 0;
- might_sleep_if(may_sleep);
+ if (may_sleep)
+ might_sleep();
+ else
+ return -ECHILD;
/*
- * The check of isec->initialized below is racy but
- * inode_doinit_with_dentry() will recheck with
- * isec->lock held.
+ * Check to ensure that an inode's SELinux state is valid and try
+ * reloading the inode security label if necessary. This will fail if
+ * @dentry is NULL and no dentry for this inode can be found; in that
+ * case, continue using the old label.
*/
- if (selinux_initialized() &&
- data_race(isec->initialized != LABEL_INITIALIZED)) {
- if (!may_sleep)
- return -ECHILD;
-
- /*
- * Try reloading the inode security label. This will fail if
- * @opt_dentry is NULL and no dentry for this inode can be
- * found; in that case, continue using the old label.
- */
- inode_doinit_with_dentry(inode, dentry);
- }
+ inode_doinit_with_dentry(inode, dentry);
return 0;
}
@@ -307,41 +302,53 @@ static struct inode_security_struct *inode_security_novalidate(struct inode *ino
return selinux_inode(inode);
}
-static struct inode_security_struct *inode_security_rcu(struct inode *inode, bool rcu)
+static inline struct inode_security_struct *inode_security_rcu(struct inode *inode,
+ bool rcu)
{
- int error;
+ int rc;
+ struct inode_security_struct *isec = selinux_inode(inode);
- error = __inode_security_revalidate(inode, NULL, !rcu);
- if (error)
- return ERR_PTR(error);
- return selinux_inode(inode);
+ /* check below is racy, but revalidate will recheck with lock held */
+ if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
+ return isec;
+ rc = __inode_security_revalidate(inode, NULL, !rcu);
+ if (rc)
+ return ERR_PTR(rc);
+ return isec;
}
/*
* Get the security label of an inode.
*/
-static struct inode_security_struct *inode_security(struct inode *inode)
+static inline struct inode_security_struct *inode_security(struct inode *inode)
{
+ struct inode_security_struct *isec = selinux_inode(inode);
+
+ /* check below is racy, but revalidate will recheck with lock held */
+ if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
+ return isec;
__inode_security_revalidate(inode, NULL, true);
- return selinux_inode(inode);
+ return isec;
}
-static struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry)
+static inline struct inode_security_struct *backing_inode_security_novalidate(struct dentry *dentry)
{
- struct inode *inode = d_backing_inode(dentry);
-
- return selinux_inode(inode);
+ return selinux_inode(d_backing_inode(dentry));
}
/*
* Get the security label of a dentry's backing inode.
*/
-static struct inode_security_struct *backing_inode_security(struct dentry *dentry)
+static inline struct inode_security_struct *backing_inode_security(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ /* check below is racy, but revalidate will recheck with lock held */
+ if (data_race(likely(isec->initialized == LABEL_INITIALIZED)))
+ return isec;
__inode_security_revalidate(inode, dentry, true);
- return selinux_inode(inode);
+ return isec;
}
static void inode_free_security(struct inode *inode)
@@ -407,7 +414,7 @@ static const struct {
static int match_opt_prefix(char *s, int l, char **arg)
{
- int i;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(tokens); i++) {
size_t len = tokens[i].len;
@@ -1191,8 +1198,6 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
return SECCLASS_ICMP_SOCKET;
else
return SECCLASS_RAWIP_SOCKET;
- case SOCK_DCCP:
- return SECCLASS_DCCP_SOCKET;
default:
return SECCLASS_RAWIP_SOCKET;
}
@@ -1683,12 +1688,15 @@ static inline int dentry_has_perm(const struct cred *cred,
struct dentry *dentry,
u32 av)
{
- struct inode *inode = d_backing_inode(dentry);
struct common_audit_data ad;
+ struct inode *inode = d_backing_inode(dentry);
+ struct inode_security_struct *isec = selinux_inode(inode);
ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
- __inode_security_revalidate(inode, dentry, true);
+ /* check below is racy, but revalidate will recheck with lock held */
+ if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED)))
+ __inode_security_revalidate(inode, dentry, true);
return inode_has_perm(cred, inode, av, &ad);
}
@@ -1699,12 +1707,15 @@ static inline int path_has_perm(const struct cred *cred,
const struct path *path,
u32 av)
{
- struct inode *inode = d_backing_inode(path->dentry);
struct common_audit_data ad;
+ struct inode *inode = d_backing_inode(path->dentry);
+ struct inode_security_struct *isec = selinux_inode(inode);
ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = *path;
- __inode_security_revalidate(inode, path->dentry, true);
+ /* check below is racy, but revalidate will recheck with lock held */
+ if (data_race(unlikely(isec->initialized != LABEL_INITIALIZED)))
+ __inode_security_revalidate(inode, path->dentry, true);
return inode_has_perm(cred, inode, av, &ad);
}
@@ -2869,8 +2880,8 @@ static void selinux_inode_free_security(struct inode *inode)
static int selinux_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
- const char **xattr_name, void **ctx,
- u32 *ctxlen)
+ const char **xattr_name,
+ struct lsm_context *cp)
{
u32 newsid;
int rc;
@@ -2885,8 +2896,8 @@ static int selinux_dentry_init_security(struct dentry *dentry, int mode,
if (xattr_name)
*xattr_name = XATTR_NAME_SELINUX;
- return security_sid_to_context(newsid, (char **)ctx,
- ctxlen);
+ cp->id = LSM_ID_SELINUX;
+ return security_sid_to_context(newsid, &cp->context, &cp->len);
}
static int selinux_dentry_create_files_as(struct dentry *dentry, int mode,
@@ -3088,44 +3099,152 @@ static noinline int audit_inode_permission(struct inode *inode,
audited, denied, result, &ad);
}
-static int selinux_inode_permission(struct inode *inode, int mask)
+/**
+ * task_avdcache_reset - Reset the task's AVD cache
+ * @tsec: the task's security state
+ *
+ * Clear the task's AVD cache in @tsec and reseed it with the task's current
+ * SID and the current policy sequence number.
+ */
+static inline void task_avdcache_reset(struct task_security_struct *tsec)
+{
+ memset(&tsec->avdcache.dir, 0, sizeof(tsec->avdcache.dir));
+ tsec->avdcache.sid = tsec->sid;
+ tsec->avdcache.seqno = avc_policy_seqno();
+ tsec->avdcache.dir_spot = TSEC_AVDC_DIR_SIZE - 1;
+}
+
+/**
+ * task_avdcache_search - Search the task's AVD cache
+ * @tsec: the task's security state
+ * @isec: the inode to search for in the cache
+ * @avdc: matching avd cache entry returned to the caller
+ *
+ * Search @tsec for an AVD cache entry that matches @isec and return it to the
+ * caller via @avdc. Returns 0 if a match is found, negative values otherwise.
+ */
+static inline int task_avdcache_search(struct task_security_struct *tsec,
+ struct inode_security_struct *isec,
+ struct avdc_entry **avdc)
{
+ int orig, iter;
+
+ /* focused on path walk optimization, only cache directories */
+ if (isec->sclass != SECCLASS_DIR)
+ return -ENOENT;
+
+ if (unlikely(tsec->sid != tsec->avdcache.sid ||
+ tsec->avdcache.seqno != avc_policy_seqno())) {
+ task_avdcache_reset(tsec);
+ return -ENOENT;
+ }
+
+ orig = iter = tsec->avdcache.dir_spot;
+ do {
+ if (tsec->avdcache.dir[iter].isid == isec->sid) {
+ /* cache hit */
+ tsec->avdcache.dir_spot = iter;
+ *avdc = &tsec->avdcache.dir[iter];
+ return 0;
+ }
+ iter = (iter - 1) & (TSEC_AVDC_DIR_SIZE - 1);
+ } while (iter != orig);
+
+ return -ENOENT;
+}
+
+/**
+ * task_avdcache_update - Update the task's AVD cache
+ * @tsec: the task's security state
+ * @isec: the inode associated with the cache entry
+ * @avd: the AVD to cache
+ * @audited: the permission audit bitmask to cache
+ *
+ * Update the AVD cache in @tsec with the @avd and @audited info associated
+ * with @isec.
+ */
+static inline void task_avdcache_update(struct task_security_struct *tsec,
+ struct inode_security_struct *isec,
+ struct av_decision *avd,
+ u32 audited)
+{
+ int spot;
+
+ /* focused on path walk optimization, only cache directories */
+ if (isec->sclass != SECCLASS_DIR)
+ return;
+
+ /* update cache */
+ spot = (tsec->avdcache.dir_spot + 1) & (TSEC_AVDC_DIR_SIZE - 1);
+ tsec->avdcache.dir_spot = spot;
+ tsec->avdcache.dir[spot].isid = isec->sid;
+ tsec->avdcache.dir[spot].audited = audited;
+ tsec->avdcache.dir[spot].allowed = avd->allowed;
+ tsec->avdcache.dir[spot].permissive = avd->flags & AVD_FLAGS_PERMISSIVE;
+ tsec->avdcache.permissive_neveraudit =
+ (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT));
+}
+
+/**
+ * selinux_inode_permission - Check if the current task can access an inode
+ * @inode: the inode that is being accessed
+ * @requested: the accesses being requested
+ *
+ * Check if the current task is allowed to access @inode according to
+ * @requested. Returns 0 if allowed, negative values otherwise.
+ */
+static int selinux_inode_permission(struct inode *inode, int requested)
+{
+ int mask;
u32 perms;
- bool from_access;
- bool no_block = mask & MAY_NOT_BLOCK;
+ struct task_security_struct *tsec;
struct inode_security_struct *isec;
- u32 sid = current_sid();
- struct av_decision avd;
+ struct avdc_entry *avdc;
int rc, rc2;
u32 audited, denied;
- from_access = mask & MAY_ACCESS;
- mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+ mask = requested & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
/* No permission to check. Existence test. */
if (!mask)
return 0;
- if (unlikely(IS_PRIVATE(inode)))
+ tsec = selinux_cred(current_cred());
+ if (task_avdcache_permnoaudit(tsec))
return 0;
- perms = file_mask_to_av(inode->i_mode, mask);
-
- isec = inode_security_rcu(inode, no_block);
+ isec = inode_security_rcu(inode, requested & MAY_NOT_BLOCK);
if (IS_ERR(isec))
return PTR_ERR(isec);
+ perms = file_mask_to_av(inode->i_mode, mask);
+
+ rc = task_avdcache_search(tsec, isec, &avdc);
+ if (likely(!rc)) {
+ /* Cache hit. */
+ audited = perms & avdc->audited;
+ denied = perms & ~avdc->allowed;
+ if (unlikely(denied && enforcing_enabled() &&
+ !avdc->permissive))
+ rc = -EACCES;
+ } else {
+ struct av_decision avd;
+
+ /* Cache miss. */
+ rc = avc_has_perm_noaudit(tsec->sid, isec->sid, isec->sclass,
+ perms, 0, &avd);
+ audited = avc_audit_required(perms, &avd, rc,
+ (requested & MAY_ACCESS) ? FILE__AUDIT_ACCESS : 0,
+ &denied);
+ task_avdcache_update(tsec, isec, &avd, audited);
+ }
- rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0,
- &avd);
- audited = avc_audit_required(perms, &avd, rc,
- from_access ? FILE__AUDIT_ACCESS : 0,
- &denied);
if (likely(!audited))
return rc;
rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
if (rc2)
return rc2;
+
return rc;
}
@@ -3135,7 +3254,7 @@ static int selinux_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
const struct cred *cred = current_cred();
struct inode *inode = d_backing_inode(dentry);
unsigned int ia_valid = iattr->ia_valid;
- __u32 av = FILE__WRITE;
+ u32 av = FILE__WRITE;
/* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
if (ia_valid & ATTR_FORCE) {
@@ -3160,6 +3279,13 @@ static int selinux_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static int selinux_inode_getattr(const struct path *path)
{
+ struct task_security_struct *tsec;
+
+ tsec = selinux_cred(current_cred());
+
+ if (task_avdcache_permnoaudit(tsec))
+ return 0;
+
return path_has_perm(current_cred(), path, FILE__GETATTR);
}
@@ -3366,6 +3492,18 @@ static int selinux_inode_removexattr(struct mnt_idmap *idmap,
return -EACCES;
}
+static int selinux_inode_file_setattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+}
+
+static int selinux_inode_file_getattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return dentry_has_perm(current_cred(), dentry, FILE__GETATTR);
+}
+
static int selinux_path_notify(const struct path *path, u64 mask,
unsigned int obj_type)
{
@@ -3395,6 +3533,9 @@ static int selinux_path_notify(const struct path *path, u64 mask,
case FSNOTIFY_OBJ_TYPE_INODE:
perm = FILE__WATCH;
break;
+ case FSNOTIFY_OBJ_TYPE_MNTNS:
+ perm = FILE__WATCH_MOUNTNS;
+ break;
default:
return -EINVAL;
}
@@ -3404,7 +3545,8 @@ static int selinux_path_notify(const struct path *path, u64 mask,
perm |= FILE__WATCH_WITH_PERM;
/* watches on read-like events need the file:watch_reads permission */
- if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
+ if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_PRE_ACCESS |
+ FS_CLOSE_NOWRITE))
perm |= FILE__WATCH_READS;
return path_has_perm(current_cred(), path, perm);
@@ -3583,10 +3725,13 @@ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
newsid = tsec->create_sid;
} else {
u16 secclass = inode_mode_to_security_class(kn->mode);
+ const char *kn_name;
struct qstr q;
- q.name = kn->name;
- q.hash_len = hashlen_string(kn_dir, kn->name);
+ /* kn is fresh, can't be renamed, and the name does not go away */
+ kn_name = rcu_dereference_check(kn->name, true);
+ q.name = kn_name;
+ q.hash_len = hashlen_string(kn_dir, kn_name);
rc = security_transition_sid(tsec->sid,
parent_sid, secclass, &q,
@@ -3688,8 +3833,8 @@ static int ioctl_has_perm(const struct cred *cred, struct file *file,
return 0;
isec = inode_security(inode);
- rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass,
- requested, driver, xperm, &ad);
+ rc = avc_has_extended_perms(ssid, isec->sid, isec->sclass, requested,
+ driver, AVC_EXT_IOCTL, xperm, &ad);
out:
return rc;
}
@@ -4095,7 +4240,7 @@ static int selinux_kernel_module_request(char *kmod_name)
SYSTEM__MODULE_REQUEST, &ad);
}
-static int selinux_kernel_module_from_file(struct file *file)
+static int selinux_kernel_load_from_file(struct file *file, u32 requested)
{
struct common_audit_data ad;
struct inode_security_struct *isec;
@@ -4103,12 +4248,8 @@ static int selinux_kernel_module_from_file(struct file *file)
u32 sid = current_sid();
int rc;
- /* init_module */
if (file == NULL)
- return avc_has_perm(sid, sid, SECCLASS_SYSTEM,
- SYSTEM__MODULE_LOAD, NULL);
-
- /* finit_module */
+ return avc_has_perm(sid, sid, SECCLASS_SYSTEM, requested, NULL);
ad.type = LSM_AUDIT_DATA_FILE;
ad.u.file = file;
@@ -4121,8 +4262,7 @@ static int selinux_kernel_module_from_file(struct file *file)
}
isec = inode_security(file_inode(file));
- return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM,
- SYSTEM__MODULE_LOAD, &ad);
+ return avc_has_perm(sid, isec->sid, SECCLASS_SYSTEM, requested, &ad);
}
static int selinux_kernel_read_file(struct file *file,
@@ -4131,9 +4271,30 @@ static int selinux_kernel_read_file(struct file *file,
{
int rc = 0;
+ BUILD_BUG_ON_MSG(READING_MAX_ID > 7,
+ "New kernel_read_file_id introduced; update SELinux!");
+
switch (id) {
+ case READING_FIRMWARE:
+ rc = selinux_kernel_load_from_file(file, SYSTEM__FIRMWARE_LOAD);
+ break;
case READING_MODULE:
- rc = selinux_kernel_module_from_file(contents ? file : NULL);
+ rc = selinux_kernel_load_from_file(file, SYSTEM__MODULE_LOAD);
+ break;
+ case READING_KEXEC_IMAGE:
+ rc = selinux_kernel_load_from_file(file,
+ SYSTEM__KEXEC_IMAGE_LOAD);
+ break;
+ case READING_KEXEC_INITRAMFS:
+ rc = selinux_kernel_load_from_file(file,
+ SYSTEM__KEXEC_INITRAMFS_LOAD);
+ break;
+ case READING_POLICY:
+ rc = selinux_kernel_load_from_file(file, SYSTEM__POLICY_LOAD);
+ break;
+ case READING_X509_CERTIFICATE:
+ rc = selinux_kernel_load_from_file(file,
+ SYSTEM__X509_CERTIFICATE_LOAD);
break;
default:
break;
@@ -4146,9 +4307,31 @@ static int selinux_kernel_load_data(enum kernel_load_data_id id, bool contents)
{
int rc = 0;
+ BUILD_BUG_ON_MSG(LOADING_MAX_ID > 7,
+ "New kernel_load_data_id introduced; update SELinux!");
+
switch (id) {
+ case LOADING_FIRMWARE:
+ rc = selinux_kernel_load_from_file(NULL, SYSTEM__FIRMWARE_LOAD);
+ break;
case LOADING_MODULE:
- rc = selinux_kernel_module_from_file(NULL);
+ rc = selinux_kernel_load_from_file(NULL, SYSTEM__MODULE_LOAD);
+ break;
+ case LOADING_KEXEC_IMAGE:
+ rc = selinux_kernel_load_from_file(NULL,
+ SYSTEM__KEXEC_IMAGE_LOAD);
+ break;
+ case LOADING_KEXEC_INITRAMFS:
+ rc = selinux_kernel_load_from_file(NULL,
+ SYSTEM__KEXEC_INITRAMFS_LOAD);
+ break;
+ case LOADING_POLICY:
+ rc = selinux_kernel_load_from_file(NULL,
+ SYSTEM__POLICY_LOAD);
+ break;
+ case LOADING_X509_CERTIFICATE:
+ rc = selinux_kernel_load_from_file(NULL,
+ SYSTEM__X509_CERTIFICATE_LOAD);
break;
default:
break;
@@ -4347,22 +4530,6 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
break;
}
- case IPPROTO_DCCP: {
- struct dccp_hdr _dccph, *dh;
-
- if (ntohs(ih->frag_off) & IP_OFFSET)
- break;
-
- offset += ihlen;
- dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
- if (dh == NULL)
- break;
-
- ad->u.net->sport = dh->dccph_sport;
- ad->u.net->dport = dh->dccph_dport;
- break;
- }
-
#if IS_ENABLED(CONFIG_IP_SCTP)
case IPPROTO_SCTP: {
struct sctphdr _sctph, *sh;
@@ -4441,18 +4608,6 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
break;
}
- case IPPROTO_DCCP: {
- struct dccp_hdr _dccph, *dh;
-
- dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
- if (dh == NULL)
- break;
-
- ad->u.net->sport = dh->dccph_sport;
- ad->u.net->dport = dh->dccph_dport;
- break;
- }
-
#if IS_ENABLED(CONFIG_IP_SCTP)
case IPPROTO_SCTP: {
struct sctphdr _sctph, *sh;
@@ -4804,10 +4959,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
node_perm = UDP_SOCKET__NODE_BIND;
break;
- case SECCLASS_DCCP_SOCKET:
- node_perm = DCCP_SOCKET__NODE_BIND;
- break;
-
case SECCLASS_SCTP_SOCKET:
node_perm = SCTP_SOCKET__NODE_BIND;
break;
@@ -4835,7 +4986,7 @@ out:
return err;
err_af:
/* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */
- if (sksec->sclass == SECCLASS_SCTP_SOCKET)
+ if (sk->sk_protocol == IPPROTO_SCTP)
return -EINVAL;
return -EAFNOSUPPORT;
}
@@ -4863,11 +5014,10 @@ static int selinux_socket_connect_helper(struct socket *sock,
return 0;
/*
- * If a TCP, DCCP or SCTP socket, check name_connect permission
+ * If a TCP or SCTP socket, check name_connect permission
* for the port.
*/
if (sksec->sclass == SECCLASS_TCP_SOCKET ||
- sksec->sclass == SECCLASS_DCCP_SOCKET ||
sksec->sclass == SECCLASS_SCTP_SOCKET) {
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
@@ -4912,9 +5062,6 @@ static int selinux_socket_connect_helper(struct socket *sock,
case SECCLASS_TCP_SOCKET:
perm = TCP_SOCKET__NAME_CONNECT;
break;
- case SECCLASS_DCCP_SOCKET:
- perm = DCCP_SOCKET__NAME_CONNECT;
- break;
case SECCLASS_SCTP_SOCKET:
perm = SCTP_SOCKET__NAME_CONNECT;
break;
@@ -5738,7 +5885,7 @@ static unsigned int selinux_ip_output(void *priv, struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
- sk = skb->sk;
+ sk = sk_to_full_sk(skb->sk);
if (sk) {
struct sk_security_struct *sksec;
@@ -5939,20 +6086,20 @@ static int nlmsg_sock_has_extended_perms(struct sock *sk, u32 perms, u16 nlmsg_t
{
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
- struct lsm_network_audit net;
u8 driver;
u8 xperm;
if (sock_skip_has_perm(sksec->sid))
return 0;
- ad_net_init_from_sk(&ad, &net, sk);
+ ad.type = LSM_AUDIT_DATA_NLMSGTYPE;
+ ad.u.nlmsg_type = nlmsg_type;
driver = nlmsg_type >> 8;
xperm = nlmsg_type & 0xff;
return avc_has_extended_perms(current_sid(), sksec->sid, sksec->sclass,
- perms, driver, xperm, &ad);
+ perms, driver, AVC_EXT_NLMSG, xperm, &ad);
}
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
@@ -6640,15 +6787,28 @@ static int selinux_ismaclabel(const char *name)
return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
}
-static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+static int selinux_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
- return security_sid_to_context(secid, secdata, seclen);
+ u32 seclen;
+ int ret;
+
+ if (cp) {
+ cp->id = LSM_ID_SELINUX;
+ ret = security_sid_to_context(secid, &cp->context, &cp->len);
+ if (ret < 0)
+ return ret;
+ return cp->len;
+ }
+ ret = security_sid_to_context(secid, NULL, &seclen);
+ if (ret < 0)
+ return ret;
+ return seclen;
}
-static int selinux_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
- u32 *seclen)
+static int selinux_lsmprop_to_secctx(struct lsm_prop *prop,
+ struct lsm_context *cp)
{
- return selinux_secid_to_secctx(prop->selinux.secid, secdata, seclen);
+ return selinux_secid_to_secctx(prop->selinux.secid, cp);
}
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
@@ -6657,9 +6817,13 @@ static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
secid, GFP_KERNEL);
}
-static void selinux_release_secctx(char *secdata, u32 seclen)
+static void selinux_release_secctx(struct lsm_context *cp)
{
- kfree(secdata);
+ if (cp->id == LSM_ID_SELINUX) {
+ kfree(cp->context);
+ cp->context = NULL;
+ cp->id = LSM_ID_UNDEF;
+ }
}
static void selinux_inode_invalidate_secctx(struct inode *inode)
@@ -6691,14 +6855,16 @@ static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
ctx, ctxlen, 0, NULL);
}
-static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+static int selinux_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
{
- int len = 0;
+ int len;
len = selinux_inode_getsecurity(&nop_mnt_idmap, inode,
- XATTR_SELINUX_SUFFIX, ctx, true);
+ XATTR_SELINUX_SUFFIX,
+ (void **)&cp->context, true);
if (len < 0)
return len;
- *ctxlen = len;
+ cp->len = len;
+ cp->id = LSM_ID_SELINUX;
return 0;
}
#ifdef CONFIG_KEYS
@@ -6846,7 +7012,7 @@ static int selinux_ib_alloc_security(void *ib_sec)
#ifdef CONFIG_BPF_SYSCALL
static int selinux_bpf(int cmd, union bpf_attr *attr,
- unsigned int size)
+ unsigned int size, bool kernel)
{
u32 sid = current_sid();
int ret;
@@ -6933,7 +7099,7 @@ static int selinux_bpf_prog(struct bpf_prog *prog)
}
static int selinux_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
struct bpf_security_struct *bpfsec;
@@ -6956,7 +7122,7 @@ static void selinux_bpf_map_free(struct bpf_map *map)
}
static int selinux_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
struct bpf_security_struct *bpfsec;
@@ -7020,7 +7186,7 @@ struct lsm_blob_sizes selinux_blob_sizes __ro_after_init = {
};
#ifdef CONFIG_PERF_EVENTS
-static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
+static int selinux_perf_event_open(int type)
{
u32 requested, sid = current_sid();
@@ -7117,6 +7283,19 @@ static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
return avc_has_perm(current_sid(), isec->sid,
SECCLASS_IO_URING, IO_URING__CMD, &ad);
}
+
+/**
+ * selinux_uring_allowed - check if io_uring_setup() can be called
+ *
+ * Check to see if the current task is allowed to call io_uring_setup().
+ */
+static int selinux_uring_allowed(void)
+{
+ u32 sid = current_sid();
+
+ return avc_has_perm(sid, sid, SECCLASS_IO_URING, IO_URING__ALLOWED,
+ NULL);
+}
#endif /* CONFIG_IO_URING */
static const struct lsm_id selinux_lsmid = {
@@ -7195,6 +7374,8 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_getxattr, selinux_inode_getxattr),
LSM_HOOK_INIT(inode_listxattr, selinux_inode_listxattr),
LSM_HOOK_INIT(inode_removexattr, selinux_inode_removexattr),
+ LSM_HOOK_INIT(inode_file_getattr, selinux_inode_file_getattr),
+ LSM_HOOK_INIT(inode_file_setattr, selinux_inode_file_setattr),
LSM_HOOK_INIT(inode_set_acl, selinux_inode_set_acl),
LSM_HOOK_INIT(inode_get_acl, selinux_inode_get_acl),
LSM_HOOK_INIT(inode_remove_acl, selinux_inode_remove_acl),
@@ -7370,6 +7551,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds),
LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll),
LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd),
+ LSM_HOOK_INIT(uring_allowed, selinux_uring_allowed),
#endif
/*
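
The per-task AVD cache wired up in the hooks above is a small ring of TSEC_AVDC_DIR_SIZE directory entries (a power of two), scanned backwards from the slot of the last hit; the "& (SIZE - 1)" masking is what makes the index wrap. A self-contained sketch of the same lookup pattern, using illustrative names rather than the kernel's:

        #define EXAMPLE_RING_SIZE 4                     /* must be a power of two */

        struct example_entry {
                unsigned int key;
                int value;
        };

        static int example_ring_lookup(const struct example_entry ring[EXAMPLE_RING_SIZE],
                                       unsigned int spot, unsigned int key, int *value)
        {
                unsigned int orig = spot, iter = spot;

                do {
                        if (ring[iter].key == key) {
                                *value = ring[iter].value;
                                return 0;                               /* hit */
                        }
                        iter = (iter - 1) & (EXAMPLE_RING_SIZE - 1);    /* wrap around */
                } while (iter != orig);

                return -1;                                              /* miss */
        }
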
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
index 48f537b41c58..470481cfe0e8 100644
--- a/security/selinux/ibpkey.c
+++ b/security/selinux/ibpkey.c
@@ -130,7 +130,7 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
{
int ret;
struct sel_ib_pkey *pkey;
- struct sel_ib_pkey *new = NULL;
+ struct sel_ib_pkey *new;
unsigned long flags;
spin_lock_irqsave(&sel_ib_pkey_lock, flags);
@@ -146,12 +146,11 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
if (ret)
goto out;
- /* If this memory allocation fails still return 0. The SID
- * is valid, it just won't be added to the cache.
- */
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
- ret = -ENOMEM;
+ /* If this memory allocation fails, still return 0. The SID
+ * is valid, it just won't be added to the cache.
+ */
goto out;
}
@@ -184,7 +183,7 @@ int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *sid)
rcu_read_lock();
pkey = sel_ib_pkey_find(subnet_prefix, pkey_num);
- if (pkey) {
+ if (likely(pkey)) {
*sid = pkey->psec.sid;
rcu_read_unlock();
return 0;
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 96a614d47df8..01b5167fee1a 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -65,6 +65,10 @@ static inline u32 avc_audit_required(u32 requested, struct av_decision *avd,
int result, u32 auditdeny, u32 *deniedp)
{
u32 denied, audited;
+
+ if (avd->flags & AVD_FLAGS_NEVERAUDIT)
+ return 0;
+
denied = requested & ~avd->allowed;
if (unlikely(denied)) {
audited = denied & avd->auditdeny;
@@ -136,8 +140,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested,
struct common_audit_data *auditdata);
+#define AVC_EXT_IOCTL (1 << 0) /* Cache entry for an ioctl extended permission */
+#define AVC_EXT_NLMSG (1 << 1) /* Cache entry for an nlmsg extended permission */
int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested,
- u8 driver, u8 perm, struct common_audit_data *ad);
+ u8 driver, u8 base_perm, u8 perm,
+ struct common_audit_data *ad);
u32 avc_policy_seqno(void);
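
Both extended-permission users shown in this patch address one permission out of a 65536-bit space by splitting a 16-bit value, the low 16 bits of an ioctl command or a netlink message type, into a driver byte that selects one of 256 permission sets and an xperm byte that selects a bit within it; AVC_EXT_IOCTL and AVC_EXT_NLMSG now record which kind of value a cache entry describes. An illustrative sketch of the split only (the names here are not kernel identifiers):

        static void example_split_selector(unsigned short selector,
                                           unsigned char *driver, unsigned char *xperm)
        {
                *driver = selector >> 8;        /* selects one of 256 permission sets */
                *xperm = selector & 0xff;       /* selects a bit within that set */
        }
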
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 2bc20135324a..5665aa5e7853 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -8,7 +8,7 @@
COMMON_FILE_SOCK_PERMS, "unlink", "link", "rename", "execute", \
"quotaon", "mounton", "audit_access", "open", "execmod", \
"watch", "watch_mount", "watch_sb", "watch_with_perm", \
- "watch_reads"
+ "watch_reads", "watch_mountns"
#define COMMON_SOCK_PERMS \
COMMON_FILE_SOCK_PERMS, "bind", "connect", "listen", "accept", \
@@ -63,7 +63,9 @@ const struct security_class_mapping secclass_map[] = {
{ "process2", { "nnp_transition", "nosuid_transition", NULL } },
{ "system",
{ "ipc_info", "syslog_read", "syslog_mod", "syslog_console",
- "module_request", "module_load", NULL } },
+ "module_request", "module_load", "firmware_load",
+ "kexec_image_load", "kexec_initramfs_load", "policy_load",
+ "x509_certificate_load", NULL } },
{ "capability", { COMMON_CAP_PERMS, NULL } },
{ "filesystem",
{ "mount", "remount", "unmount", "getattr", "relabelfrom",
@@ -125,8 +127,6 @@ const struct security_class_mapping secclass_map[] = {
{ "key",
{ "view", "read", "write", "search", "link", "setattr", "create",
NULL } },
- { "dccp_socket",
- { COMMON_SOCK_PERMS, "node_bind", "name_connect", NULL } },
{ "memprotect", { "mmap_zero", NULL } },
{ "peer", { "recv", NULL } },
{ "capability2", { COMMON_CAP2_PERMS, NULL } },
@@ -177,9 +177,9 @@ const struct security_class_mapping secclass_map[] = {
{ "perf_event",
{ "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
{ "anon_inode", { COMMON_FILE_PERMS, NULL } },
- { "io_uring", { "override_creds", "sqpoll", "cmd", NULL } },
+ { "io_uring", { "override_creds", "sqpoll", "cmd", "allowed", NULL } },
{ "user_namespace", { "create", NULL } },
- { NULL }
+ /* last one */ { NULL, {} }
};
#ifdef __KERNEL__ /* avoid this check when building host programs */
diff --git a/security/selinux/include/conditional.h b/security/selinux/include/conditional.h
index 5910bb7c2eca..060833e2dba2 100644
--- a/security/selinux/include/conditional.h
+++ b/security/selinux/include/conditional.h
@@ -16,7 +16,7 @@
int security_get_bools(struct selinux_policy *policy, u32 *len, char ***names,
int **values);
-int security_set_bools(u32 len, int *values);
+int security_set_bools(u32 len, const int *values);
int security_get_bool_value(u32 index);
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
index 9b8b655a8cd3..e4dc904c3585 100644
--- a/security/selinux/include/netnode.h
+++ b/security/selinux/include/netnode.h
@@ -21,6 +21,6 @@
void sel_netnode_flush(void);
-int sel_netnode_sid(void *addr, u16 family, u32 *sid);
+int sel_netnode_sid(const void *addr, u16 family, u32 *sid);
#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c88cae81ee4c..1d7ac59015a1 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -29,6 +29,13 @@
#include "flask.h"
#include "avc.h"
+struct avdc_entry {
+ u32 isid; /* inode SID */
+ u32 allowed; /* allowed permission bitmask */
+ u32 audited; /* audited permission bitmask */
+ bool permissive; /* AVC permissive flag */
+};
+
struct task_security_struct {
u32 osid; /* SID prior to last execve */
u32 sid; /* current SID */
@@ -36,8 +43,23 @@ struct task_security_struct {
u32 create_sid; /* fscreate SID */
u32 keycreate_sid; /* keycreate SID */
u32 sockcreate_sid; /* fscreate SID */
+#define TSEC_AVDC_DIR_SIZE (1 << 2)
+ struct {
+ u32 sid; /* current SID for cached entries */
+ u32 seqno; /* AVC sequence number */
+ unsigned int dir_spot; /* dir cache index to check first */
+ struct avdc_entry dir[TSEC_AVDC_DIR_SIZE]; /* dir entries */
+ bool permissive_neveraudit; /* permissive and neveraudit */
+ } avdcache;
} __randomize_layout;
+static inline bool task_avdcache_permnoaudit(struct task_security_struct *tsec)
+{
+ return (tsec->avdcache.permissive_neveraudit &&
+ tsec->sid == tsec->avdcache.sid &&
+ tsec->avdcache.seqno == avc_policy_seqno());
+}
+
enum label_initialized {
LABEL_INVALID, /* invalid or not initialized */
LABEL_INITIALIZED, /* initialized */
@@ -82,7 +104,7 @@ struct ipc_security_struct {
};
struct netif_security_struct {
- struct net *ns; /* network namespace */
+ const struct net *ns; /* network namespace */
int ifindex; /* device index */
u32 sid; /* SID for this interface */
};
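
The avdcache added to task_security_struct above is a tiny directory-entry cache: its contents are only trusted while the task still has the SID and AVC sequence number recorded in it, and dir_spot remembers which of the TSEC_AVDC_DIR_SIZE slots hit last. The lookup below is only a sketch of that idea, not the in-tree consumer (which lives in the SELinux hooks and is outside this hunk); avdcache_lookup() and DIR_SIZE are illustrative names.

    /* Minimal sketch of probing a small per-task directory AVC cache. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    #define DIR_SIZE 4

    struct avdc_entry {
        uint32_t isid;      /* inode SID */
        uint32_t allowed;   /* allowed permission bitmask */
        uint32_t audited;   /* audited permission bitmask */
        bool permissive;    /* AVC permissive flag */
    };

    struct avdcache {
        uint32_t sid;           /* SID the entries were filled under */
        uint32_t seqno;         /* AVC sequence number at fill time */
        unsigned int dir_spot;  /* slot to check first */
        struct avdc_entry dir[DIR_SIZE];
    };

    static const struct avdc_entry *avdcache_lookup(struct avdcache *c,
                                                    uint32_t cur_sid,
                                                    uint32_t cur_seqno,
                                                    uint32_t isid)
    {
        unsigned int i, spot;

        if (c->sid != cur_sid || c->seqno != cur_seqno)
            return NULL; /* stale: task changed SID or policy was reloaded */

        for (i = 0; i < DIR_SIZE; i++) {
            spot = (c->dir_spot + i) % DIR_SIZE;
            if (c->dir[spot].isid == isid) {
                c->dir_spot = spot; /* check this slot first next time */
                return &c->dir[spot];
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct avdcache c = { .sid = 1, .seqno = 7, .dir_spot = 0 };

        c.dir[2].isid = 42;
        return avdcache_lookup(&c, 1, 7, 42) ? 0 : 1;
    }

A miss (stale SID/seqno or no matching inode SID) would fall back to the normal AVC path and refill a slot.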
diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h
index 079679fe7254..7405154e6c42 100644
--- a/security/selinux/include/policycap.h
+++ b/security/selinux/include/policycap.h
@@ -15,6 +15,8 @@ enum {
POLICYDB_CAP_IOCTL_SKIP_CLOEXEC,
POLICYDB_CAP_USERSPACE_INITIAL_CONTEXT,
POLICYDB_CAP_NETLINK_XPERM,
+ POLICYDB_CAP_NETIF_WILDCARD,
+ POLICYDB_CAP_GENFS_SECLABEL_WILDCARD,
__POLICYDB_CAP_MAX
};
#define POLICYDB_CAP_MAX (__POLICYDB_CAP_MAX - 1)
diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h
index e080827408c4..d8962fcf2ff9 100644
--- a/security/selinux/include/policycap_names.h
+++ b/security/selinux/include/policycap_names.h
@@ -18,6 +18,8 @@ const char *const selinux_policycap_names[__POLICYDB_CAP_MAX] = {
"ioctl_skip_cloexec",
"userspace_initial_context",
"netlink_xperm",
+ "netif_wildcard",
+ "genfs_seclabel_wildcard",
};
/* clang-format on */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index c7f2731abd03..8201e6a3ac0f 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -46,10 +46,12 @@
#define POLICYDB_VERSION_INFINIBAND 31
#define POLICYDB_VERSION_GLBLUB 32
#define POLICYDB_VERSION_COMP_FTRANS 33 /* compressed filename transitions */
+#define POLICYDB_VERSION_COND_XPERMS 34 /* extended permissions in conditional policies */
+#define POLICYDB_VERSION_NEVERAUDIT 35 /* neveraudit types */
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_COMP_FTRANS
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_NEVERAUDIT
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
@@ -201,6 +203,12 @@ static inline bool selinux_policycap_netlink_xperm(void)
selinux_state.policycap[POLICYDB_CAP_NETLINK_XPERM]);
}
+static inline bool selinux_policycap_netif_wildcard(void)
+{
+ return READ_ONCE(
+ selinux_state.policycap[POLICYDB_CAP_NETIF_WILDCARD]);
+}
+
struct selinux_policy_convert_data;
struct selinux_load_state {
@@ -239,6 +247,7 @@ struct extended_perms_data {
struct extended_perms_decision {
u8 used;
u8 driver;
+ u8 base_perm;
struct extended_perms_data *allowed;
struct extended_perms_data *auditallow;
struct extended_perms_data *dontaudit;
@@ -246,17 +255,20 @@ struct extended_perms_decision {
struct extended_perms {
u16 len; /* length associated decision chain */
+ u8 base_perms; /* which base permissions are covered */
struct extended_perms_data drivers; /* flag drivers that are used */
};
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
+#define AVD_FLAGS_NEVERAUDIT 0x0002
void security_compute_av(u32 ssid, u32 tsid, u16 tclass,
struct av_decision *avd,
struct extended_perms *xperms);
void security_compute_xperms_decision(u32 ssid, u32 tsid, u16 tclass, u8 driver,
+ u8 base_perm,
struct extended_perms_decision *xpermd);
void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass,
@@ -289,7 +301,7 @@ int security_context_to_sid_default(const char *scontext, u32 scontext_len,
int security_context_to_sid_force(const char *scontext, u32 scontext_len,
u32 *sid);
-int security_get_user_sids(u32 callsid, char *username, u32 **sids, u32 *nel);
+int security_get_user_sids(u32 fromsid, const char *username, u32 **sids, u32 *nel);
int security_port_sid(u8 protocol, u16 port, u32 *out_sid);
@@ -297,9 +309,9 @@ int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid);
int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid);
-int security_netif_sid(char *name, u32 *if_sid);
+int security_netif_sid(const char *name, u32 *if_sid);
-int security_node_sid(u16 domain, void *addr, u32 addrlen, u32 *out_sid);
+int security_node_sid(u16 domain, const void *addr, u32 addrlen, u32 *out_sid);
int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
@@ -307,7 +319,7 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
int security_validate_transition_user(u32 oldsid, u32 newsid, u32 tasksid,
u16 tclass);
-int security_bounded_transition(u32 oldsid, u32 newsid);
+int security_bounded_transition(u32 old_sid, u32 new_sid);
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 43a0d3594b72..78afbecdbe57 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -156,7 +156,11 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
ret = security_netif_sid(dev->name, sid);
if (ret != 0)
goto out;
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
+
+ /* If this memory allocation fails still return 0. The SID
+ * is valid, it just won't be added to the cache.
+ */
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (new) {
new->nsec.ns = ns;
new->nsec.ifindex = ifindex;
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 5c8c77e50aad..5d0ed08d46e5 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -187,7 +187,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
* failure.
*
*/
-static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
+static int sel_netnode_sid_slow(const void *addr, u16 family, u32 *sid)
{
int ret;
struct sel_netnode *node;
@@ -201,19 +201,22 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
return 0;
}
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ /* If this memory allocation fails still return 0. The SID
+ * is valid, it just won't be added to the cache.
+ */
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
switch (family) {
case PF_INET:
ret = security_node_sid(PF_INET,
addr, sizeof(struct in_addr), sid);
if (new)
- new->nsec.addr.ipv4 = *(__be32 *)addr;
+ new->nsec.addr.ipv4 = *(const __be32 *)addr;
break;
case PF_INET6:
ret = security_node_sid(PF_INET6,
addr, sizeof(struct in6_addr), sid);
if (new)
- new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
+ new->nsec.addr.ipv6 = *(const struct in6_addr *)addr;
break;
default:
BUG();
@@ -247,13 +250,13 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
* on failure.
*
*/
-int sel_netnode_sid(void *addr, u16 family, u32 *sid)
+int sel_netnode_sid(const void *addr, u16 family, u32 *sid)
{
struct sel_netnode *node;
rcu_read_lock();
node = sel_netnode_find(addr, family);
- if (node != NULL) {
+ if (likely(node != NULL)) {
*sid = node->nsec.sid;
rcu_read_unlock();
return 0;
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 2e22ad9c2bd0..6fd7da4b3576 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -47,12 +47,6 @@ struct sel_netport {
struct rcu_head rcu;
};
-/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason
- * for this is that I suspect most users will not make heavy use of both
- * address families at the same time so one table will usually end up wasted,
- * if this becomes a problem we can always add a hash table for each address
- * family later */
-
static DEFINE_SPINLOCK(sel_netport_lock);
static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
@@ -151,7 +145,11 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
ret = security_port_sid(protocol, pnum, sid);
if (ret != 0)
goto out;
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
+
+ /* If this memory allocation fails still return 0. The SID
+ * is valid, it just won't be added to the cache.
+ */
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (new) {
new->psec.port = pnum;
new->psec.protocol = protocol;
@@ -186,7 +184,7 @@ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
rcu_read_lock();
port = sel_netport_find(protocol, pnum);
- if (port != NULL) {
+ if (likely(port != NULL)) {
*sid = port->psec.sid;
rcu_read_unlock();
return 0;
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 3a95986b134f..2c0b07f9fbbd 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -98,7 +98,6 @@ static const struct nlmsg_perm nlmsg_route_perms[] = {
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] = {
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
- { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
};
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 6cd5bb0ba380..9aa1d03ab612 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1072,6 +1072,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
pr_warn_ratelimited("SELinux: %s (%d) wrote to /sys/fs/selinux/user!"
" This will not be supported in the future; please update your"
" userspace.\n", current->comm, current->pid);
+ ssleep(5);
length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__COMPUTE_USER,
@@ -1515,7 +1516,7 @@ static const struct file_operations sel_avc_hash_stats_ops = {
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
{
- int cpu;
+ loff_t cpu;
for (cpu = *idx; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
@@ -2001,7 +2002,7 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
[SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
[SEL_VALIDATE_TRANS] = {"validatetrans", &sel_transition_ops,
S_IWUGO},
- /* last one */ {""}
+ /* last one */ {"", NULL, 0}
};
ret = selinux_fs_info_create(sb);
@@ -2097,8 +2098,6 @@ err:
pr_err("SELinux: %s: failed while creating inodes\n",
__func__);
- selinux_fs_info_free(sb);
-
return ret;
}
@@ -2158,8 +2157,8 @@ static int __init init_sel_fs(void)
return err;
}
- selinux_null.dentry = d_hash_and_lookup(selinux_null.mnt->mnt_root,
- &null_name);
+ selinux_null.dentry = try_lookup_noperm(&null_name,
+ selinux_null.mnt->mnt_root);
if (IS_ERR(selinux_null.dentry)) {
pr_err("selinuxfs: could not lookup null!\n");
err = PTR_ERR(selinux_null.dentry);
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 8e400dd736b7..c2c31521cace 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -336,10 +336,10 @@ static const uint16_t spec_order[] = {
};
/* clang-format on */
-int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+int avtab_read_item(struct avtab *a, struct policy_file *fp, struct policydb *pol,
int (*insertf)(struct avtab *a, const struct avtab_key *k,
const struct avtab_datum *d, void *p),
- void *p)
+ void *p, bool conditional)
{
__le16 buf16[4];
u16 enabled;
@@ -457,6 +457,13 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
"was specified\n",
vers);
return -EINVAL;
+ } else if ((vers < POLICYDB_VERSION_COND_XPERMS) &&
+ (key.specified & AVTAB_XPERMS) && conditional) {
+ pr_err("SELinux: avtab: policy version %u does not "
+ "support extended permissions rules in conditional "
+ "policies and one was specified\n",
+ vers);
+ return -EINVAL;
} else if (key.specified & AVTAB_XPERMS) {
memset(&xperms, 0, sizeof(struct avtab_extended_perms));
rc = next_entry(&xperms.specified, fp, sizeof(u8));
@@ -500,7 +507,7 @@ static int avtab_insertf(struct avtab *a, const struct avtab_key *k,
return avtab_insert(a, k, d);
}
-int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
+int avtab_read(struct avtab *a, struct policy_file *fp, struct policydb *pol)
{
int rc;
__le32 buf[1];
@@ -523,7 +530,7 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
goto bad;
for (i = 0; i < nel; i++) {
- rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
+ rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL, false);
if (rc) {
if (rc == -ENOMEM)
pr_err("SELinux: avtab: out of memory\n");
@@ -543,7 +550,7 @@ bad:
goto out;
}
-int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp)
+int avtab_write_item(struct policydb *p, const struct avtab_node *cur, struct policy_file *fp)
{
__le16 buf16[4];
__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
@@ -579,7 +586,7 @@ int avtab_write_item(struct policydb *p, const struct avtab_node *cur, void *fp)
return 0;
}
-int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+int avtab_write(struct policydb *p, struct avtab *a, struct policy_file *fp)
{
u32 i;
int rc = 0;
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index f4407185401c..850b3453f259 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -89,7 +89,7 @@ struct avtab {
};
void avtab_init(struct avtab *h);
-int avtab_alloc(struct avtab *, u32);
+int avtab_alloc(struct avtab *h, u32 nrules);
int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
void avtab_destroy(struct avtab *h);
@@ -105,15 +105,16 @@ static inline void avtab_hash_eval(struct avtab *h, const char *tag)
#endif
struct policydb;
-int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
+struct policy_file;
+int avtab_read_item(struct avtab *a, struct policy_file *fp, struct policydb *pol,
int (*insert)(struct avtab *a, const struct avtab_key *k,
const struct avtab_datum *d, void *p),
- void *p);
+ void *p, bool conditional);
-int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_read(struct avtab *a, struct policy_file *fp, struct policydb *pol);
int avtab_write_item(struct policydb *p, const struct avtab_node *cur,
- void *fp);
-int avtab_write(struct policydb *p, struct avtab *a, void *fp);
+ struct policy_file *fp);
+int avtab_write(struct policydb *p, struct avtab *a, struct policy_file *fp);
struct avtab_node *avtab_insert_nonunique(struct avtab *h,
const struct avtab_key *key,
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 64ba95e40a6f..1bebfcb9c6a1 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -206,7 +206,7 @@ static int bool_isvalid(struct cond_bool_datum *b)
return 1;
}
-int cond_read_bool(struct policydb *p, struct symtab *s, void *fp)
+int cond_read_bool(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct cond_bool_datum *booldatum;
@@ -230,17 +230,11 @@ int cond_read_bool(struct policydb *p, struct symtab *s, void *fp)
goto err;
len = le32_to_cpu(buf[2]);
- if (((len == 0) || (len == (u32)-1)))
- goto err;
- rc = -ENOMEM;
- key = kmalloc(len + 1, GFP_KERNEL);
- if (!key)
- goto err;
- rc = next_entry(key, fp, len);
+ rc = str_read(&key, GFP_KERNEL, fp, len);
if (rc)
goto err;
- key[len] = '\0';
+
rc = symtab_insert(s, key, booldatum);
if (rc)
goto err;
@@ -323,7 +317,7 @@ static int cond_insertf(struct avtab *a, const struct avtab_key *k,
return 0;
}
-static int cond_read_av_list(struct policydb *p, void *fp,
+static int cond_read_av_list(struct policydb *p, struct policy_file *fp,
struct cond_av_list *list,
struct cond_av_list *other)
{
@@ -349,7 +343,7 @@ static int cond_read_av_list(struct policydb *p, void *fp,
for (i = 0; i < len; i++) {
data.dst = &list->nodes[i];
rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
- &data);
+ &data, true);
if (rc) {
kfree(list->nodes);
list->nodes = NULL;
@@ -375,7 +369,7 @@ static int expr_node_isvalid(struct policydb *p, struct cond_expr_node *expr)
return 1;
}
-static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
+static int cond_read_node(struct policydb *p, struct cond_node *node, struct policy_file *fp)
{
__le32 buf[2];
u32 i, len;
@@ -415,7 +409,7 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
return cond_read_av_list(p, fp, &node->false_list, &node->true_list);
}
-int cond_read_list(struct policydb *p, void *fp)
+int cond_read_list(struct policydb *p, struct policy_file *fp)
{
__le32 buf[1];
u32 i, len;
@@ -453,7 +447,7 @@ int cond_write_bool(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct cond_bool_datum *booldatum = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[3];
u32 len;
int rc;
@@ -536,7 +530,7 @@ static int cond_write_node(struct policydb *p, struct cond_node *node,
return 0;
}
-int cond_write_list(struct policydb *p, void *fp)
+int cond_write_list(struct policydb *p, struct policy_file *fp)
{
u32 i;
__le32 buf[1];
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 8827715bad75..468e98ad3ea1 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -68,10 +68,10 @@ int cond_destroy_bool(void *key, void *datum, void *p);
int cond_index_bool(void *key, void *datum, void *datap);
-int cond_read_bool(struct policydb *p, struct symtab *s, void *fp);
-int cond_read_list(struct policydb *p, void *fp);
+int cond_read_bool(struct policydb *p, struct symtab *s, struct policy_file *fp);
+int cond_read_list(struct policydb *p, struct policy_file *fp);
int cond_write_bool(void *key, void *datum, void *ptr);
-int cond_write_list(struct policydb *p, void *fp);
+int cond_write_list(struct policydb *p, struct policy_file *fp);
void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
struct av_decision *avd, struct extended_perms *xperms);
diff --git a/security/selinux/ss/context.c b/security/selinux/ss/context.c
index e39990f494dd..a528b7f76280 100644
--- a/security/selinux/ss/context.c
+++ b/security/selinux/ss/context.c
@@ -20,7 +20,7 @@ u32 context_compute_hash(const struct context *c)
* context struct with only the len & str set (and vice versa)
* under a given policy. Since context structs from different
* policies should never meet, it is safe to hash valid and
- * invalid contexts differently. The context_cmp() function
+ * invalid contexts differently. The context_equal() function
* already operates under the same assumption.
*/
if (c->len)
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 7ccab2e6965f..dd3b9b5b588e 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -132,13 +132,13 @@ out:
return rc;
}
-static inline int mls_context_cmp(const struct context *c1,
- const struct context *c2)
+static inline bool mls_context_equal(const struct context *c1,
+ const struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
- ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
+ ebitmap_equal(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
- ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat));
+ ebitmap_equal(&c1->range.level[1].cat, &c2->range.level[1].cat));
}
static inline void mls_context_destroy(struct context *c)
@@ -188,15 +188,15 @@ static inline void context_destroy(struct context *c)
mls_context_destroy(c);
}
-static inline int context_cmp(const struct context *c1,
- const struct context *c2)
+static inline bool context_equal(const struct context *c1,
+ const struct context *c2)
{
if (c1->len && c2->len)
return (c1->len == c2->len && !strcmp(c1->str, c2->str));
if (c1->len || c2->len)
return 0;
return ((c1->user == c2->user) && (c1->role == c2->role) &&
- (c1->type == c2->type) && mls_context_cmp(c1, c2));
+ (c1->type == c2->type) && mls_context_equal(c1, c2));
}
u32 context_compute_hash(const struct context *c);
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 99c01be15115..43bc19e21960 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -25,12 +25,12 @@
static struct kmem_cache *ebitmap_node_cachep __ro_after_init;
-int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2)
+bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2)
{
const struct ebitmap_node *n1, *n2;
if (e1->highbit != e2->highbit)
- return 0;
+ return false;
n1 = e1->node;
n2 = e2->node;
@@ -41,9 +41,9 @@ int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2)
}
if (n1 || n2)
- return 0;
+ return false;
- return 1;
+ return true;
}
int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src)
@@ -360,7 +360,7 @@ void ebitmap_destroy(struct ebitmap *e)
e->node = NULL;
}
-int ebitmap_read(struct ebitmap *e, void *fp)
+int ebitmap_read(struct ebitmap *e, struct policy_file *fp)
{
struct ebitmap_node *n = NULL;
u32 mapunit, count, startbit, index, i;
@@ -478,7 +478,7 @@ bad:
goto out;
}
-int ebitmap_write(const struct ebitmap *e, void *fp)
+int ebitmap_write(const struct ebitmap *e, struct policy_file *fp)
{
struct ebitmap_node *n;
u32 bit, count, last_bit, last_startbit;
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index ba2ac3da1153..c9569998f287 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -120,7 +120,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, u32 bit)
(bit) < ebitmap_length(e); \
(bit) = ebitmap_next_positive(e, &(n), bit))
-int ebitmap_cmp(const struct ebitmap *e1, const struct ebitmap *e2);
+bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src);
int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1,
const struct ebitmap *e2);
@@ -129,8 +129,9 @@ int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2,
int ebitmap_get_bit(const struct ebitmap *e, u32 bit);
int ebitmap_set_bit(struct ebitmap *e, u32 bit, int value);
void ebitmap_destroy(struct ebitmap *e);
-int ebitmap_read(struct ebitmap *e, void *fp);
-int ebitmap_write(const struct ebitmap *e, void *fp);
+struct policy_file;
+int ebitmap_read(struct ebitmap *e, struct policy_file *fp);
+int ebitmap_write(const struct ebitmap *e, struct policy_file *fp);
u32 ebitmap_hash(const struct ebitmap *e, u32 hash);
#ifdef CONFIG_NETLABEL
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 383fd2d70878..1382eb3bfde1 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -40,7 +40,8 @@ int hashtab_init(struct hashtab *h, u32 nel_hint)
h->htable = NULL;
if (size) {
- h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
+ h->htable = kcalloc(size, sizeof(*h->htable),
+ GFP_KERNEL | __GFP_NOWARN);
if (!h->htable)
return -ENOMEM;
h->size = size;
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 989c809d310d..a6e49269f535 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -171,7 +171,7 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
* levdatum->level->cat and no bit in l->cat is larger than
* p->p_cats.nprim.
*/
- return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ return ebitmap_contains(&levdatum->level.cat, &l->cat,
p->p_cats.nprim);
}
@@ -289,7 +289,7 @@ int mls_context_to_sid(struct policydb *pol, char oldc, char *scontext,
levdatum = symtab_search(&pol->p_levels, sensitivity);
if (!levdatum)
return -EINVAL;
- context->range.level[l].sens = levdatum->level->sens;
+ context->range.level[l].sens = levdatum->level.sens;
/* Extract category set. */
while (next_cat != NULL) {
@@ -456,7 +456,7 @@ int mls_convert_context(struct policydb *oldp, struct policydb *newp,
if (!levdatum)
return -EINVAL;
- newc->range.level[l].sens = levdatum->level->sens;
+ newc->range.level[l].sens = levdatum->level.sens;
ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, node,
i)
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index 7ef6e8cb0cf4..51df2ebd1211 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -29,7 +29,7 @@ struct mls_range {
static inline int mls_level_eq(const struct mls_level *l1,
const struct mls_level *l2)
{
- return ((l1->sens == l2->sens) && ebitmap_cmp(&l1->cat, &l2->cat));
+ return ((l1->sens == l2->sens) && ebitmap_equal(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(const struct mls_level *l1,
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 383f3ae82a73..91df3db6a88c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -155,6 +155,16 @@ static const struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_COND_XPERMS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_NEVERAUDIT,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static const struct policydb_compat_info *
@@ -296,9 +306,7 @@ static int sens_destroy(void *key, void *datum, void *p)
kfree(key);
if (datum) {
levdatum = datum;
- if (levdatum->level)
- ebitmap_destroy(&levdatum->level->cat);
- kfree(levdatum->level);
+ ebitmap_destroy(&levdatum->level.cat);
}
kfree(datum);
return 0;
@@ -528,6 +536,7 @@ static void policydb_init(struct policydb *p)
ebitmap_init(&p->filename_trans_ttypes);
ebitmap_init(&p->policycaps);
ebitmap_init(&p->permissive_map);
+ ebitmap_init(&p->neveraudit_map);
}
/*
@@ -630,11 +639,11 @@ static int sens_index(void *key, void *datum, void *datap)
p = datap;
if (!levdatum->isalias) {
- if (!levdatum->level->sens ||
- levdatum->level->sens > p->p_levels.nprim)
+ if (!levdatum->level.sens ||
+ levdatum->level.sens > p->p_levels.nprim)
return -EINVAL;
- p->sym_val_to_name[SYM_LEVELS][levdatum->level->sens - 1] = key;
+ p->sym_val_to_name[SYM_LEVELS][levdatum->level.sens - 1] = key;
}
return 0;
@@ -849,6 +858,7 @@ void policydb_destroy(struct policydb *p)
ebitmap_destroy(&p->filename_trans_ttypes);
ebitmap_destroy(&p->policycaps);
ebitmap_destroy(&p->permissive_map);
+ ebitmap_destroy(&p->neveraudit_map);
}
/*
@@ -992,7 +1002,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c)
* Read a MLS range structure from a policydb binary
* representation file.
*/
-static int mls_read_range_helper(struct mls_range *r, void *fp)
+static int mls_read_range_helper(struct mls_range *r, struct policy_file *fp)
{
__le32 buf[2];
u32 items;
@@ -1052,7 +1062,7 @@ out:
* from a policydb binary representation file.
*/
static int context_read_and_validate(struct context *c, struct policydb *p,
- void *fp)
+ struct policy_file *fp)
{
__le32 buf[3];
int rc;
@@ -1090,7 +1100,7 @@ out:
* binary representation file.
*/
-static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
+int str_read(char **strp, gfp_t flags, struct policy_file *fp, u32 len)
{
int rc;
char *str;
@@ -1113,7 +1123,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len)
return 0;
}
-static int perm_read(struct policydb *p, struct symtab *s, void *fp)
+static int perm_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct perm_datum *perdatum;
@@ -1146,7 +1156,7 @@ bad:
return rc;
}
-static int common_read(struct policydb *p, struct symtab *s, void *fp)
+static int common_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct common_datum *comdatum;
@@ -1198,7 +1208,7 @@ static void type_set_init(struct type_set *t)
ebitmap_init(&t->negset);
}
-static int type_set_read(struct type_set *t, void *fp)
+static int type_set_read(struct type_set *t, struct policy_file *fp)
{
__le32 buf[1];
int rc;
@@ -1217,7 +1227,7 @@ static int type_set_read(struct type_set *t, void *fp)
}
static int read_cons_helper(struct policydb *p, struct constraint_node **nodep,
- u32 ncons, int allowxtarget, void *fp)
+ u32 ncons, int allowxtarget, struct policy_file *fp)
{
struct constraint_node *c, *lc;
struct constraint_expr *e, *le;
@@ -1311,7 +1321,7 @@ static int read_cons_helper(struct policydb *p, struct constraint_node **nodep,
return 0;
}
-static int class_read(struct policydb *p, struct symtab *s, void *fp)
+static int class_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct class_datum *cladatum;
@@ -1408,7 +1418,7 @@ bad:
return rc;
}
-static int role_read(struct policydb *p, struct symtab *s, void *fp)
+static int role_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct role_datum *role;
@@ -1465,7 +1475,7 @@ bad:
return rc;
}
-static int type_read(struct policydb *p, struct symtab *s, void *fp)
+static int type_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct type_datum *typdatum;
@@ -1517,7 +1527,7 @@ bad:
* Read a MLS level structure from a policydb binary
* representation file.
*/
-static int mls_read_level(struct mls_level *lp, void *fp)
+static int mls_read_level(struct mls_level *lp, struct policy_file *fp)
{
__le32 buf[1];
int rc;
@@ -1539,7 +1549,7 @@ static int mls_read_level(struct mls_level *lp, void *fp)
return 0;
}
-static int user_read(struct policydb *p, struct symtab *s, void *fp)
+static int user_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct user_datum *usrdatum;
@@ -1590,7 +1600,7 @@ bad:
return rc;
}
-static int sens_read(struct policydb *p, struct symtab *s, void *fp)
+static int sens_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct level_datum *levdatum;
@@ -1613,12 +1623,7 @@ static int sens_read(struct policydb *p, struct symtab *s, void *fp)
if (rc)
goto bad;
- rc = -ENOMEM;
- levdatum->level = kmalloc(sizeof(*levdatum->level), GFP_KERNEL);
- if (!levdatum->level)
- goto bad;
-
- rc = mls_read_level(levdatum->level, fp);
+ rc = mls_read_level(&levdatum->level, fp);
if (rc)
goto bad;
@@ -1631,7 +1636,7 @@ bad:
return rc;
}
-static int cat_read(struct policydb *p, struct symtab *s, void *fp)
+static int cat_read(struct policydb *p, struct symtab *s, struct policy_file *fp)
{
char *key = NULL;
struct cat_datum *catdatum;
@@ -1666,7 +1671,7 @@ bad:
/* clang-format off */
static int (*const read_f[SYM_NUM])(struct policydb *p, struct symtab *s,
- void *fp) = {
+ struct policy_file *fp) = {
common_read,
class_read,
role_read,
@@ -1836,7 +1841,7 @@ u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
return 1U << (perdatum->value - 1);
}
-static int range_read(struct policydb *p, void *fp)
+static int range_read(struct policydb *p, struct policy_file *fp)
{
struct range_trans *rt = NULL;
struct mls_range *r = NULL;
@@ -1913,7 +1918,7 @@ out:
return rc;
}
-static int filename_trans_read_helper_compat(struct policydb *p, void *fp)
+static int filename_trans_read_helper_compat(struct policydb *p, struct policy_file *fp)
{
struct filename_trans_key key, *ft = NULL;
struct filename_trans_datum *last, *datum = NULL;
@@ -1998,7 +2003,7 @@ out:
return rc;
}
-static int filename_trans_read_helper(struct policydb *p, void *fp)
+static int filename_trans_read_helper(struct policydb *p, struct policy_file *fp)
{
struct filename_trans_key *ft = NULL;
struct filename_trans_datum **dst, *datum, *first = NULL;
@@ -2087,7 +2092,7 @@ out:
return rc;
}
-static int filename_trans_read(struct policydb *p, void *fp)
+static int filename_trans_read(struct policydb *p, struct policy_file *fp)
{
u32 nel, i;
__le32 buf[1];
@@ -2128,7 +2133,7 @@ static int filename_trans_read(struct policydb *p, void *fp)
return 0;
}
-static int genfs_read(struct policydb *p, void *fp)
+static int genfs_read(struct policydb *p, struct policy_file *fp)
{
int rc;
u32 i, j, nel, nel2, len, len2;
@@ -2242,7 +2247,7 @@ out:
}
static int ocontext_read(struct policydb *p,
- const struct policydb_compat_info *info, void *fp)
+ const struct policydb_compat_info *info, struct policy_file *fp)
{
int rc;
unsigned int i;
@@ -2439,7 +2444,7 @@ out:
* Read the configuration data from a policy database binary
* representation file into a policy database structure.
*/
-int policydb_read(struct policydb *p, void *fp)
+int policydb_read(struct policydb *p, struct policy_file *fp)
{
struct role_allow *ra, *lra;
struct role_trans_key *rtk = NULL;
@@ -2475,24 +2480,18 @@ int policydb_read(struct policydb *p, void *fp)
goto bad;
}
- rc = -ENOMEM;
- policydb_str = kmalloc(len + 1, GFP_KERNEL);
- if (!policydb_str) {
- pr_err("SELinux: unable to allocate memory for policydb "
- "string of length %d\n",
- len);
- goto bad;
- }
-
- rc = next_entry(policydb_str, fp, len);
+ rc = str_read(&policydb_str, GFP_KERNEL, fp, len);
if (rc) {
- pr_err("SELinux: truncated policydb string identifier\n");
- kfree(policydb_str);
+ if (rc == -ENOMEM) {
+ pr_err("SELinux: unable to allocate memory for policydb string of length %d\n",
+ len);
+ } else {
+ pr_err("SELinux: truncated policydb string identifier\n");
+ }
goto bad;
}
rc = -EINVAL;
- policydb_str[len] = '\0';
if (strcmp(policydb_str, POLICYDB_STRING)) {
pr_err("SELinux: policydb string %s does not match "
"my string %s\n",
@@ -2546,6 +2545,12 @@ int policydb_read(struct policydb *p, void *fp)
goto bad;
}
+ if (p->policyvers >= POLICYDB_VERSION_NEVERAUDIT) {
+ rc = ebitmap_read(&p->neveraudit_map, fp);
+ if (rc)
+ goto bad;
+ }
+
rc = -EINVAL;
info = policydb_lookup_compat(p->policyvers);
if (!info) {
@@ -2762,7 +2767,7 @@ bad:
* Write a MLS level structure to a policydb binary
* representation file.
*/
-static int mls_write_level(struct mls_level *l, void *fp)
+static int mls_write_level(struct mls_level *l, struct policy_file *fp)
{
__le32 buf[1];
int rc;
@@ -2783,7 +2788,7 @@ static int mls_write_level(struct mls_level *l, void *fp)
* Write a MLS range structure to a policydb binary
* representation file.
*/
-static int mls_write_range_helper(struct mls_range *r, void *fp)
+static int mls_write_range_helper(struct mls_range *r, struct policy_file *fp)
{
__le32 buf[3];
size_t items;
@@ -2823,7 +2828,7 @@ static int sens_write(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct level_datum *levdatum = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[2];
size_t len;
int rc;
@@ -2839,7 +2844,7 @@ static int sens_write(void *vkey, void *datum, void *ptr)
if (rc)
return rc;
- rc = mls_write_level(levdatum->level, fp);
+ rc = mls_write_level(&levdatum->level, fp);
if (rc)
return rc;
@@ -2851,7 +2856,7 @@ static int cat_write(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct cat_datum *catdatum = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[3];
size_t len;
int rc;
@@ -2876,7 +2881,7 @@ static int role_trans_write_one(void *key, void *datum, void *ptr)
struct role_trans_key *rtk = key;
struct role_trans_datum *rtd = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
struct policydb *p = pd->p;
__le32 buf[3];
int rc;
@@ -2896,7 +2901,7 @@ static int role_trans_write_one(void *key, void *datum, void *ptr)
return 0;
}
-static int role_trans_write(struct policydb *p, void *fp)
+static int role_trans_write(struct policydb *p, struct policy_file *fp)
{
struct policy_data pd = { .p = p, .fp = fp };
__le32 buf[1];
@@ -2910,7 +2915,7 @@ static int role_trans_write(struct policydb *p, void *fp)
return hashtab_map(&p->role_tr, role_trans_write_one, &pd);
}
-static int role_allow_write(struct role_allow *r, void *fp)
+static int role_allow_write(struct role_allow *r, struct policy_file *fp)
{
struct role_allow *ra;
__le32 buf[2];
@@ -2938,7 +2943,7 @@ static int role_allow_write(struct role_allow *r, void *fp)
* Write a security context structure
* to a policydb binary representation file.
*/
-static int context_write(struct policydb *p, struct context *c, void *fp)
+static int context_write(struct policydb *p, struct context *c, struct policy_file *fp)
{
int rc;
__le32 buf[3];
@@ -2991,7 +2996,7 @@ static int common_write(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct common_datum *comdatum = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[4];
size_t len;
int rc;
@@ -3016,7 +3021,7 @@ static int common_write(void *vkey, void *datum, void *ptr)
return 0;
}
-static int type_set_write(struct type_set *t, void *fp)
+static int type_set_write(struct type_set *t, struct policy_file *fp)
{
int rc;
__le32 buf[1];
@@ -3035,7 +3040,7 @@ static int type_set_write(struct type_set *t, void *fp)
}
static int write_cons_helper(struct policydb *p, struct constraint_node *node,
- void *fp)
+ struct policy_file *fp)
{
struct constraint_node *c;
struct constraint_expr *e;
@@ -3086,7 +3091,7 @@ static int class_write(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct class_datum *cladatum = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
struct policydb *p = pd->p;
struct constraint_node *c;
__le32 buf[6];
@@ -3171,7 +3176,7 @@ static int role_write(void *vkey, void *datum, void *ptr)
char *key = vkey;
struct role_datum *role = datum;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
struct policydb *p = pd->p;
__le32 buf[3];
size_t items, len;
@@ -3211,7 +3216,7 @@ static int type_write(void *vkey, void *datum, void *ptr)
struct type_datum *typdatum = datum;
struct policy_data *pd = ptr;
struct policydb *p = pd->p;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[4];
int rc;
size_t items, len;
@@ -3252,7 +3257,7 @@ static int user_write(void *vkey, void *datum, void *ptr)
struct user_datum *usrdatum = datum;
struct policy_data *pd = ptr;
struct policydb *p = pd->p;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
__le32 buf[3];
size_t items, len;
int rc;
@@ -3301,7 +3306,8 @@ static int (*const write_f[SYM_NUM])(void *key, void *datum, void *datap) = {
/* clang-format on */
static int ocontext_write(struct policydb *p,
- const struct policydb_compat_info *info, void *fp)
+ const struct policydb_compat_info *info,
+ struct policy_file *fp)
{
unsigned int i, j;
int rc;
@@ -3437,7 +3443,7 @@ static int ocontext_write(struct policydb *p,
return 0;
}
-static int genfs_write(struct policydb *p, void *fp)
+static int genfs_write(struct policydb *p, struct policy_file *fp)
{
struct genfs *genfs;
struct ocontext *c;
@@ -3495,7 +3501,7 @@ static int range_write_helper(void *key, void *data, void *ptr)
struct range_trans *rt = key;
struct mls_range *r = data;
struct policy_data *pd = ptr;
- void *fp = pd->fp;
+ struct policy_file *fp = pd->fp;
struct policydb *p = pd->p;
int rc;
@@ -3517,7 +3523,7 @@ static int range_write_helper(void *key, void *data, void *ptr)
return 0;
}
-static int range_write(struct policydb *p, void *fp)
+static int range_write(struct policydb *p, struct policy_file *fp)
{
__le32 buf[1];
int rc;
@@ -3544,7 +3550,7 @@ static int filename_write_helper_compat(void *key, void *data, void *ptr)
struct filename_trans_key *ft = key;
struct filename_trans_datum *datum = data;
struct ebitmap_node *node;
- void *fp = ptr;
+ struct policy_file *fp = ptr;
__le32 buf[4];
int rc;
u32 bit, len = strlen(ft->name);
@@ -3581,7 +3587,7 @@ static int filename_write_helper(void *key, void *data, void *ptr)
{
struct filename_trans_key *ft = key;
struct filename_trans_datum *datum;
- void *fp = ptr;
+ struct policy_file *fp = ptr;
__le32 buf[3];
int rc;
u32 ndatum, len = strlen(ft->name);
@@ -3626,7 +3632,7 @@ static int filename_write_helper(void *key, void *data, void *ptr)
return 0;
}
-static int filename_trans_write(struct policydb *p, void *fp)
+static int filename_trans_write(struct policydb *p, struct policy_file *fp)
{
__le32 buf[1];
int rc;
@@ -3658,7 +3664,7 @@ static int filename_trans_write(struct policydb *p, void *fp)
* structure to a policy database binary representation
* file.
*/
-int policydb_write(struct policydb *p, void *fp)
+int policydb_write(struct policydb *p, struct policy_file *fp)
{
unsigned int num_syms;
int rc;
@@ -3730,6 +3736,12 @@ int policydb_write(struct policydb *p, void *fp)
return rc;
}
+ if (p->policyvers >= POLICYDB_VERSION_NEVERAUDIT) {
+ rc = ebitmap_write(&p->neveraudit_map, fp);
+ if (rc)
+ return rc;
+ }
+
num_syms = info->sym_num;
for (i = 0; i < num_syms; i++) {
struct policy_data pd;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 4bba386264a3..89a180b1742f 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -126,7 +126,7 @@ struct user_datum {
/* Sensitivity attributes */
struct level_datum {
- struct mls_level *level; /* sensitivity and associated categories */
+ struct mls_level level; /* sensitivity and associated categories */
unsigned char isalias; /* is this sensitivity an alias for another? */
};
@@ -144,7 +144,7 @@ struct range_trans {
/* Boolean data type */
struct cond_bool_datum {
- __u32 value; /* internal type value */
+ u32 value; /* internal type value */
int state;
};
@@ -300,6 +300,8 @@ struct policydb {
struct ebitmap permissive_map;
+ struct ebitmap neveraudit_map;
+
/* length of this policy when it was loaded */
size_t len;
@@ -312,14 +314,19 @@ struct policydb {
u32 process_trans_perms;
} __randomize_layout;
+struct policy_file {
+ char *data;
+ size_t len;
+};
+
extern void policydb_destroy(struct policydb *p);
extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
extern int policydb_context_isvalid(struct policydb *p, struct context *c);
extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
-extern int policydb_read(struct policydb *p, void *fp);
-extern int policydb_write(struct policydb *p, void *fp);
+extern int policydb_read(struct policydb *p, struct policy_file *fp);
+extern int policydb_write(struct policydb *p, struct policy_file *fp);
extern struct filename_trans_datum *
policydb_filenametr_search(struct policydb *p, struct filename_trans_key *key);
@@ -342,14 +349,9 @@ policydb_roletr_search(struct policydb *p, struct role_trans_key *key);
#define POLICYDB_MAGIC SELINUX_MAGIC
#define POLICYDB_STRING "SE Linux"
-struct policy_file {
- char *data;
- size_t len;
-};
-
struct policy_data {
struct policydb *p;
- void *fp;
+ struct policy_file *fp;
};
static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
@@ -386,6 +388,8 @@ static inline char *sym_name(struct policydb *p, unsigned int sym_num,
return p->sym_val_to_name[sym_num][element_nr];
}
+extern int str_read(char **strp, gfp_t flags, struct policy_file *fp, u32 len);
+
extern u16 string_to_security_class(struct policydb *p, const char *name);
extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
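
policy_file above is just an in-memory cursor over the loaded policy image, which is why the readers and writers in policydb.c, avtab.c and conditional.c can now take it instead of a void *. The standalone model below mirrors what next_entry() in policydb.h does with that cursor (bounds-check, copy, advance); the error value and the demo image are illustrative, not taken from the kernel. The newly shared str_read() layers a length check, allocation and NUL termination on top of the same cursor.

    /* Rough standalone model of the policy_file read cursor. */
    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct policy_file {
        char *data;
        size_t len;
    };

    static int next_entry(void *buf, struct policy_file *fp, size_t bytes)
    {
        if (bytes > fp->len)
            return -1; /* truncated policy image */

        memcpy(buf, fp->data, bytes);
        fp->data += bytes;
        fp->len -= bytes;
        return 0;
    }

    int main(void)
    {
        char image[] = { 0x08, 0x00, 0x00, 0x00, 'S', 'E' };
        struct policy_file fp = { .data = image, .len = sizeof(image) };
        unsigned int word;
        char tag[3] = { 0 };

        if (next_entry(&word, &fp, sizeof(word)) ||
            next_entry(tag, &fp, 2))
            return 1;
        printf("word=%u tag=%s remaining=%zu\n", word, tag, fp.len);
        return 0;
    }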
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 971c45d576ba..713130bd43c4 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -46,6 +46,7 @@
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/audit.h>
+#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/lsm_hooks.h>
#include <net/netlabel.h>
@@ -582,7 +583,7 @@ static void type_attribute_bounds_av(struct policydb *policydb,
}
/*
- * Flag which drivers have permissions.
+ * Flag which drivers have permissions and which base permissions are covered.
*/
void services_compute_xperms_drivers(
struct extended_perms *xperms,
@@ -592,12 +593,19 @@ void services_compute_xperms_drivers(
switch (node->datum.u.xperms->specified) {
case AVTAB_XPERMS_IOCTLDRIVER:
+ xperms->base_perms |= AVC_EXT_IOCTL;
/* if one or more driver has all permissions allowed */
for (i = 0; i < ARRAY_SIZE(xperms->drivers.p); i++)
xperms->drivers.p[i] |= node->datum.u.xperms->perms.p[i];
break;
case AVTAB_XPERMS_IOCTLFUNCTION:
+ xperms->base_perms |= AVC_EXT_IOCTL;
+ /* if allowing permissions within a driver */
+ security_xperm_set(xperms->drivers.p,
+ node->datum.u.xperms->driver);
+ break;
case AVTAB_XPERMS_NLMSG:
+ xperms->base_perms |= AVC_EXT_NLMSG;
/* if allowing permissions within a driver */
security_xperm_set(xperms->drivers.p,
node->datum.u.xperms->driver);
@@ -631,8 +639,7 @@ static void context_struct_compute_av(struct policydb *policydb,
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
if (xperms) {
- memset(&xperms->drivers, 0, sizeof(xperms->drivers));
- xperms->len = 0;
+ memset(xperms, 0, sizeof(*xperms));
}
if (unlikely(!tclass || tclass > policydb->p_classes.nprim)) {
@@ -946,7 +953,7 @@ static void avd_init(struct selinux_policy *policy, struct av_decision *avd)
}
static void update_xperms_extended_data(u8 specified,
- struct extended_perms_data *from,
+ const struct extended_perms_data *from,
struct extended_perms_data *xp_data)
{
unsigned int i;
@@ -967,38 +974,52 @@ static void update_xperms_extended_data(u8 specified,
void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
struct avtab_node *node)
{
+ u16 specified;
+
switch (node->datum.u.xperms->specified) {
case AVTAB_XPERMS_IOCTLFUNCTION:
- case AVTAB_XPERMS_NLMSG:
- if (xpermd->driver != node->datum.u.xperms->driver)
+ if (xpermd->base_perm != AVC_EXT_IOCTL ||
+ xpermd->driver != node->datum.u.xperms->driver)
return;
break;
case AVTAB_XPERMS_IOCTLDRIVER:
- if (!security_xperm_test(node->datum.u.xperms->perms.p,
- xpermd->driver))
+ if (xpermd->base_perm != AVC_EXT_IOCTL ||
+ !security_xperm_test(node->datum.u.xperms->perms.p,
+ xpermd->driver))
+ return;
+ break;
+ case AVTAB_XPERMS_NLMSG:
+ if (xpermd->base_perm != AVC_EXT_NLMSG ||
+ xpermd->driver != node->datum.u.xperms->driver)
return;
break;
default:
- BUG();
+ pr_warn_once(
+ "SELinux: unknown extended permission (%u) will be ignored\n",
+ node->datum.u.xperms->specified);
+ return;
}
- if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+ specified = node->key.specified & ~(AVTAB_ENABLED | AVTAB_ENABLED_OLD);
+
+ if (specified == AVTAB_XPERMS_ALLOWED) {
xpermd->used |= XPERMS_ALLOWED;
update_xperms_extended_data(node->datum.u.xperms->specified,
&node->datum.u.xperms->perms,
xpermd->allowed);
- } else if (node->key.specified == AVTAB_XPERMS_AUDITALLOW) {
+ } else if (specified == AVTAB_XPERMS_AUDITALLOW) {
xpermd->used |= XPERMS_AUDITALLOW;
update_xperms_extended_data(node->datum.u.xperms->specified,
&node->datum.u.xperms->perms,
xpermd->auditallow);
- } else if (node->key.specified == AVTAB_XPERMS_DONTAUDIT) {
+ } else if (specified == AVTAB_XPERMS_DONTAUDIT) {
xpermd->used |= XPERMS_DONTAUDIT;
update_xperms_extended_data(node->datum.u.xperms->specified,
&node->datum.u.xperms->perms,
xpermd->dontaudit);
} else {
- BUG();
+ pr_warn_once("SELinux: unknown specified key (%u)\n",
+ node->key.specified);
}
}
@@ -1006,6 +1027,7 @@ void security_compute_xperms_decision(u32 ssid,
u32 tsid,
u16 orig_tclass,
u8 driver,
+ u8 base_perm,
struct extended_perms_decision *xpermd)
{
struct selinux_policy *policy;
@@ -1019,6 +1041,7 @@ void security_compute_xperms_decision(u32 ssid,
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
+ xpermd->base_perm = base_perm;
xpermd->driver = driver;
xpermd->used = 0;
memset(xpermd->allowed->p, 0, sizeof(xpermd->allowed->p));
@@ -1130,6 +1153,14 @@ void security_compute_av(u32 ssid,
if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
+ /* neveraudit domain? */
+ if (ebitmap_get_bit(&policydb->neveraudit_map, scontext->type))
+ avd->flags |= AVD_FLAGS_NEVERAUDIT;
+
+ /* both permissive and neveraudit => allow */
+ if (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT))
+ goto allow;
+
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
pr_err("SELinux: %s: unrecognized SID %d\n",
@@ -1149,6 +1180,8 @@ void security_compute_av(u32 ssid,
policydb->allow_unknown);
out:
rcu_read_unlock();
+ if (avd->flags & AVD_FLAGS_NEVERAUDIT)
+ avd->auditallow = avd->auditdeny = 0;
return;
allow:
avd->allowed = 0xffffffff;
@@ -1185,6 +1218,14 @@ void security_compute_av_user(u32 ssid,
if (ebitmap_get_bit(&policydb->permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
+ /* neveraudit domain? */
+ if (ebitmap_get_bit(&policydb->neveraudit_map, scontext->type))
+ avd->flags |= AVD_FLAGS_NEVERAUDIT;
+
+ /* both permissive and neveraudit => allow */
+ if (avd->flags == (AVD_FLAGS_PERMISSIVE|AVD_FLAGS_NEVERAUDIT))
+ goto allow;
+
tcontext = sidtab_search(sidtab, tsid);
if (!tcontext) {
pr_err("SELinux: %s: unrecognized SID %d\n",
@@ -1202,6 +1243,8 @@ void security_compute_av_user(u32 ssid,
NULL);
out:
rcu_read_unlock();
+ if (avd->flags & AVD_FLAGS_NEVERAUDIT)
+ avd->auditallow = avd->auditdeny = 0;
return;
allow:
avd->allowed = 0xffffffff;
@@ -1886,11 +1929,17 @@ retry:
goto out_unlock;
}
/* Obtain the sid for the context. */
- rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
- if (rc == -ESTALE) {
- rcu_read_unlock();
- context_destroy(&newcontext);
- goto retry;
+ if (context_equal(scontext, &newcontext))
+ *out_sid = ssid;
+ else if (context_equal(tcontext, &newcontext))
+ *out_sid = tsid;
+ else {
+ rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+ if (rc == -ESTALE) {
+ rcu_read_unlock();
+ context_destroy(&newcontext);
+ goto retry;
+ }
}
out_unlock:
rcu_read_unlock();
@@ -2550,13 +2599,14 @@ out:
* @name: interface name
* @if_sid: interface SID
*/
-int security_netif_sid(char *name, u32 *if_sid)
+int security_netif_sid(const char *name, u32 *if_sid)
{
struct selinux_policy *policy;
struct policydb *policydb;
struct sidtab *sidtab;
int rc;
struct ocontext *c;
+ bool wildcard_support;
if (!selinux_initialized()) {
*if_sid = SECINITSID_NETIF;
@@ -2569,11 +2619,18 @@ retry:
policy = rcu_dereference(selinux_state.policy);
policydb = &policy->policydb;
sidtab = policy->sidtab;
+ wildcard_support = ebitmap_get_bit(&policydb->policycaps, POLICYDB_CAP_NETIF_WILDCARD);
c = policydb->ocontexts[OCON_NETIF];
while (c) {
- if (strcmp(name, c->u.name) == 0)
- break;
+ if (wildcard_support) {
+ if (match_wildcard(c->u.name, name))
+ break;
+ } else {
+ if (strcmp(c->u.name, name) == 0)
+ break;
+ }
+
c = c->next;
}
@@ -2593,17 +2650,15 @@ out:
return rc;
}
-static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
+static bool match_ipv6_addrmask(const u32 input[4], const u32 addr[4], const u32 mask[4])
{
- int i, fail = 0;
+ int i;
for (i = 0; i < 4; i++)
- if (addr[i] != (input[i] & mask[i])) {
- fail = 1;
- break;
- }
+ if (addr[i] != (input[i] & mask[i]))
+ return false;
- return !fail;
+ return true;
}
/**
@@ -2614,7 +2669,7 @@ static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
* @out_sid: security identifier
*/
int security_node_sid(u16 domain,
- void *addrp,
+ const void *addrp,
u32 addrlen,
u32 *out_sid)
{
@@ -2643,7 +2698,7 @@ retry:
if (addrlen != sizeof(u32))
goto out;
- addr = *((u32 *)addrp);
+ addr = *((const u32 *)addrp);
c = policydb->ocontexts[OCON_NODE];
while (c) {
@@ -2708,7 +2763,7 @@ out:
*/
int security_get_user_sids(u32 fromsid,
- char *username,
+ const char *username,
u32 **sids,
u32 *nel)
{
@@ -2843,6 +2898,7 @@ static inline int __security_genfs_sid(struct selinux_policy *policy,
struct genfs *genfs;
struct ocontext *c;
int cmp = 0;
+ bool wildcard;
while (path[0] == '/' && path[1] == '/')
path++;
@@ -2859,11 +2915,20 @@ static inline int __security_genfs_sid(struct selinux_policy *policy,
if (!genfs || cmp)
return -ENOENT;
+ wildcard = ebitmap_get_bit(&policy->policydb.policycaps,
+ POLICYDB_CAP_GENFS_SECLABEL_WILDCARD);
for (c = genfs->head; c; c = c->next) {
- size_t len = strlen(c->u.name);
- if ((!c->v.sclass || sclass == c->v.sclass) &&
- (strncmp(c->u.name, path, len) == 0))
- break;
+ if (!c->v.sclass || sclass == c->v.sclass) {
+ if (wildcard) {
+ if (match_wildcard(c->u.name, path))
+ break;
+ } else {
+ size_t len = strlen(c->u.name);
+
+ if ((strncmp(c->u.name, path, len)) == 0)
+ break;
+ }
+ }
}
if (!c)
@@ -3030,7 +3095,7 @@ err:
}
-int security_set_bools(u32 len, int *values)
+int security_set_bools(u32 len, const int *values)
{
struct selinux_state *state = &selinux_state;
struct selinux_policy *newpolicy, *oldpolicy;
@@ -3329,7 +3394,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
__func__, xfrm_sid);
goto out;
}
- rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
+ rc = (mls_context_equal(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
if (rc)
goto out;
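
security_netif_sid() and __security_genfs_sid() above only switch to match_wildcard() (from <linux/parser.h>, included at the top of services.c in this patch) when the new netif_wildcard / genfs_seclabel_wildcard policy capabilities are set, so existing policies keep their exact-match and prefix-match behaviour. Below is a simplified stand-in for that matcher, assuming the usual glob semantics of '*' (any run of characters, including none) and '?' (any single character); wc_match() is not the kernel function.

    /* Simplified glob matcher illustrating wildcard netif/genfs labels. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool wc_match(const char *pattern, const char *str)
    {
        if (*pattern == '\0')
            return *str == '\0';
        if (*pattern == '*')
            return wc_match(pattern + 1, str) ||
                   (*str != '\0' && wc_match(pattern, str + 1));
        if (*str != '\0' && (*pattern == '?' || *pattern == *str))
            return wc_match(pattern + 1, str + 1);
        return false;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               wc_match("eth*", "eth0"),            /* 1 */
               wc_match("wlan?", "wlan10"),         /* 0 */
               wc_match("/proc/*", "/proc/irq/13"));/* 1 */
        return 0;
    }

Under these semantics a genfs entry of "/proc/*" behaves roughly like the old "/proc/" prefix match that the non-wildcard path (strncmp over strlen(c->u.name)) still provides.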
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index c8848cbba81f..59f8c09158ef 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -66,7 +66,7 @@ static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
if (entry->hash != hash)
continue;
- if (context_cmp(&entry->context, context)) {
+ if (context_equal(&entry->context, context)) {
sid = entry->sid;
break;
}
@@ -114,12 +114,12 @@ int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
- int i;
+ unsigned int i;
int chain_len = 0;
int slots_used = 0;
int entries = 0;
int max_chain_len = 0;
- int cur_bucket = 0;
+ unsigned int cur_bucket = 0;
struct sidtab_entry *entry;
rcu_read_lock();
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 90ec4ef1b082..61d56b0c2be1 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -94,7 +94,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
ctx->ctx_doi = XFRM_SC_DOI_LSM;
ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_len = str_len;
+ ctx->ctx_len = str_len + 1;
memcpy(ctx->ctx_str, &uctx[1], str_len);
ctx->ctx_str[str_len] = '\0';
rc = security_context_to_sid(ctx->ctx_str, str_len,
diff --git a/security/smack/smack.h b/security/smack/smack.h
index dbf8d7226eb5..bf6a6ed3946c 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -42,7 +42,7 @@
/*
* This is the repository for labels seen so that it is
- * not necessary to keep allocating tiny chuncks of memory
+ * not necessary to keep allocating tiny chunks of memory
* and so that they can be shared.
*
* Labels are never modified in place. Anytime a label
@@ -152,6 +152,7 @@ struct smk_net4addr {
struct smack_known *smk_label; /* label */
};
+#if IS_ENABLED(CONFIG_IPV6)
/*
* An entry in the table identifying IPv6 hosts.
*/
@@ -162,7 +163,9 @@ struct smk_net6addr {
int smk_masks; /* mask size */
struct smack_known *smk_label; /* label */
};
+#endif /* CONFIG_IPV6 */
+#ifdef SMACK_IPV6_PORT_LABELING
/*
* An entry in the table identifying ports.
*/
@@ -175,6 +178,7 @@ struct smk_port_label {
short smk_sock_type; /* Socket type */
short smk_can_reuse;
};
+#endif /* SMACK_IPV6_PORT_LABELING */
struct smack_known_list_elem {
struct list_head list;
@@ -280,6 +284,7 @@ int smk_access(struct smack_known *, struct smack_known *,
int smk_tskacc(struct task_smack *, struct smack_known *,
u32, struct smk_audit_info *);
int smk_curacc(struct smack_known *, u32, struct smk_audit_info *);
+int smack_str_from_perm(char *string, int access);
struct smack_known *smack_from_secid(const u32);
char *smk_parse_smack(const char *string, int len);
int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
@@ -314,7 +319,9 @@ extern struct smack_known smack_known_web;
extern struct mutex smack_known_lock;
extern struct list_head smack_known_list;
extern struct list_head smk_net4addr_list;
+#if IS_ENABLED(CONFIG_IPV6)
extern struct list_head smk_net6addr_list;
+#endif /* CONFIG_IPV6 */
extern struct mutex smack_onlycap_lock;
extern struct list_head smack_onlycap_list;
@@ -425,6 +432,12 @@ static inline struct smack_known *smk_of_current(void)
return smk_of_task(smack_cred(current_cred()));
}
+void smack_log(char *subject_label, char *object_label,
+ int request,
+ int result, struct smk_audit_info *auditdata);
+
+#ifdef CONFIG_AUDIT
+
/*
* logging functions
*/
@@ -432,12 +445,6 @@ static inline struct smack_known *smk_of_current(void)
#define SMACK_AUDIT_ACCEPT 0x2
extern int log_policy;
-void smack_log(char *subject_label, char *object_label,
- int request,
- int result, struct smk_audit_info *auditdata);
-
-#ifdef CONFIG_AUDIT
-
/*
* some inline functions to set up audit data
* they do nothing if CONFIG_AUDIT is not set
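The smack.h hunks above wrap the IPv6 host table, the port-label table and the audit declarations in preprocessor guards so they are only compiled when the corresponding support is configured. A self-contained sketch of the #if IS_ENABLED() guard style; the IS_ENABLED() below is a simplified stand-in (the real macro lives in include/linux/kconfig.h and also handles =m via CONFIG_FOO_MODULE), and demo_net6addr is a made-up mirror of the guarded structure:

#include <stdio.h>

#define CONFIG_IPV6 1                    /* pretend the option is built in */
#define IS_ENABLED(option) option        /* simplified stand-in, =y/unset only */

#if IS_ENABLED(CONFIG_IPV6)
struct demo_net6addr {                   /* only compiled when IPv6 is on */
        int smk_masks;
        const char *smk_label;
};
#endif /* CONFIG_IPV6 */

int main(void)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct demo_net6addr a = { .smk_masks = 64, .smk_label = "Rubble" };

        printf("IPv6 host entry: /%d -> %s\n", a.smk_masks, a.smk_label);
#else
        printf("IPv6 tables compiled out\n");
#endif
        return 0;
}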
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 585e5e35710b..2e4a0cb22782 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -45,11 +45,13 @@ LIST_HEAD(smack_known_list);
*/
static u32 smack_next_secid = 10;
+#ifdef CONFIG_AUDIT
/*
* what events do we log
* can be overwritten at run-time by /smack/logging
*/
int log_policy = SMACK_AUDIT_DENIED;
+#endif /* CONFIG_AUDIT */
/**
* smk_access_entry - look up matching access rule
@@ -242,7 +244,7 @@ int smk_tskacc(struct task_smack *tsp, struct smack_known *obj_known,
}
/*
- * Allow for priviliged to override policy.
+ * Allow for privileged to override policy.
*/
if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
rc = 0;
@@ -275,15 +277,14 @@ int smk_curacc(struct smack_known *obj_known,
return smk_tskacc(tsp, obj_known, mode, a);
}
-#ifdef CONFIG_AUDIT
/**
- * smack_str_from_perm : helper to transalate an int to a
+ * smack_str_from_perm : helper to translate an int to a
* readable string
* @string : the string to fill
* @access : the int
*
*/
-static inline void smack_str_from_perm(char *string, int access)
+int smack_str_from_perm(char *string, int access)
{
int i = 0;
@@ -299,8 +300,15 @@ static inline void smack_str_from_perm(char *string, int access)
string[i++] = 't';
if (access & MAY_LOCK)
string[i++] = 'l';
+ if (access & MAY_BRINGUP)
+ string[i++] = 'b';
+ if (i == 0)
+ string[i++] = '-';
string[i] = '\0';
+ return i;
}
+
+#ifdef CONFIG_AUDIT
/**
* smack_log_callback - SMACK specific information
* will be called by generic audit code
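With the change above, smack_str_from_perm() becomes a shared, non-static helper that also emits 'b' for MAY_BRINGUP, prints '-' for an empty mode, and returns the string length. The following standalone paraphrase exercises that behaviour; the MAY_* bit values are placeholders for illustration, the real ones come from <linux/fs.h> and Smack's own headers:

#include <stdio.h>

/* Placeholder bit values, for illustration only. */
#define MAY_EXEC      0x01
#define MAY_WRITE     0x02
#define MAY_READ      0x04
#define MAY_APPEND    0x08
#define MAY_TRANSMUTE 0x10
#define MAY_LOCK      0x20
#define MAY_BRINGUP   0x40

/* Mirrors the shared helper: fill @string, return its length. */
static int str_from_perm(char *string, int access)
{
        int i = 0;

        if (access & MAY_READ)
                string[i++] = 'r';
        if (access & MAY_WRITE)
                string[i++] = 'w';
        if (access & MAY_EXEC)
                string[i++] = 'x';
        if (access & MAY_APPEND)
                string[i++] = 'a';
        if (access & MAY_TRANSMUTE)
                string[i++] = 't';
        if (access & MAY_LOCK)
                string[i++] = 'l';
        if (access & MAY_BRINGUP)
                string[i++] = 'b';
        if (i == 0)
                string[i++] = '-';      /* empty mode prints as "-" */
        string[i] = '\0';
        return i;
}

int main(void)
{
        char acc[8];                    /* "rwxatlb" plus NUL at most */
        int len;

        len = str_from_perm(acc, MAY_READ | MAY_WRITE);
        printf("MAY_READ|MAY_WRITE -> \"%s\" (len %d)\n", acc, len);

        len = str_from_perm(acc, 0);
        printf("no bits set        -> \"%s\" (len %d)\n", acc, len);

        len = str_from_perm(acc, MAY_BRINGUP);
        printf("MAY_BRINGUP        -> \"%s\" (len %d)\n", acc, len);
        return 0;
}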
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0c476282e279..fc340a6f0dde 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -24,7 +24,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/dccp.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -107,23 +106,7 @@ static char *smk_bu_mess[] = {
static void smk_bu_mode(int mode, char *s)
{
- int i = 0;
-
- if (mode & MAY_READ)
- s[i++] = 'r';
- if (mode & MAY_WRITE)
- s[i++] = 'w';
- if (mode & MAY_EXEC)
- s[i++] = 'x';
- if (mode & MAY_APPEND)
- s[i++] = 'a';
- if (mode & MAY_TRANSMUTE)
- s[i++] = 't';
- if (mode & MAY_LOCK)
- s[i++] = 'l';
- if (i == 0)
- s[i++] = '-';
- s[i] = '\0';
+ smack_str_from_perm(s, mode);
}
#endif
@@ -1950,7 +1933,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
*/
file = fown->file;
- /* we don't log here as rc can be overriden */
+ /* we don't log here as rc can be overridden */
blob = smack_file(file);
skp = *blob;
rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
@@ -2508,6 +2491,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
return NULL;
}
+#if IS_ENABLED(CONFIG_IPV6)
/*
* smk_ipv6_localhost - Check for local ipv6 host address
* @sip: the address
@@ -2575,6 +2559,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
return NULL;
}
+#endif /* CONFIG_IPV6 */
/**
* smack_netlbl_add - Set the secattr on a socket
@@ -2679,6 +2664,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap)
return rc;
}
+#if IS_ENABLED(CONFIG_IPV6)
/**
* smk_ipv6_check - check Smack access
* @subject: subject Smack label
@@ -2711,6 +2697,7 @@ static int smk_ipv6_check(struct smack_known *subject,
rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
return rc;
}
+#endif /* CONFIG_IPV6 */
#ifdef SMACK_IPV6_PORT_LABELING
/**
@@ -3043,7 +3030,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
return 0;
if (addrlen < offsetofend(struct sockaddr, sa_family))
return 0;
- if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sap->sa_family == AF_INET6) {
struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
struct smack_known *rsp = NULL;
@@ -3063,6 +3052,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
return rc;
}
+#endif /* CONFIG_IPV6 */
+
if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
return 0;
rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap);
@@ -4069,7 +4060,6 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
__be16 frag_off;
struct tcphdr _tcph, *th;
struct udphdr _udph, *uh;
- struct dccp_hdr _dccph, *dh;
sip->sin6_port = 0;
@@ -4098,11 +4088,6 @@ static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
if (uh != NULL)
sip->sin6_port = uh->source;
break;
- case IPPROTO_DCCP:
- dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
- if (dh != NULL)
- sip->sin6_port = dh->dccph_sport;
- break;
}
return proto;
}
@@ -4211,7 +4196,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
/*
* Receiving a packet requires that the other end
* be able to write here. Read access is not required.
- * This is the simplist possible security model
+ * This is the simplest possible security model
* for networking.
*/
rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad);
@@ -4224,7 +4209,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
case PF_INET6:
proto = smk_skb_to_addr_ipv6(skb, &sadd);
if (proto != IPPROTO_UDP && proto != IPPROTO_UDPLITE &&
- proto != IPPROTO_TCP && proto != IPPROTO_DCCP)
+ proto != IPPROTO_TCP)
break;
#ifdef SMACK_IPV6_SECMARK_LABELING
skp = smack_from_skb(skb);
@@ -4359,29 +4344,6 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
}
/**
- * smack_sock_graft - Initialize a newly created socket with an existing sock
- * @sk: child sock
- * @parent: parent socket
- *
- * Set the smk_{in,out} state of an existing sock based on the process that
- * is creating the new socket.
- */
-static void smack_sock_graft(struct sock *sk, struct socket *parent)
-{
- struct socket_smack *ssp;
- struct smack_known *skp = smk_of_current();
-
- if (sk == NULL ||
- (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
- return;
-
- ssp = smack_sock(sk);
- ssp->smk_in = skp;
- ssp->smk_out = skp;
- /* cssp->smk_packet is already set in smack_inet_csk_clone() */
-}
-
-/**
* smack_inet_conn_request - Smack access check on connect
* @sk: socket involved
* @skb: packet
@@ -4717,7 +4679,7 @@ static int smack_post_notification(const struct cred *w_cred,
* @gfp: type of the memory for the allocation
*
* Prepare to audit cases where (@field @op @rulestr) is true.
- * The label to be audited is created if necessay.
+ * The label to be audited is created if necessary.
*/
static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
gfp_t gfp)
@@ -4818,40 +4780,47 @@ static int smack_ismaclabel(const char *name)
}
/**
+ * smack_to_secctx - fill a lsm_context
+ * @skp: Smack label
+ * @cp: destination
+ *
+ * Fill the passed @cp and return the length of the string
+ */
+static int smack_to_secctx(struct smack_known *skp, struct lsm_context *cp)
+{
+ int len = strlen(skp->smk_known);
+
+ if (cp) {
+ cp->context = skp->smk_known;
+ cp->len = len;
+ cp->id = LSM_ID_SMACK;
+ }
+ return len;
+}
+
+/**
* smack_secid_to_secctx - return the smack label for a secid
* @secid: incoming integer
- * @secdata: destination
- * @seclen: how long it is
+ * @cp: destination
*
* Exists for networking code.
*/
-static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+static int smack_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
- struct smack_known *skp = smack_from_secid(secid);
-
- if (secdata)
- *secdata = skp->smk_known;
- *seclen = strlen(skp->smk_known);
- return 0;
+ return smack_to_secctx(smack_from_secid(secid), cp);
}
/**
* smack_lsmprop_to_secctx - return the smack label
* @prop: includes incoming Smack data
- * @secdata: destination
- * @seclen: how long it is
+ * @cp: destination
*
* Exists for audit code.
*/
-static int smack_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
- u32 *seclen)
+static int smack_lsmprop_to_secctx(struct lsm_prop *prop,
+ struct lsm_context *cp)
{
- struct smack_known *skp = prop->smack.skp;
-
- if (secdata)
- *secdata = skp->smk_known;
- *seclen = strlen(skp->smk_known);
- return 0;
+ return smack_to_secctx(prop->smack.skp, cp);
}
/**
@@ -4891,12 +4860,13 @@ static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
ctx, ctxlen, 0, NULL);
}
-static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+static int smack_inode_getsecctx(struct inode *inode, struct lsm_context *cp)
{
struct smack_known *skp = smk_of_inode(inode);
- *ctx = skp->smk_known;
- *ctxlen = strlen(skp->smk_known);
+ cp->context = skp->smk_known;
+ cp->len = strlen(skp->smk_known);
+ cp->id = LSM_ID_SMACK;
return 0;
}
@@ -5187,7 +5157,6 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(sk_free_security, smack_sk_free_security),
#endif
LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security),
- LSM_HOOK_INIT(sock_graft, smack_sock_graft),
LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request),
LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone),
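The secctx refactor in the smack_lsm.c hunks funnels both callers through one helper that fills the passed lsm_context when given one and returns the label length either way. A standalone sketch of that calling convention, using demo types rather than the kernel's struct lsm_context:

#include <stdio.h>
#include <string.h>

struct demo_context {            /* stand-in for struct lsm_context */
        const char *context;
        int len;
        int id;
};

#define DEMO_ID_SMACK 100        /* placeholder for LSM_ID_SMACK */

/* Fill @cp if the caller supplied one; always report the label length. */
static int label_to_secctx(const char *label, struct demo_context *cp)
{
        int len = (int)strlen(label);

        if (cp) {
                cp->context = label;
                cp->len = len;
                cp->id = DEMO_ID_SMACK;
        }
        return len;
}

int main(void)
{
        struct demo_context cp;

        /* Callers that only need the size can pass NULL... */
        printf("size query: %d\n", label_to_secctx("System", NULL));

        /* ...while others get the context filled in. */
        label_to_secctx("System", &cp);
        printf("filled: context=\"%s\" len=%d id=%d\n", cp.context, cp.len, cp.id);
        return 0;
}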
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 1401412fd794..b1e5e62f5cbd 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -41,7 +41,9 @@ enum smk_inos {
SMK_AMBIENT = 7, /* internet ambient label */
SMK_NET4ADDR = 8, /* single label hosts */
SMK_ONLYCAP = 9, /* the only "capable" label */
+#ifdef CONFIG_AUDIT
SMK_LOGGING = 10, /* logging */
+#endif /* CONFIG_AUDIT */
SMK_LOAD_SELF = 11, /* task specific rules */
SMK_ACCESSES = 12, /* access policy */
SMK_MAPPED = 13, /* CIPSO level indicating mapped label */
@@ -165,7 +167,7 @@ static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
/*
- * Stricly for CIPSO level manipulation.
+ * Strictly for CIPSO level manipulation.
* Set the category bit number in a smack label sized buffer.
*/
static inline void smack_catset_bit(unsigned int cat, char *catsetp)
@@ -562,6 +564,7 @@ static void smk_seq_stop(struct seq_file *s, void *v)
static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
{
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
/*
* Don't show any rules with label names too long for
* interface file (/smack/load or /smack/load2)
@@ -575,28 +578,11 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
if (srp->smk_access == 0)
return;
- seq_printf(s, "%s %s",
+ smack_str_from_perm(acc, srp->smk_access);
+ seq_printf(s, "%s %s %s\n",
srp->smk_subject->smk_known,
- srp->smk_object->smk_known);
-
- seq_putc(s, ' ');
-
- if (srp->smk_access & MAY_READ)
- seq_putc(s, 'r');
- if (srp->smk_access & MAY_WRITE)
- seq_putc(s, 'w');
- if (srp->smk_access & MAY_EXEC)
- seq_putc(s, 'x');
- if (srp->smk_access & MAY_APPEND)
- seq_putc(s, 'a');
- if (srp->smk_access & MAY_TRANSMUTE)
- seq_putc(s, 't');
- if (srp->smk_access & MAY_LOCK)
- seq_putc(s, 'l');
- if (srp->smk_access & MAY_BRINGUP)
- seq_putc(s, 'b');
-
- seq_putc(s, '\n');
+ srp->smk_object->smk_known,
+ acc);
}
/*
@@ -828,7 +814,7 @@ static int smk_open_cipso(struct inode *inode, struct file *file)
static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, int format)
{
- struct netlbl_lsm_catmap *old_cat, *new_cat = NULL;
+ struct netlbl_lsm_catmap *old_cat;
struct smack_known *skp;
struct netlbl_lsm_secattr ncats;
char mapcatset[SMK_CIPSOLEN];
@@ -915,22 +901,15 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
smack_catset_bit(cat, mapcatset);
}
- ncats.flags = 0;
- if (catlen == 0) {
- ncats.attr.mls.cat = NULL;
- ncats.attr.mls.lvl = maplevel;
- new_cat = netlbl_catmap_alloc(GFP_ATOMIC);
- if (new_cat)
- new_cat->next = ncats.attr.mls.cat;
- ncats.attr.mls.cat = new_cat;
- skp->smk_netlabel.flags &= ~(1U << 3);
- rc = 0;
- } else {
- rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
- }
+
+ rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
if (rc >= 0) {
old_cat = skp->smk_netlabel.attr.mls.cat;
rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat);
+ if (ncats.attr.mls.cat)
+ skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT;
+ else
+ skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT;
skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
synchronize_rcu();
netlbl_catmap_free(old_cat);
@@ -1098,13 +1077,12 @@ static int smk_open_net4addr(struct inode *inode, struct file *file)
}
/**
- * smk_net4addr_insert
+ * smk_net4addr_insert - insert a new entry into the net4addrs list
* @new : netlabel to insert
*
- * This helper insert netlabel in the smack_net4addrs list
+ * This helper inserts netlabel in the smack_net4addrs list
* sorted by netmask length (longest to smallest)
- * locked by &smk_net4addr_lock in smk_write_net4addr
- *
+ * locked by &smk_net4addr_lock in smk_write_net4addr.
*/
static void smk_net4addr_insert(struct smk_net4addr *new)
{
@@ -1361,13 +1339,12 @@ static int smk_open_net6addr(struct inode *inode, struct file *file)
}
/**
- * smk_net6addr_insert
+ * smk_net6addr_insert - insert a new entry into the net6addrs list
* @new : entry to insert
*
* This inserts an entry in the smack_net6addrs list
* sorted by netmask length (longest to smallest)
- * locked by &smk_net6addr_lock in smk_write_net6addr
- *
+ * locked by &smk_net6addr_lock in smk_write_net6addr.
*/
static void smk_net6addr_insert(struct smk_net6addr *new)
{
@@ -2149,6 +2126,7 @@ static const struct file_operations smk_unconfined_ops = {
};
#endif /* CONFIG_SECURITY_SMACK_BRINGUP */
+#ifdef CONFIG_AUDIT
/**
* smk_read_logging - read() for /smack/logging
* @filp: file pointer, not actually used
@@ -2213,6 +2191,7 @@ static const struct file_operations smk_logging_ops = {
.write = smk_write_logging,
.llseek = default_llseek,
};
+#endif /* CONFIG_AUDIT */
/*
* Seq_file read operations for /smack/load-self
@@ -2899,8 +2878,10 @@ static int smk_fill_super(struct super_block *sb, struct fs_context *fc)
"netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR},
[SMK_ONLYCAP] = {
"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
+#ifdef CONFIG_AUDIT
[SMK_LOGGING] = {
"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
+#endif /* CONFIG_AUDIT */
[SMK_LOAD_SELF] = {
"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
[SMK_ACCESSES] = {
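In the smk_set_cipso() hunk above, the NETLBL_SECATTR_MLS_CAT bit is now set or cleared depending on whether smk_netlbl_mls() actually produced a category map. A tiny sketch of that flag bookkeeping; the flag value is a placeholder, the real constant comes from <net/netlabel.h>:

#include <stdio.h>

#define DEMO_MLS_CAT 0x08u       /* placeholder for NETLBL_SECATTR_MLS_CAT */

/* Track whether the secattr carries categories, as in the hunk. */
static unsigned int update_flags(unsigned int flags, const void *catmap)
{
        if (catmap)
                flags |= DEMO_MLS_CAT;
        else
                flags &= ~DEMO_MLS_CAT;
        return flags;
}

int main(void)
{
        int dummy_catmap;
        unsigned int flags = 0;

        flags = update_flags(flags, &dummy_catmap);
        printf("with catmap:    flags=%#x\n", flags);

        flags = update_flags(flags, NULL);
        printf("without catmap: flags=%#x\n", flags);
        return 0;
}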
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 5c7b059a332a..0f78898bce09 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1981,6 +1981,114 @@ static int tomoyo_truncate(char *str)
}
/**
+ * tomoyo_numscan - sscanf() which stores the length of a decimal integer value.
+ *
+ * @str: String to scan.
+ * @head: Leading string that must start with.
+ * @width: Pointer to "int" for storing length of a decimal integer value after @head.
+ * @tail: Optional character that must match after a decimal integer value.
+ *
+ * Returns whether @str starts with @head and a decimal value follows @head.
+ */
+static bool tomoyo_numscan(const char *str, const char *head, int *width, const char tail)
+{
+ const char *cp;
+ const int n = strlen(head);
+
+ if (!strncmp(str, head, n)) {
+ cp = str + n;
+ while (*cp && *cp >= '0' && *cp <= '9')
+ cp++;
+ if (*cp == tail || !tail) {
+ *width = cp - (str + n);
+ return *width != 0;
+ }
+ }
+ *width = 0;
+ return 0;
+}
+
+/**
+ * tomoyo_patternize_path - Make patterns for file path. Used by learning mode.
+ *
+ * @buffer: Destination buffer.
+ * @len: Size of @buffer.
+ * @entry: Original line.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_patternize_path(char *buffer, const int len, char *entry)
+{
+ int width;
+ char *cp = entry;
+
+ /* Nothing to do if this line is not for "file" related entry. */
+ if (strncmp(entry, "file ", 5))
+ goto flush;
+ /*
+ * Nothing to do if there is no colon in this line, for this rewriting
+ * applies to only filesystems where numeric values in the path are volatile.
+ */
+ cp = strchr(entry + 5, ':');
+ if (!cp) {
+ cp = entry;
+ goto flush;
+ }
+ /* Flush e.g. "file ioctl" part. */
+ while (*cp != ' ')
+ cp--;
+ *cp++ = '\0';
+ tomoyo_addprintf(buffer, len, "%s ", entry);
+ /* e.g. file ioctl pipe:[$INO] $CMD */
+ if (tomoyo_numscan(cp, "pipe:[", &width, ']')) {
+ cp += width + 7;
+ tomoyo_addprintf(buffer, len, "pipe:[\\$]");
+ goto flush;
+ }
+ /* e.g. file ioctl socket:[$INO] $CMD */
+ if (tomoyo_numscan(cp, "socket:[", &width, ']')) {
+ cp += width + 9;
+ tomoyo_addprintf(buffer, len, "socket:[\\$]");
+ goto flush;
+ }
+ if (!strncmp(cp, "proc:/self", 10)) {
+ /* e.g. file read proc:/self/task/$TID/fdinfo/$FD */
+ cp += 10;
+ tomoyo_addprintf(buffer, len, "proc:/self");
+ } else if (tomoyo_numscan(cp, "proc:/", &width, 0)) {
+ /* e.g. file read proc:/$PID/task/$TID/fdinfo/$FD */
+ /*
+ * Don't patternize $PID part if $PID == 1, for several
+ * programs access only files in /proc/1/ directory.
+ */
+ cp += width + 6;
+ if (width == 1 && *(cp - 1) == '1')
+ tomoyo_addprintf(buffer, len, "proc:/1");
+ else
+ tomoyo_addprintf(buffer, len, "proc:/\\$");
+ } else {
+ goto flush;
+ }
+ /* Patternize $TID part if "/task/" follows. */
+ if (tomoyo_numscan(cp, "/task/", &width, 0)) {
+ cp += width + 6;
+ tomoyo_addprintf(buffer, len, "/task/\\$");
+ }
+ /* Patternize $FD part if "/fd/" or "/fdinfo/" follows. */
+ if (tomoyo_numscan(cp, "/fd/", &width, 0)) {
+ cp += width + 4;
+ tomoyo_addprintf(buffer, len, "/fd/\\$");
+ } else if (tomoyo_numscan(cp, "/fdinfo/", &width, 0)) {
+ cp += width + 8;
+ tomoyo_addprintf(buffer, len, "/fdinfo/\\$");
+ }
+flush:
+ /* Flush remaining part if any. */
+ if (*cp)
+ tomoyo_addprintf(buffer, len, "%s", cp);
+}
+
+/**
* tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
*
* @domain: Pointer to "struct tomoyo_domain_info".
@@ -2003,7 +2111,8 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
if (!cp)
return;
*cp++ = '\0';
- len = strlen(cp) + 1;
+ /* Reserve some space for potentially using patterns. */
+ len = strlen(cp) + 16;
/* strstr() will return NULL if ordering is wrong. */
if (*cp == 'f') {
argv0 = strstr(header, " argv[]={ \"");
@@ -2020,10 +2129,10 @@ static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
if (symlink)
len += tomoyo_truncate(symlink + 1) + 1;
}
- buffer = kmalloc(len, GFP_NOFS);
+ buffer = kmalloc(len, GFP_NOFS | __GFP_ZERO);
if (!buffer)
return;
- snprintf(buffer, len - 1, "%s", cp);
+ tomoyo_patternize_path(buffer, len, cp);
if (realpath)
tomoyo_addprintf(buffer, len, " exec.%s", realpath);
if (argv0)
@@ -2665,7 +2774,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
if (head->w.avail >= head->writebuf_size - 1) {
const int len = head->writebuf_size * 2;
- char *cp = kzalloc(len, GFP_NOFS);
+ char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN);
if (!cp) {
error = -ENOMEM;
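tomoyo_numscan() above is small enough to exercise on its own. The copy below is a standalone paraphrase of the helper from the hunk, with a main() showing how a pipe inode number is recognised before tomoyo_patternize_path() rewrites it to the pipe:[\$] pattern:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Paraphrase of the helper shown above: report whether @str starts with
 * @head followed by a decimal run, optionally terminated by @tail, and
 * store the run's width. */
static bool numscan(const char *str, const char *head, int *width, const char tail)
{
        const char *cp;
        const int n = strlen(head);

        if (!strncmp(str, head, n)) {
                cp = str + n;
                while (*cp && *cp >= '0' && *cp <= '9')
                        cp++;
                if (*cp == tail || !tail) {
                        *width = cp - (str + n);
                        return *width != 0;
                }
        }
        *width = 0;
        return false;
}

int main(void)
{
        const char *samples[] = {
                "pipe:[123456] 0x5401",          /* digits inside [] -> match */
                "pipe:[] 0x5401",                /* no digits -> no match */
                "socket:[4026531840] connect",   /* different head -> no match */
        };
        int width;

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                bool hit = numscan(samples[i], "pipe:[", &width, ']');

                printf("%-32s -> %s (width %d)\n", samples[i],
                       hit ? "patternize as pipe:[\\$]" : "leave as-is", width);
        }
        return 0;
}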
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index aed9e3ef2c9e..5f9ccab26e9a 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -722,10 +722,17 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
ee->bprm = bprm;
ee->r.obj = &ee->obj;
ee->obj.path1 = bprm->file->f_path;
- /* Get symlink's pathname of program. */
+ /*
+ * Get symlink's pathname of program, but fallback to realpath if
+ * symlink's pathname does not exist or symlink's pathname refers
+ * to proc filesystem (e.g. /dev/fd/<num> or /proc/self/fd/<num> ).
+ */
exename.name = tomoyo_realpath_nofollow(original_name);
+ if (exename.name && !strncmp(exename.name, "proc:/", 6)) {
+ kfree(exename.name);
+ exename.name = NULL;
+ }
if (!exename.name) {
- /* Fallback to realpath if symlink's pathname does not exist. */
exename.name = tomoyo_realpath_from_path(&bprm->file->f_path);
if (!exename.name)
goto out;
@@ -913,7 +920,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
#ifdef CONFIG_MMU
/*
* This is called at execve() time in order to dig around
- * in the argv/environment of the new proceess
+ * in the argv/environment of the new process
* (represented by bprm).
*/
mmap_read_lock(bprm->mm);
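The fallback added in tomoyo_find_next_domain() above hinges on a plain prefix test on the name returned by tomoyo_realpath_nofollow(). A trivial standalone sketch of that decision; looks_like_procfs() is a made-up name for illustration:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the check in the hunk: names that resolve
 * under the proc filesystem are not useful for naming a domain. */
static int looks_like_procfs(const char *name)
{
        return name && !strncmp(name, "proc:/", 6);
}

int main(void)
{
        const char *names[] = { "proc:/self/fd/3", "/usr/bin/python3", NULL };

        for (int i = 0; names[i]; i++)
                printf("%-18s -> %s\n", names[i],
                       looks_like_procfs(names[i]) ?
                       "discard, fall back to realpath" :
                       "use symlink's pathname");
        return 0;
}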
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index a2705798476f..7e69747b2f77 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -229,11 +229,11 @@ static void __init tomoyo_create_entry(const char *name, const umode_t mode,
}
/**
- * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ * tomoyo_interface_init - Initialize /sys/kernel/security/tomoyo/ interface.
*
* Returns 0.
*/
-static int __init tomoyo_initerface_init(void)
+static int __init tomoyo_interface_init(void)
{
struct tomoyo_domain_info *domain;
struct dentry *tomoyo_dir;
@@ -270,4 +270,4 @@ static int __init tomoyo_initerface_init(void)
return 0;
}
-fs_initcall(tomoyo_initerface_init);
+fs_initcall(tomoyo_interface_init);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 04a92c3d65d4..d6ebcd9db80a 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -549,10 +549,7 @@ static const struct lsm_id tomoyo_lsmid = {
.id = LSM_ID_TOMOYO,
};
-/*
- * tomoyo_security_ops is a "struct security_operations" which is used for
- * registering TOMOYO.
- */
+/* tomoyo_hooks is used for registering TOMOYO. */
static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index e1a5e13ea269..3d064dd4e03f 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -76,7 +76,6 @@ static void report_access(const char *access, struct task_struct *target,
struct task_struct *agent)
{
struct access_report_info *info;
- char agent_comm[sizeof(agent->comm)];
assert_spin_locked(&target->alloc_lock); /* for target->comm */
@@ -86,8 +85,7 @@ static void report_access(const char *access, struct task_struct *target,
*/
pr_notice_ratelimited(
"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
- access, target->comm, target->pid,
- get_task_comm(agent_comm, agent), agent->pid);
+ access, target->comm, target->pid, agent->comm, agent->pid);
return;
}
@@ -224,7 +222,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc = -ENOSYS;
- struct task_struct *myself = current;
+ struct task_struct *myself;
switch (option) {
case PR_SET_PTRACER:
@@ -234,11 +232,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
* leader checking is handled later when walking the ancestry
* at the time of PTRACE_ATTACH check.
*/
- rcu_read_lock();
- if (!thread_group_leader(myself))
- myself = rcu_dereference(myself->group_leader);
- get_task_struct(myself);
- rcu_read_unlock();
+ myself = current->group_leader;
if (arg2 == 0) {
yama_ptracer_del(NULL, myself);
@@ -257,7 +251,6 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
}
}
- put_task_struct(myself);
break;
}
@@ -454,7 +447,7 @@ static int yama_dointvec_minmax(const struct ctl_table *table, int write,
static int max_scope = YAMA_SCOPE_NO_ATTACH;
-static struct ctl_table yama_sysctl_table[] = {
+static const struct ctl_table yama_sysctl_table[] = {
{
.procname = "ptrace_scope",
.data = &ptrace_scope,