commit     dc6a7effb48e7267c9f1314e3aa8cfe539bd6096
tree       50018d72ebe06eef0c24117851a1ccd14d03240a
parent     bab6ffa233b960181feb3bdaa51e93ec35f3eb01
parent     f260fd59e3f387432bda51072fff4494fba10b91
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2022-05-19 18:18:55 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2022-05-19 18:18:55 +0300
Merge tag 'lkdtm-next' of https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into char-misc-next

Kees writes:

lkdtm updates for -next

- Test for new usercopy memory regions
- avoid GCC 12 warnings
- update expected CONFIGs for selftests

* tag 'lkdtm-next' of https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm/heap: Hide allocation size from -Warray-bounds
  selftests/lkdtm: Add configs for stackleak and "after free" tests
  lkdtm/usercopy: Check vmalloc and >0-order folios
  lkdtm/usercopy: Rename "heap" to "slab"
  lkdtm: cfi: Fix type width for masking PAC bits
Diffstat (limited to 'drivers')
 drivers/misc/lkdtm/heap.c     |   1 +
 drivers/misc/lkdtm/usercopy.c | 113 ++++++++++++++++++++++++++++++++++------
 2 files changed, 99 insertions(+), 15 deletions(-)
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 5d3b92cd23bd..62516078a619 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -56,6 +56,7 @@ static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
 		return;
 
 	pr_info("Attempting slab linear overflow ...\n");
+	OPTIMIZER_HIDE_VAR(data);
 	data[1024 / sizeof(u32)] = 0x12345678;
 	kfree(data);
 }
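
The new OPTIMIZER_HIDE_VAR(data) line launders the pointer through an empty asm statement so that GCC 12's -Warray-bounds can no longer prove at compile time that the store one element past the 1024-byte allocation is out of bounds; the intentional runtime overflow therefore survives into the object code. A minimal sketch of the idiom, assuming a GCC/Clang-style compiler (the kernel's real macro is OPTIMIZER_HIDE_VAR() in include/linux/compiler.h; HIDE_VAR below is an illustrative stand-in):

	/*
	 * An empty asm that claims to rewrite "var" in place: the compiler
	 * must assume the value changed, so it drops any provenance it had
	 * tracked back to the original allocation.
	 */
	#define HIDE_VAR(var)	__asm__ ("" : "=r" (var) : "0" (var))

Because the asm body is empty this costs nothing at runtime; it only severs the data-flow information the optimizer uses to emit the warning (and, potentially, to delete the "impossible" store).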
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 7852b9fc7c47..6215ec995cd3 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
  */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -130,7 +131,7 @@ free_user:
  * This checks for whole-object size validation with hardened usercopy,
  * with or without usercopy whitelisting.
  */
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
@@ -196,9 +197,9 @@ free_kernel:
 
 /*
  * This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
  */
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
 {
 	unsigned long user_alloc;
 	unsigned char *buf = NULL;
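
The whitelist tests depend on a slab cache whose usercopy window covers only part of each object, so a copy that stays inside the object but strays outside the window still trips hardened usercopy. A minimal sketch of creating such a cache with the real kmem_cache_create_usercopy() API (the cache name, object size, and window offsets here are illustrative, not lkdtm's actual values):

	#include <linux/slab.h>

	static struct kmem_cache *whitelist_cache;

	static int __init example_whitelist_init(void)
	{
		/*
		 * Of each 256-byte object, only bytes [16, 16 + 32) are
		 * whitelisted for usercopy; copies overlapping the rest of
		 * the object are rejected by CONFIG_HARDENED_USERCOPY.
		 */
		whitelist_cache = kmem_cache_create_usercopy("example-usercopy",
							     256, 0, 0,
							     16, 32, NULL);
		return whitelist_cache ? 0 : -ENOMEM;
	}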
@@ -272,24 +273,24 @@ free_alloc:
 }
 
 /* Callable tests. */
-static void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
 {
-	do_usercopy_heap_size(true);
+	do_usercopy_slab_size(true);
 }
 
-static void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
 {
-	do_usercopy_heap_size(false);
+	do_usercopy_slab_size(false);
 }
 
-static void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
 {
-	do_usercopy_heap_whitelist(true);
+	do_usercopy_slab_whitelist(true);
 }
 
-static void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_whitelist(false);
+	do_usercopy_slab_whitelist(false);
 }
 
 static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -341,6 +342,86 @@ free_user:
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+	unsigned long uaddr;
+
+	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (uaddr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	/* Initialize contents. */
+	memset(kaddr, 0xAA, PAGE_SIZE);
+
+	/* Bump the kaddr forward to detect a page-spanning overflow. */
+	kaddr += PAGE_SIZE / 2;
+
+	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + (PAGE_SIZE / 2))) {
+		pr_err("copy_to_user() failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+		goto free_user;
+	}
+
+	pr_err("FAIL: bad copy_to_user() not detected!\n");
+	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+	vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+	void *addr;
+
+	addr = vmalloc(PAGE_SIZE);
+	if (!addr) {
+		pr_err("vmalloc() failed!?\n");
+		return;
+	}
+	do_usercopy_page_span("vmalloc", addr);
+	vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+	struct folio *folio;
+	void *addr;
+
+	/*
+	 * FIXME: Folio checking currently misses 0-order allocations, so
+	 * allocate and bump forward to the last page.
+	 */
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!folio) {
+		pr_err("folio_alloc() failed!?\n");
+		return;
+	}
+	addr = folio_address(folio);
+	if (addr)
+		do_usercopy_page_span("folio", addr + PAGE_SIZE);
+	else
+		pr_err("folio_address() failed?!\n");
+	folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
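
In the page-span tests above, the "bad" copy_to_user() starts at the midpoint of the allocation's last page and asks for a full PAGE_SIZE, so it runs PAGE_SIZE/2 bytes past the end of the vmalloc area or folio; hardened usercopy is expected to resolve the kernel address back to its allocation and refuse the copy. A hedged sketch of that style of bounds check, using real helpers (is_vmalloc_addr(), find_vm_area(), virt_to_folio(), folio_size()) but deliberately simplified control flow; this is not the actual mm/usercopy.c implementation:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Returns true if [ptr, ptr + n) runs past the backing allocation. */
	static bool copy_spans_object(const void *ptr, unsigned long n)
	{
		unsigned long start = (unsigned long)ptr;

		if (is_vmalloc_addr(ptr)) {
			struct vm_struct *area = find_vm_area(ptr);

			if (!area)
				return true;	/* unknown object: reject */
			return start + n >
			       (unsigned long)area->addr + area->size;
		} else {
			struct folio *folio = virt_to_folio(ptr);

			return start + n >
			       (unsigned long)folio_address(folio) +
			       folio_size(folio);
		}
	}

For an order-1 folio (2^1 = 2 pages), lkdtm_USERCOPY_FOLIO()'s addr + PAGE_SIZE lands on the folio's last page; that is exactly why the FIXME bumps forward, since a 0-order folio would currently slip past the folio check.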
@@ -358,13 +439,15 @@ void __exit lkdtm_usercopy_exit(void)
 }
 
 static struct crashtype crashtypes[] = {
-	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
-	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
-	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
+	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
+	CRASHTYPE(USERCOPY_VMALLOC),
+	CRASHTYPE(USERCOPY_FOLIO),
 	CRASHTYPE(USERCOPY_KERNEL),
 };
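
With USERCOPY_VMALLOC and USERCOPY_FOLIO registered in crashtypes[], the new tests are reachable through LKDTM's usual debugfs trigger. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_LKDTM=y (check dmesg afterwards for the pr_info()/pr_err() results):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Writing a crashtype name to DIRECT runs that test. */
		const char *name = "USERCOPY_VMALLOC";
		int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, name, strlen(name)) != (ssize_t)strlen(name))
			perror("write");
		close(fd);
		return 0;
	}

Note that many LKDTM tests intentionally Oops or corrupt the kernel, so this belongs on a disposable test machine or VM, not a development workstation.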