Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 4
-rw-r--r--  tools/lib/bpf/skel_internal.h | 4
-rw-r--r--  tools/testing/cxl/Kbuild | 1
-rw-r--r--  tools/testing/cxl/test/cxl.c | 131
-rw-r--r--  tools/testing/cxl/test/mem.c | 53
-rw-r--r--  tools/testing/cxl/test/mock.c | 8
-rw-r--r--  tools/testing/memblock/Makefile | 3
-rw-r--r--  tools/testing/memblock/README | 17
-rw-r--r--  tools/testing/memblock/TODO | 14
-rw-r--r--  tools/testing/memblock/internal.h | 11
-rw-r--r--  tools/testing/memblock/linux/memory_hotplug.h | 8
-rw-r--r--  tools/testing/memblock/main.c | 2
-rw-r--r--  tools/testing/memblock/scripts/Makefile.include | 10
-rw-r--r--  tools/testing/memblock/tests/alloc_api.c | 225
-rw-r--r--  tools/testing/memblock/tests/alloc_helpers_api.c | 129
-rw-r--r--  tools/testing/memblock/tests/alloc_nid_api.c | 351
-rw-r--r--  tools/testing/memblock/tests/basic_api.c | 337
-rw-r--r--  tools/testing/memblock/tests/common.c | 118
-rw-r--r--  tools/testing/memblock/tests/common.h | 54
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 116
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 95
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lru_bug.c | 21
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/lru_bug.c | 49
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 7
-rw-r--r--  tools/testing/selftests/kvm/rseq_test.c | 58
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 17
-rw-r--r--  tools/testing/selftests/net/.gitignore | 3
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/net/forwarding/custom_multipath_hash.sh | 24
-rwxr-xr-x  tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh | 24
-rwxr-xr-x  tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh | 24
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 26
-rw-r--r--  tools/testing/selftests/net/tap.c | 434
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_trans_stress.sh | 81
-rw-r--r--  tools/thermal/tmon/sysfs.c | 24
38 files changed, 1971 insertions, 546 deletions
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index a77b915d36a8..8323ac5b7eee 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -303,6 +303,7 @@
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM-Exit when EIBRS is enabled */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index cc615be27a54..e057e039173c 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -150,6 +150,10 @@
* are restricted to targets in
* kernel.
*/
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index bd6f4505e7b1..70adf7b119b9 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -66,13 +66,13 @@ struct bpf_load_and_run_opts {
const char *errstr;
};
-long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
+long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size)
{
#ifdef __KERNEL__
- return bpf_sys_bpf(cmd, attr, size);
+ return kern_sys_bpf(cmd, attr, size);
#else
return syscall(__NR_bpf, cmd, attr, size);
#endif
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 33543231d453..500be85729cc 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -47,6 +47,7 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
+cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
obj-m += test/
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 431f2bddf6c8..a072b2d3e726 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -14,7 +14,7 @@
#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
-#define NR_CXL_PORT_DECODERS 2
+#define NR_CXL_PORT_DECODERS 8
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
@@ -118,7 +118,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 0,
- .window_size = SZ_256M,
+ .window_size = SZ_256M * 4UL,
},
.target = { 0 },
},
@@ -133,7 +133,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
.qtg_id = 1,
- .window_size = SZ_256M * 2,
+ .window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
},
@@ -148,7 +148,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 2,
- .window_size = SZ_256M,
+ .window_size = SZ_256M * 4UL,
},
.target = { 0 },
},
@@ -163,7 +163,7 @@ static struct {
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = 3,
- .window_size = SZ_256M * 2,
+ .window_size = SZ_256M * 8UL,
},
.target = { 0, 1, },
},
@@ -429,6 +429,50 @@ static int map_targets(struct device *dev, void *data)
return 0;
}
+static int mock_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
+ if (port->commit_end + 1 != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end + 1);
+ return -EBUSY;
+ }
+
+ port->commit_end++;
+ cxld->flags |= CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
+static int mock_decoder_reset(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int id = cxld->id;
+
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+ return 0;
+
+ dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
+ if (port->commit_end != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order reset, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id, port->commit_end);
+ return -EBUSY;
+ }
+
+ port->commit_end--;
+ cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ return 0;
+}
+
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
struct cxl_port *port = cxlhdm->port;
@@ -451,25 +495,39 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
struct cxl_decoder *cxld;
int rc;
- if (target_count)
- cxld = cxl_switch_decoder_alloc(port, target_count);
- else
- cxld = cxl_endpoint_decoder_alloc(port);
- if (IS_ERR(cxld)) {
- dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
- return PTR_ERR(cxld);
+ if (target_count) {
+ struct cxl_switch_decoder *cxlsd;
+
+ cxlsd = cxl_switch_decoder_alloc(port, target_count);
+ if (IS_ERR(cxlsd)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxlsd);
+ }
+ cxld = &cxlsd->cxld;
+ } else {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = cxl_endpoint_decoder_alloc(port);
+
+ if (IS_ERR(cxled)) {
+ dev_warn(&port->dev,
+ "Failed to allocate the decoder\n");
+ return PTR_ERR(cxled);
+ }
+ cxld = &cxled->cxld;
}
- cxld->decoder_range = (struct range) {
+ cxld->hpa_range = (struct range) {
.start = 0,
.end = -1,
};
- cxld->flags = CXL_DECODER_F_ENABLE;
cxld->interleave_ways = min_not_zero(target_count, 1);
cxld->interleave_granularity = SZ_4K;
cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
if (target_count) {
rc = device_for_each_child(port->uport, &ctx,
@@ -569,44 +627,6 @@ static void mock_companion(struct acpi_device *adev, struct device *dev)
#define SZ_512G (SZ_64G * 8)
#endif
-static struct platform_device *alloc_memdev(int id)
-{
- struct resource res[] = {
- [0] = {
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY,
- },
- };
- struct platform_device *pdev;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(res); i++) {
- struct cxl_mock_res *r = alloc_mock_res(SZ_256M);
-
- if (!r)
- return NULL;
- res[i].start = r->range.start;
- res[i].end = r->range.end;
- }
-
- pdev = platform_device_alloc("cxl_mem", id);
- if (!pdev)
- return NULL;
-
- rc = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
- if (rc)
- goto err;
-
- return pdev;
-
-err:
- platform_device_put(pdev);
- return NULL;
-}
-
static __init int cxl_test_init(void)
{
int rc, i;
@@ -619,7 +639,8 @@ static __init int cxl_test_init(void)
goto err_gen_pool_create;
}
- rc = gen_pool_add(cxl_mock_pool, SZ_512G, SZ_64G, NUMA_NO_NODE);
+ rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
+ SZ_64G, NUMA_NO_NODE);
if (rc)
goto err_gen_pool_add;
@@ -708,7 +729,7 @@ static __init int cxl_test_init(void)
struct platform_device *dport = cxl_switch_dport[i];
struct platform_device *pdev;
- pdev = alloc_memdev(i);
+ pdev = platform_device_alloc("cxl_mem", i);
if (!pdev)
goto err_mem;
pdev->dev.parent = &dport->dev;
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 6b9239b2afd4..aa2df3a15051 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -10,6 +10,7 @@
#include <cxlmem.h>
#define LSA_SIZE SZ_128K
+#define DEV_SIZE SZ_2G
#define EFFECT(x) (1U << x)
static struct cxl_cel_entry mock_cel[] = {
@@ -26,6 +27,10 @@ static struct cxl_cel_entry mock_cel[] = {
.effect = cpu_to_le16(0),
},
{
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
+ .effect = cpu_to_le16(0),
+ },
+ {
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
.effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
},
@@ -97,42 +102,37 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
- struct platform_device *pdev = to_platform_device(cxlds->dev);
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
.lsa_size = cpu_to_le32(LSA_SIZE),
- /* FIXME: Add partition support */
- .partition_align = cpu_to_le64(0),
+ .partition_align =
+ cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
+ .total_capacity =
+ cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
};
- u64 capacity = 0;
- int i;
if (cmd->size_out < sizeof(id))
return -EINVAL;
- for (i = 0; i < 2; i++) {
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res)
- break;
-
- capacity += resource_size(res) / CXL_CAPACITY_MULTIPLIER;
+ memcpy(cmd->payload_out, &id, sizeof(id));
- if (le64_to_cpu(id.partition_align))
- continue;
+ return 0;
+}
- if (res->desc == IORES_DESC_PERSISTENT_MEMORY)
- id.persistent_capacity = cpu_to_le64(
- resource_size(res) / CXL_CAPACITY_MULTIPLIER);
- else
- id.volatile_capacity = cpu_to_le64(
- resource_size(res) / CXL_CAPACITY_MULTIPLIER);
- }
+static int mock_partition_info(struct cxl_dev_state *cxlds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_partition_info pi = {
+ .active_volatile_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ .active_persistent_cap =
+ cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
+ };
- id.total_capacity = cpu_to_le64(capacity);
+ if (cmd->size_out < sizeof(pi))
+ return -EINVAL;
- memcpy(cmd->payload_out, &id, sizeof(id));
+ memcpy(cmd->payload_out, &pi, sizeof(pi));
return 0;
}
@@ -221,6 +221,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
case CXL_MBOX_OP_GET_LSA:
rc = mock_get_lsa(cxlds, cmd);
break;
+ case CXL_MBOX_OP_GET_PARTITION_INFO:
+ rc = mock_partition_info(cxlds, cmd);
+ break;
case CXL_MBOX_OP_SET_LSA:
rc = mock_set_lsa(cxlds, cmd);
break;
@@ -282,7 +285,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(dev, cxlmd);
return 0;
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index f1f8c40948c5..bce6a21df0d5 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -208,13 +208,15 @@ int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL);
-bool __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
- struct cxl_hdm *cxlhdm)
+int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_hdm *cxlhdm)
{
int rc = 0, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (!ops || !ops->is_mock_dev(cxlds->dev))
+ if (ops && ops->is_mock_dev(cxlds->dev))
+ rc = 0;
+ else
rc = cxl_hdm_decode_init(cxlds, cxlhdm);
put_cxl_mock_ops(index);
diff --git a/tools/testing/memblock/Makefile b/tools/testing/memblock/Makefile
index a698e24b35e7..246f7ac8489b 100644
--- a/tools/testing/memblock/Makefile
+++ b/tools/testing/memblock/Makefile
@@ -45,9 +45,8 @@ help:
@echo ' clean - Remove generated files and symlinks in the directory'
@echo ''
@echo 'Configuration:'
+ @echo ' make MEMBLOCK_DEBUG=1 - enable memblock_dbg() messages'
@echo ' make NUMA=1 - simulate enabled NUMA'
- @echo ' make MOVABLE_NODE=1 - override `movable_node_is_enabled`'
- @echo ' definition to simulate movable NUMA nodes'
@echo ' make 32BIT_PHYS_ADDR_T=1 - Use 32 bit physical addresses'
vpath %.c ../../lib
diff --git a/tools/testing/memblock/README b/tools/testing/memblock/README
index ca6afcff013a..7ca437d81806 100644
--- a/tools/testing/memblock/README
+++ b/tools/testing/memblock/README
@@ -33,12 +33,23 @@ To run the tests, build the main target and run it:
$ make && ./main
-A successful run produces no output. It is also possible to override different
-configuration parameters. For example, to simulate enabled NUMA, use:
+A successful run produces no output. It is possible to control the behavior
+by passing options from the command line. For example, to include verbose
+output, append the `-v` option when you run the tests:
+
+$ ./main -v
+
+This will print information about which functions are being tested and the
+number of test cases that passed.
+
+For the full list of command line options, see `./main --help`.
+
+It is also possible to override different configuration parameters to change
+the test functions. For example, to simulate enabled NUMA, use:
$ make NUMA=1
-For the full list of options, see `make help`.
+For the full list of build options, see `make help`.
Project structure
=================
diff --git a/tools/testing/memblock/TODO b/tools/testing/memblock/TODO
index cd1a30d5acc9..33044c634ea7 100644
--- a/tools/testing/memblock/TODO
+++ b/tools/testing/memblock/TODO
@@ -1,25 +1,17 @@
TODO
=====
-1. Add verbose output (e.g., what is being tested and how many tests cases are
- passing)
-
-2. Add flags to Makefile:
- + verbosity level
- + enable memblock_dbg() messages (i.e. pass "-D CONFIG_DEBUG_MEMORY_INIT"
- flag)
-
-3. Add tests trying to memblock_add() or memblock_reserve() 129th region.
+1. Add tests trying to memblock_add() or memblock_reserve() a 129th region.
This will trigger memblock_double_array(), make sure it succeeds.
*Important:* These tests require valid memory ranges, use dummy physical
memory block from common.c to implement them. It is also very
likely that the current MEM_SIZE won't be enough for these
test cases. Use realloc to adjust the size accordingly.
-4. Add test cases using this functions (implement them for both directions):
+2. Add test cases using these functions (implement them for both directions):
+ memblock_alloc_raw()
+ memblock_alloc_exact_nid_raw()
+ memblock_alloc_try_nid_raw()
-5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
+3. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
for the new region
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 94b52a8718b5..fdb7f5db7308 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -2,6 +2,17 @@
#ifndef _MM_INTERNAL_H
#define _MM_INTERNAL_H
+/*
+ * Enable memblock_dbg() messages
+ */
+#ifdef MEMBLOCK_DEBUG
+static int memblock_debug = 1;
+#endif
+
+#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
+
+bool mirrored_kernelcore = false;
+
struct page {};
void memblock_free_pages(struct page *page, unsigned long pfn,
diff --git a/tools/testing/memblock/linux/memory_hotplug.h b/tools/testing/memblock/linux/memory_hotplug.h
index 47988765a219..dabe2c556858 100644
--- a/tools/testing/memblock/linux/memory_hotplug.h
+++ b/tools/testing/memblock/linux/memory_hotplug.h
@@ -7,13 +7,11 @@
#include <linux/cache.h>
#include <linux/types.h>
+extern bool movable_node_enabled;
+
static inline bool movable_node_is_enabled(void)
{
-#ifdef MOVABLE_NODE
- return true;
-#else
- return false;
-#endif
+ return movable_node_enabled;
}
#endif
diff --git a/tools/testing/memblock/main.c b/tools/testing/memblock/main.c
index fb183c9e76d1..4ca1024342b1 100644
--- a/tools/testing/memblock/main.c
+++ b/tools/testing/memblock/main.c
@@ -3,9 +3,11 @@
#include "tests/alloc_api.h"
#include "tests/alloc_helpers_api.h"
#include "tests/alloc_nid_api.h"
+#include "tests/common.h"
int main(int argc, char **argv)
{
+ parse_args(argc, argv);
memblock_basic_checks();
memblock_alloc_checks();
memblock_alloc_helpers_checks();
diff --git a/tools/testing/memblock/scripts/Makefile.include b/tools/testing/memblock/scripts/Makefile.include
index 641569ccb7b0..aa6d82d56a23 100644
--- a/tools/testing/memblock/scripts/Makefile.include
+++ b/tools/testing/memblock/scripts/Makefile.include
@@ -6,14 +6,14 @@ ifeq ($(NUMA), 1)
CFLAGS += -D CONFIG_NUMA
endif
-# Simulate movable NUMA memory regions
-ifeq ($(MOVABLE_NODE), 1)
- CFLAGS += -D MOVABLE_NODE
-endif
-
# Use 32 bit physical addresses.
# Remember to install 32-bit version of dependencies.
ifeq ($(32BIT_PHYS_ADDR_T), 1)
CFLAGS += -m32 -U CONFIG_PHYS_ADDR_T_64BIT
LDFLAGS += -m32
endif
+
+# Enable memblock_dbg() messages
+ifeq ($(MEMBLOCK_DEBUG), 1)
+ CFLAGS += -D MEMBLOCK_DEBUG
+endif
diff --git a/tools/testing/memblock/tests/alloc_api.c b/tools/testing/memblock/tests/alloc_api.c
index d1aa7e15c18d..a14f38eb8a89 100644
--- a/tools/testing/memblock/tests/alloc_api.c
+++ b/tools/testing/memblock/tests/alloc_api.c
@@ -10,6 +10,8 @@ static int alloc_top_down_simple_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_2;
phys_addr_t expected_start;
@@ -19,12 +21,14 @@ static int alloc_top_down_simple_check(void)
allocated_ptr = memblock_alloc(size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == expected_start);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, expected_start);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -55,6 +59,8 @@ static int alloc_top_down_disjoint_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
@@ -73,15 +79,17 @@ static int alloc_top_down_disjoint_check(void)
allocated_ptr = memblock_alloc(r2_size, alignment);
- assert(allocated_ptr);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn2->size == r2_size);
- assert(rgn2->base == expected_start);
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -101,6 +109,8 @@ static int alloc_top_down_before_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
/*
* The first region ends at the aligned address to test region merging
*/
@@ -114,12 +124,14 @@ static int alloc_top_down_before_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == memblock_end_of_DRAM() - total_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -141,6 +153,8 @@ static int alloc_top_down_after_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
@@ -158,12 +172,14 @@ static int alloc_top_down_after_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base - r2_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -186,6 +202,8 @@ static int alloc_top_down_second_fit_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
@@ -204,12 +222,14 @@ static int alloc_top_down_second_fit_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == r2.size + r3_size);
- assert(rgn->base == r2.base - r3_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base - r3_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -231,6 +251,8 @@ static int alloc_in_between_generic_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t r3_size = SZ_64;
/*
@@ -254,12 +276,14 @@ static int alloc_in_between_generic_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base - r2.size - r3_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base - r2.size - r3_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -281,6 +305,8 @@ static int alloc_small_gaps_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t region_size = SZ_1K;
phys_addr_t gap_size = SZ_256;
phys_addr_t region_end;
@@ -296,7 +322,9 @@ static int alloc_small_gaps_generic_check(void)
allocated_ptr = memblock_alloc(region_size, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -309,6 +337,8 @@ static int alloc_all_reserved_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
/* Simulate full memory */
@@ -316,7 +346,9 @@ static int alloc_all_reserved_generic_check(void)
allocated_ptr = memblock_alloc(SZ_256, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -338,6 +370,8 @@ static int alloc_no_space_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
phys_addr_t available_size = SZ_256;
@@ -348,7 +382,9 @@ static int alloc_no_space_generic_check(void)
allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -369,6 +405,8 @@ static int alloc_limited_space_generic_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t available_size = SZ_256;
phys_addr_t reserved_size = MEM_SIZE - available_size;
@@ -379,12 +417,14 @@ static int alloc_limited_space_generic_check(void)
allocated_ptr = memblock_alloc(available_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == MEM_SIZE);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == MEM_SIZE);
+ test_pass_pop();
return 0;
}
@@ -399,14 +439,18 @@ static int alloc_no_memory_generic_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
reset_memblock_regions();
allocated_ptr = memblock_alloc(SZ_1K, SMP_CACHE_BYTES);
- assert(!allocated_ptr);
- assert(rgn->size == 0);
- assert(rgn->base == 0);
- assert(memblock.reserved.total_size == 0);
+ ASSERT_EQ(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, 0);
+ ASSERT_EQ(rgn->base, 0);
+ ASSERT_EQ(memblock.reserved.total_size, 0);
+
+ test_pass_pop();
return 0;
}
@@ -421,16 +465,20 @@ static int alloc_bottom_up_simple_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
setup_memblock();
allocated_ptr = memblock_alloc(SZ_2, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == SZ_2);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, SZ_2);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == SZ_2);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, SZ_2);
+
+ test_pass_pop();
return 0;
}
@@ -459,6 +507,8 @@ static int alloc_bottom_up_disjoint_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_16;
/* Use custom alignment */
phys_addr_t alignment = SMP_CACHE_BYTES * 2;
@@ -477,16 +527,18 @@ static int alloc_bottom_up_disjoint_check(void)
allocated_ptr = memblock_alloc(r2_size, alignment);
- assert(allocated_ptr);
+ ASSERT_NE(allocated_ptr, NULL);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn2->size == r2_size);
- assert(rgn2->base == expected_start);
+ ASSERT_EQ(rgn2->size, r2_size);
+ ASSERT_EQ(rgn2->base, expected_start);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -506,6 +558,8 @@ static int alloc_bottom_up_before_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_512;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
@@ -516,12 +570,14 @@ static int alloc_bottom_up_before_check(void)
allocated_ptr = memblock_alloc(r1_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -542,6 +598,8 @@ static int alloc_bottom_up_after_check(void)
struct region r1;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r2_size = SZ_512;
phys_addr_t total_size;
@@ -559,12 +617,14 @@ static int alloc_bottom_up_after_check(void)
allocated_ptr = memblock_alloc(r2_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == total_size);
- assert(rgn->base == r1.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r1.base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -588,6 +648,8 @@ static int alloc_bottom_up_second_fit_check(void)
struct region r1, r2;
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_1K;
phys_addr_t total_size;
@@ -606,12 +668,14 @@ static int alloc_bottom_up_second_fit_check(void)
allocated_ptr = memblock_alloc(r3_size, SMP_CACHE_BYTES);
- assert(allocated_ptr);
- assert(rgn->size == r2.size + r3_size);
- assert(rgn->base == r2.base);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, r2.size + r3_size);
+ ASSERT_EQ(rgn->base, r2.base);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -619,6 +683,7 @@ static int alloc_bottom_up_second_fit_check(void)
/* Test case wrappers */
static int alloc_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_simple_check();
memblock_set_bottom_up(true);
@@ -629,6 +694,7 @@ static int alloc_simple_check(void)
static int alloc_disjoint_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_disjoint_check();
memblock_set_bottom_up(true);
@@ -639,6 +705,7 @@ static int alloc_disjoint_check(void)
static int alloc_before_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_before_check();
memblock_set_bottom_up(true);
@@ -649,6 +716,7 @@ static int alloc_before_check(void)
static int alloc_after_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_after_check();
memblock_set_bottom_up(true);
@@ -659,6 +727,7 @@ static int alloc_after_check(void)
static int alloc_in_between_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_in_between_generic_check();
memblock_set_bottom_up(true);
@@ -669,6 +738,7 @@ static int alloc_in_between_check(void)
static int alloc_second_fit_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_top_down_second_fit_check();
memblock_set_bottom_up(true);
@@ -679,6 +749,7 @@ static int alloc_second_fit_check(void)
static int alloc_small_gaps_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_small_gaps_generic_check();
memblock_set_bottom_up(true);
@@ -689,6 +760,7 @@ static int alloc_small_gaps_check(void)
static int alloc_all_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_all_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -699,6 +771,7 @@ static int alloc_all_reserved_check(void)
static int alloc_no_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_no_space_generic_check();
memblock_set_bottom_up(true);
@@ -709,6 +782,7 @@ static int alloc_no_space_check(void)
static int alloc_limited_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_limited_space_generic_check();
memblock_set_bottom_up(true);
@@ -719,6 +793,7 @@ static int alloc_limited_space_check(void)
static int alloc_no_memory_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_no_memory_generic_check();
memblock_set_bottom_up(true);
@@ -729,6 +804,12 @@ static int alloc_no_memory_check(void)
int memblock_alloc_checks(void)
{
+ const char *func_testing = "memblock_alloc";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -746,5 +827,7 @@ int memblock_alloc_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/alloc_helpers_api.c b/tools/testing/memblock/tests/alloc_helpers_api.c
index 963a966db461..1069b4bdd5fd 100644
--- a/tools/testing/memblock/tests/alloc_helpers_api.c
+++ b/tools/testing/memblock/tests/alloc_helpers_api.c
@@ -21,6 +21,8 @@ static int alloc_from_simple_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_16;
phys_addr_t min_addr;
@@ -31,14 +33,16 @@ static int alloc_from_simple_generic_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -64,6 +68,8 @@ static int alloc_from_misaligned_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -75,14 +81,16 @@ static int alloc_from_misaligned_generic_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -110,6 +118,8 @@ static int alloc_from_top_down_high_addr_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -120,12 +130,14 @@ static int alloc_from_top_down_high_addr_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - SMP_CACHE_BYTES);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -151,6 +163,8 @@ static int alloc_from_top_down_no_space_above_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t r2_size = SZ_2;
phys_addr_t total_size = r1_size + r2_size;
@@ -165,12 +179,14 @@ static int alloc_from_top_down_no_space_above_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == min_addr - r1_size);
- assert(rgn->size == total_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, min_addr - r1_size);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -186,6 +202,8 @@ static int alloc_from_top_down_min_addr_cap_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t start_addr;
@@ -199,12 +217,14 @@ static int alloc_from_top_down_min_addr_cap_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == start_addr);
- assert(rgn->size == MEM_SIZE);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, MEM_SIZE);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, MEM_SIZE);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == MEM_SIZE);
+ test_pass_pop();
return 0;
}
@@ -230,6 +250,8 @@ static int alloc_from_bottom_up_high_addr_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_32;
phys_addr_t min_addr;
@@ -240,12 +262,14 @@ static int alloc_from_bottom_up_high_addr_check(void)
allocated_ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -270,6 +294,8 @@ static int alloc_from_bottom_up_no_space_above_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t r2_size;
@@ -284,12 +310,14 @@ static int alloc_from_bottom_up_no_space_above_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == memblock_start_of_DRAM());
- assert(rgn->size == r1_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, r1_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == r1_size + r2_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size + r2_size);
+
+ test_pass_pop();
return 0;
}
@@ -304,6 +332,8 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
struct memblock_region *rgn = &memblock.reserved.regions[0];
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t min_addr;
phys_addr_t start_addr;
@@ -315,12 +345,14 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
allocated_ptr = memblock_alloc_from(r1_size, SMP_CACHE_BYTES, min_addr);
- assert(allocated_ptr);
- assert(rgn->base == start_addr);
- assert(rgn->size == r1_size);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(rgn->base, start_addr);
+ ASSERT_EQ(rgn->size, r1_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1_size);
+
+ test_pass_pop();
return 0;
}
@@ -328,6 +360,7 @@ static int alloc_from_bottom_up_min_addr_cap_check(void)
/* Test case wrappers */
static int alloc_from_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_simple_generic_check();
memblock_set_bottom_up(true);
@@ -338,6 +371,7 @@ static int alloc_from_simple_check(void)
static int alloc_from_misaligned_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_misaligned_generic_check();
memblock_set_bottom_up(true);
@@ -348,6 +382,7 @@ static int alloc_from_misaligned_check(void)
static int alloc_from_high_addr_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_high_addr_check();
memblock_set_bottom_up(true);
@@ -358,6 +393,7 @@ static int alloc_from_high_addr_check(void)
static int alloc_from_no_space_above_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_no_space_above_check();
memblock_set_bottom_up(true);
@@ -368,6 +404,7 @@ static int alloc_from_no_space_above_check(void)
static int alloc_from_min_addr_cap_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_from_top_down_min_addr_cap_check();
memblock_set_bottom_up(true);
@@ -378,6 +415,12 @@ static int alloc_from_min_addr_cap_check(void)
int memblock_alloc_helpers_checks(void)
{
+ const char *func_testing = "memblock_alloc_from";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -389,5 +432,7 @@ int memblock_alloc_helpers_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/alloc_nid_api.c b/tools/testing/memblock/tests/alloc_nid_api.c
index 6390206e50e1..255fd514e9f5 100644
--- a/tools/testing/memblock/tests/alloc_nid_api.c
+++ b/tools/testing/memblock/tests/alloc_nid_api.c
@@ -21,6 +21,8 @@ static int alloc_try_nid_top_down_simple_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -36,15 +38,17 @@ static int alloc_try_nid_top_down_simple_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
+ ASSERT_EQ(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size);
- assert(rgn_end == max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -72,6 +76,8 @@ static int alloc_try_nid_top_down_end_misaligned_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t misalign = SZ_2;
phys_addr_t min_addr;
@@ -88,15 +94,17 @@ static int alloc_try_nid_top_down_end_misaligned_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size - misalign);
- assert(rgn_end < max_addr);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size - misalign);
+ ASSERT_LT(rgn_end, max_addr);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -122,6 +130,8 @@ static int alloc_try_nid_exact_address_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -137,15 +147,17 @@ static int alloc_try_nid_exact_address_generic_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_EQ(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
- assert(rgn_end == max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -173,6 +185,8 @@ static int alloc_try_nid_top_down_narrow_range_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -186,14 +200,16 @@ static int alloc_try_nid_top_down_narrow_range_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, max_addr - size);
- assert(rgn->size == size);
- assert(rgn->base == max_addr - size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -222,6 +238,8 @@ static int alloc_try_nid_low_max_generic_check(void)
{
void *allocated_ptr = NULL;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -234,7 +252,9 @@ static int alloc_try_nid_low_max_generic_check(void)
allocated_ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
min_addr, max_addr, NUMA_NO_NODE);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -259,6 +279,8 @@ static int alloc_try_nid_min_reserved_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_128;
phys_addr_t r2_size = SZ_64;
phys_addr_t total_size = r1_size + r2_size;
@@ -278,14 +300,16 @@ static int alloc_try_nid_min_reserved_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == total_size);
- assert(rgn->base == reserved_base);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, reserved_base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -310,6 +334,8 @@ static int alloc_try_nid_max_reserved_generic_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t r1_size = SZ_64;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
@@ -327,14 +353,16 @@ static int alloc_try_nid_max_reserved_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == total_size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -364,6 +392,8 @@ static int alloc_try_nid_top_down_reserved_with_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -389,17 +419,19 @@ static int alloc_try_nid_top_down_reserved_with_space_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn1->size, r1.size + r3_size);
+ ASSERT_EQ(rgn1->base, max_addr - r3_size);
- assert(rgn1->size == r1.size + r3_size);
- assert(rgn1->base == max_addr - r3_size);
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(rgn2->size == r2.size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -427,6 +459,8 @@ static int alloc_try_nid_reserved_full_merge_generic_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t total_size;
phys_addr_t max_addr;
@@ -451,14 +485,16 @@ static int alloc_try_nid_reserved_full_merge_generic_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == total_size);
- assert(rgn->base == r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+ ASSERT_EQ(rgn->base, r2.base);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -489,6 +525,8 @@ static int alloc_try_nid_top_down_reserved_no_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -514,17 +552,19 @@ static int alloc_try_nid_top_down_reserved_no_space_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base - r3_size);
- assert(rgn2->size == r2.size + r3_size);
- assert(rgn2->base == r2.base - r3_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -554,6 +594,8 @@ static int alloc_try_nid_reserved_all_generic_check(void)
void *allocated_ptr = NULL;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t max_addr;
@@ -576,7 +618,9 @@ static int alloc_try_nid_reserved_all_generic_check(void)
allocated_ptr = memblock_alloc_try_nid(r3_size, SMP_CACHE_BYTES,
min_addr, max_addr, NUMA_NO_NODE);
- assert(!allocated_ptr);
+ ASSERT_EQ(allocated_ptr, NULL);
+
+ test_pass_pop();
return 0;
}
@@ -592,6 +636,8 @@ static int alloc_try_nid_top_down_cap_max_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -605,14 +651,16 @@ static int alloc_try_nid_top_down_cap_max_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -628,6 +676,8 @@ static int alloc_try_nid_top_down_cap_min_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -641,14 +691,16 @@ static int alloc_try_nid_top_down_cap_min_check(void)
min_addr, max_addr, NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_end_of_DRAM() - size);
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -673,6 +725,8 @@ static int alloc_try_nid_bottom_up_simple_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -689,15 +743,17 @@ static int alloc_try_nid_bottom_up_simple_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
+ ASSERT_LT(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
- assert(rgn_end < max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -725,6 +781,8 @@ static int alloc_try_nid_bottom_up_start_misaligned_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_128;
phys_addr_t misalign = SZ_2;
phys_addr_t min_addr;
@@ -742,15 +800,17 @@ static int alloc_try_nid_bottom_up_start_misaligned_check(void)
b = (char *)allocated_ptr;
rgn_end = rgn->base + rgn->size;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
+ ASSERT_LT(rgn_end, max_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr + (SMP_CACHE_BYTES - misalign));
- assert(rgn_end < max_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -778,6 +838,8 @@ static int alloc_try_nid_bottom_up_narrow_range_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -792,14 +854,16 @@ static int alloc_try_nid_bottom_up_narrow_range_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
+
+ test_pass_pop();
return 0;
}
@@ -829,6 +893,8 @@ static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_64;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -855,17 +921,19 @@ static int alloc_try_nid_bottom_up_reserved_with_space_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == max_addr);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, max_addr);
- assert(rgn2->size == r2.size + r3_size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(rgn2->size, r2.size + r3_size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -899,6 +967,8 @@ static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
char *b;
struct region r1, r2;
+ PREFIX_PUSH();
+
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
@@ -925,20 +995,22 @@ static int alloc_try_nid_bottom_up_reserved_no_space_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn3->size, r3_size);
+ ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());
- assert(rgn3->size == r3_size);
- assert(rgn3->base == memblock_start_of_DRAM());
+ ASSERT_EQ(rgn2->size, r2.size);
+ ASSERT_EQ(rgn2->base, r2.base);
- assert(rgn2->size == r2.size);
- assert(rgn2->base == r2.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
- assert(rgn1->size == r1.size);
- assert(rgn1->base == r1.base);
+ ASSERT_EQ(memblock.reserved.cnt, 3);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 3);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -954,6 +1026,8 @@ static int alloc_try_nid_bottom_up_cap_max_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -968,14 +1042,16 @@ static int alloc_try_nid_bottom_up_cap_max_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, min_addr);
- assert(rgn->size == size);
- assert(rgn->base == min_addr);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -991,6 +1067,8 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
void *allocated_ptr = NULL;
char *b;
+ PREFIX_PUSH();
+
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
@@ -1005,14 +1083,16 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
NUMA_NO_NODE);
b = (char *)allocated_ptr;
- assert(allocated_ptr);
- assert(*b == 0);
+ ASSERT_NE(allocated_ptr, NULL);
+ ASSERT_EQ(*b, 0);
+
+ ASSERT_EQ(rgn->size, size);
+ ASSERT_EQ(rgn->base, memblock_start_of_DRAM());
- assert(rgn->size == size);
- assert(rgn->base == memblock_start_of_DRAM());
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == size);
+ test_pass_pop();
return 0;
}
@@ -1020,6 +1100,7 @@ static int alloc_try_nid_bottom_up_cap_min_check(void)
/* Test case wrappers */
static int alloc_try_nid_simple_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_simple_check();
memblock_set_bottom_up(true);
@@ -1030,6 +1111,7 @@ static int alloc_try_nid_simple_check(void)
static int alloc_try_nid_misaligned_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_end_misaligned_check();
memblock_set_bottom_up(true);
@@ -1040,6 +1122,7 @@ static int alloc_try_nid_misaligned_check(void)
static int alloc_try_nid_narrow_range_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_narrow_range_check();
memblock_set_bottom_up(true);
@@ -1050,6 +1133,7 @@ static int alloc_try_nid_narrow_range_check(void)
static int alloc_try_nid_reserved_with_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_reserved_with_space_check();
memblock_set_bottom_up(true);
@@ -1060,6 +1144,7 @@ static int alloc_try_nid_reserved_with_space_check(void)
static int alloc_try_nid_reserved_no_space_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_reserved_no_space_check();
memblock_set_bottom_up(true);
@@ -1070,6 +1155,7 @@ static int alloc_try_nid_reserved_no_space_check(void)
static int alloc_try_nid_cap_max_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_cap_max_check();
memblock_set_bottom_up(true);
@@ -1080,6 +1166,7 @@ static int alloc_try_nid_cap_max_check(void)
static int alloc_try_nid_cap_min_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_top_down_cap_min_check();
memblock_set_bottom_up(true);
@@ -1090,6 +1177,7 @@ static int alloc_try_nid_cap_min_check(void)
static int alloc_try_nid_min_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_min_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -1100,6 +1188,7 @@ static int alloc_try_nid_min_reserved_check(void)
static int alloc_try_nid_max_reserved_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_max_reserved_generic_check();
memblock_set_bottom_up(true);
@@ -1110,6 +1199,7 @@ static int alloc_try_nid_max_reserved_check(void)
static int alloc_try_nid_exact_address_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_exact_address_generic_check();
memblock_set_bottom_up(true);
@@ -1120,6 +1210,7 @@ static int alloc_try_nid_exact_address_check(void)
static int alloc_try_nid_reserved_full_merge_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_reserved_full_merge_generic_check();
memblock_set_bottom_up(true);
@@ -1130,6 +1221,7 @@ static int alloc_try_nid_reserved_full_merge_check(void)
static int alloc_try_nid_reserved_all_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_reserved_all_generic_check();
memblock_set_bottom_up(true);
@@ -1140,6 +1232,7 @@ static int alloc_try_nid_reserved_all_check(void)
static int alloc_try_nid_low_max_check(void)
{
+ test_print("\tRunning %s...\n", __func__);
memblock_set_bottom_up(false);
alloc_try_nid_low_max_generic_check();
memblock_set_bottom_up(true);
@@ -1150,6 +1243,12 @@ static int alloc_try_nid_low_max_check(void)
int memblock_alloc_nid_checks(void)
{
+ const char *func_testing = "memblock_alloc_try_nid";
+
+ prefix_reset();
+ prefix_push(func_testing);
+ test_print("Running %s tests...\n", func_testing);
+
reset_memblock_attributes();
dummy_physical_memory_init();
@@ -1170,5 +1269,7 @@ int memblock_alloc_nid_checks(void)
dummy_physical_memory_cleanup();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index a7bc180316d6..66f46f261e66 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -4,21 +4,29 @@
#include "basic_api.h"
#define EXPECTED_MEMBLOCK_REGIONS 128
+#define FUNC_ADD "memblock_add"
+#define FUNC_RESERVE "memblock_reserve"
+#define FUNC_REMOVE "memblock_remove"
+#define FUNC_FREE "memblock_free"
static int memblock_initialization_check(void)
{
- assert(memblock.memory.regions);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.max == EXPECTED_MEMBLOCK_REGIONS);
- assert(strcmp(memblock.memory.name, "memory") == 0);
+ PREFIX_PUSH();
- assert(memblock.reserved.regions);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.memory.max == EXPECTED_MEMBLOCK_REGIONS);
- assert(strcmp(memblock.reserved.name, "reserved") == 0);
+ ASSERT_NE(memblock.memory.regions, NULL);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
- assert(!memblock.bottom_up);
- assert(memblock.current_limit == MEMBLOCK_ALLOC_ANYWHERE);
+ ASSERT_NE(memblock.reserved.regions, NULL);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
+ ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
+
+ ASSERT_EQ(memblock.bottom_up, false);
+ ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);
+
+ test_pass_pop();
return 0;
}
@@ -40,14 +48,18 @@ static int memblock_add_simple_check(void)
.size = SZ_4M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r.base, r.size);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ test_pass_pop();
return 0;
}
@@ -69,18 +81,22 @@ static int memblock_add_node_simple_check(void)
.size = SZ_16M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
- assert(rgn->nid == 1);
+ ASSERT_EQ(rgn->nid, 1);
#endif
- assert(rgn->flags == MEMBLOCK_HOTPLUG);
+ ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ test_pass_pop();
return 0;
}
@@ -113,18 +129,22 @@ static int memblock_add_disjoint_check(void)
.size = SZ_8K
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
- assert(rgn2->base == r2.base);
- assert(rgn2->size == r2.size);
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);
- assert(memblock.memory.cnt == 2);
- assert(memblock.memory.total_size == r1.size + r2.size);
+ test_pass_pop();
return 0;
}
@@ -162,17 +182,21 @@ static int memblock_add_overlap_top_check(void)
.size = SZ_512M
};
+ PREFIX_PUSH();
+
total_size = (r1.base - r2.base) + r1.size;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -210,17 +234,21 @@ static int memblock_add_overlap_bottom_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
total_size = (r2.base - r1.base) + r2.size;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -255,15 +283,19 @@ static int memblock_add_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -279,19 +311,27 @@ static int memblock_add_twice_check(void)
.size = SZ_2M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r.base, r.size);
memblock_add(r.base, r.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r.size);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r.size);
+
+ test_pass_pop();
return 0;
}
static int memblock_add_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_ADD);
+ test_print("Running %s tests...\n", FUNC_ADD);
+
memblock_add_simple_check();
memblock_add_node_simple_check();
memblock_add_disjoint_check();
@@ -300,6 +340,8 @@ static int memblock_add_checks(void)
memblock_add_within_check();
memblock_add_twice_check();
+ prefix_pop();
+
return 0;
}
@@ -320,11 +362,15 @@ static int memblock_reserve_simple_check(void)
.size = SZ_128M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r.base, r.size);
- assert(rgn->base == r.base);
- assert(rgn->size == r.size);
+ ASSERT_EQ(rgn->base, r.base);
+ ASSERT_EQ(rgn->size, r.size);
+
+ test_pass_pop();
return 0;
}
@@ -356,18 +402,22 @@ static int memblock_reserve_disjoint_check(void)
.size = SZ_512M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1.size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1.size);
+
+ ASSERT_EQ(rgn2->base, r2.base);
+ ASSERT_EQ(rgn2->size, r2.size);
- assert(rgn2->base == r2.base);
- assert(rgn2->size == r2.size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == r1.size + r2.size);
+ test_pass_pop();
return 0;
}
@@ -406,17 +456,21 @@ static int memblock_reserve_overlap_top_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
total_size = (r1.base - r2.base) + r1.size;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -455,17 +509,21 @@ static int memblock_reserve_overlap_bottom_check(void)
.size = SZ_128K
};
+ PREFIX_PUSH();
+
total_size = (r2.base - r1.base) + r2.size;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -502,15 +560,19 @@ static int memblock_reserve_within_check(void)
.size = SZ_64K
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -527,19 +589,27 @@ static int memblock_reserve_twice_check(void)
.size = SZ_2M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r.base, r.size);
memblock_reserve(r.base, r.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r.size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r.size);
+
+ test_pass_pop();
return 0;
}
static int memblock_reserve_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_RESERVE);
+ test_print("Running %s tests...\n", FUNC_RESERVE);
+
memblock_reserve_simple_check();
memblock_reserve_disjoint_check();
memblock_reserve_overlap_top_check();
@@ -547,6 +617,8 @@ static int memblock_reserve_checks(void)
memblock_reserve_within_check();
memblock_reserve_twice_check();
+ prefix_pop();
+
return 0;
}
@@ -581,16 +653,20 @@ static int memblock_remove_simple_check(void)
.size = SZ_4M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_add(r2.base, r2.size);
memblock_remove(r1.base, r1.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == r2.size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r2.size);
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r2.size);
+
+ test_pass_pop();
return 0;
}
@@ -626,15 +702,19 @@ static int memblock_remove_absent_check(void)
.size = SZ_1G
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, r1.size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -674,6 +754,8 @@ static int memblock_remove_overlap_top_check(void)
.size = SZ_32M
};
+ PREFIX_PUSH();
+
r1_end = r1.base + r1.size;
r2_end = r2.base + r2.size;
total_size = r1_end - r2_end;
@@ -682,11 +764,13 @@ static int memblock_remove_overlap_top_check(void)
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base + r2.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base + r2.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
@@ -724,17 +808,22 @@ static int memblock_remove_overlap_bottom_check(void)
.size = SZ_256M
};
+ PREFIX_PUSH();
+
total_size = r2.base - r1.base;
reset_memblock_regions();
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
+
+ ASSERT_EQ(memblock.memory.cnt, 1);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
+
+ test_pass_pop();
- assert(memblock.memory.cnt == 1);
- assert(memblock.memory.total_size == total_size);
return 0;
}
@@ -774,6 +863,8 @@ static int memblock_remove_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
r1_size = r2.base - r1.base;
r2_size = (r1.base + r1.size) - (r2.base + r2.size);
total_size = r1_size + r2_size;
@@ -782,26 +873,34 @@ static int memblock_remove_within_check(void)
memblock_add(r1.base, r1.size);
memblock_remove(r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1_size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
+
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
- assert(rgn2->base == r2.base + r2.size);
- assert(rgn2->size == r2_size);
+ ASSERT_EQ(memblock.memory.cnt, 2);
+ ASSERT_EQ(memblock.memory.total_size, total_size);
- assert(memblock.memory.cnt == 2);
- assert(memblock.memory.total_size == total_size);
+ test_pass_pop();
return 0;
}
static int memblock_remove_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_REMOVE);
+ test_print("Running %s tests...\n", FUNC_REMOVE);
+
memblock_remove_simple_check();
memblock_remove_absent_check();
memblock_remove_overlap_top_check();
memblock_remove_overlap_bottom_check();
memblock_remove_within_check();
+ prefix_pop();
+
return 0;
}
@@ -835,16 +934,20 @@ static int memblock_free_simple_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_reserve(r2.base, r2.size);
memblock_free((void *)r1.base, r1.size);
- assert(rgn->base == r2.base);
- assert(rgn->size == r2.size);
+ ASSERT_EQ(rgn->base, r2.base);
+ ASSERT_EQ(rgn->size, r2.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r2.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r2.size);
+ test_pass_pop();
return 0;
}
@@ -880,15 +983,19 @@ static int memblock_free_absent_check(void)
.size = SZ_128M
};
+ PREFIX_PUSH();
+
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == r1.size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, r1.size);
+
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, r1.size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == r1.size);
+ test_pass_pop();
return 0;
}
@@ -928,17 +1035,21 @@ static int memblock_free_overlap_top_check(void)
.size = SZ_8M
};
+ PREFIX_PUSH();
+
total_size = (r1.size + r1.base) - (r2.base + r2.size);
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r2.base + r2.size);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r2.base + r2.size);
+ ASSERT_EQ(rgn->size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -973,17 +1084,21 @@ static int memblock_free_overlap_bottom_check(void)
.size = SZ_32M
};
+ PREFIX_PUSH();
+
total_size = r2.base - r1.base;
reset_memblock_regions();
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn->base == r1.base);
- assert(rgn->size == total_size);
+ ASSERT_EQ(rgn->base, r1.base);
+ ASSERT_EQ(rgn->size, total_size);
- assert(memblock.reserved.cnt == 1);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 1);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
@@ -1024,6 +1139,8 @@ static int memblock_free_within_check(void)
.size = SZ_1M
};
+ PREFIX_PUSH();
+
r1_size = r2.base - r1.base;
r2_size = (r1.base + r1.size) - (r2.base + r2.size);
total_size = r1_size + r2_size;
@@ -1032,26 +1149,34 @@ static int memblock_free_within_check(void)
memblock_reserve(r1.base, r1.size);
memblock_free((void *)r2.base, r2.size);
- assert(rgn1->base == r1.base);
- assert(rgn1->size == r1_size);
+ ASSERT_EQ(rgn1->base, r1.base);
+ ASSERT_EQ(rgn1->size, r1_size);
- assert(rgn2->base == r2.base + r2.size);
- assert(rgn2->size == r2_size);
+ ASSERT_EQ(rgn2->base, r2.base + r2.size);
+ ASSERT_EQ(rgn2->size, r2_size);
- assert(memblock.reserved.cnt == 2);
- assert(memblock.reserved.total_size == total_size);
+ ASSERT_EQ(memblock.reserved.cnt, 2);
+ ASSERT_EQ(memblock.reserved.total_size, total_size);
+
+ test_pass_pop();
return 0;
}
static int memblock_free_checks(void)
{
+ prefix_reset();
+ prefix_push(FUNC_FREE);
+ test_print("Running %s tests...\n", FUNC_FREE);
+
memblock_free_simple_check();
memblock_free_absent_check();
memblock_free_overlap_top_check();
memblock_free_overlap_bottom_check();
memblock_free_within_check();
+ prefix_pop();
+
return 0;
}
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
index 62d3191f7c9a..e43b2676af81 100644
--- a/tools/testing/memblock/tests/common.c
+++ b/tools/testing/memblock/tests/common.c
@@ -1,11 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include "tests/common.h"
#include <string.h>
+#include <getopt.h>
+#include <linux/memory_hotplug.h>
+#include <linux/build_bug.h>
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
+#define PREFIXES_MAX 15
+#define DELIM ": "
static struct test_memory memory_block;
+static const char __maybe_unused *prefixes[PREFIXES_MAX];
+static int __maybe_unused nr_prefixes;
+
+static const char *short_opts = "mv";
+static const struct option long_opts[] = {
+ {"movable-node", 0, NULL, 'm'},
+ {"verbose", 0, NULL, 'v'},
+ {NULL, 0, NULL, 0}
+};
+
+static const char * const help_opts[] = {
+ "disallow allocations from regions marked as hotplugged\n\t\t\t"
+ "by simulating enabling the \"movable_node\" kernel\n\t\t\t"
+ "parameter",
+ "enable verbose output, which includes the name of the\n\t\t\t"
+ "memblock function being tested, the name of the test,\n\t\t\t"
+ "and whether the test passed or failed."
+};
+
+static int verbose;
+
+/* sets global variable returned by movable_node_is_enabled() stub */
+bool movable_node_enabled;
void reset_memblock_regions(void)
{
@@ -46,3 +74,93 @@ void dummy_physical_memory_cleanup(void)
{
free(memory_block.base);
}
+
+static void usage(const char *prog)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(help_opts) != ARRAY_SIZE(long_opts) - 1);
+
+ printf("Usage: %s [-%s]\n", prog, short_opts);
+
+ for (int i = 0; long_opts[i].name; i++) {
+ printf(" -%c, --%-12s\t%s\n", long_opts[i].val,
+ long_opts[i].name, help_opts[i]);
+ }
+
+ exit(1);
+}
+
+void parse_args(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt_long_only(argc, argv, short_opts, long_opts,
+ NULL)) != -1) {
+ switch (c) {
+ case 'm':
+ movable_node_enabled = true;
+ break;
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+}
+
+void print_prefixes(const char *postfix)
+{
+ for (int i = 0; i < nr_prefixes; i++)
+ test_print("%s%s", prefixes[i], DELIM);
+ test_print(postfix);
+}
+
+void test_fail(void)
+{
+ if (verbose) {
+ ksft_test_result_fail(": ");
+ print_prefixes("failed\n");
+ }
+}
+
+void test_pass(void)
+{
+ if (verbose) {
+ ksft_test_result_pass(": ");
+ print_prefixes("passed\n");
+ }
+}
+
+void test_print(const char *fmt, ...)
+{
+ if (verbose) {
+ int saved_errno = errno;
+ va_list args;
+
+ va_start(args, fmt);
+ errno = saved_errno;
+ vprintf(fmt, args);
+ va_end(args);
+ }
+}
+
+void prefix_reset(void)
+{
+ memset(prefixes, 0, PREFIXES_MAX * sizeof(char *));
+ nr_prefixes = 0;
+}
+
+void prefix_push(const char *prefix)
+{
+ assert(nr_prefixes < PREFIXES_MAX);
+ prefixes[nr_prefixes] = prefix;
+ nr_prefixes++;
+}
+
+void prefix_pop(void)
+{
+ if (nr_prefixes > 0) {
+ prefixes[nr_prefixes - 1] = 0;
+ nr_prefixes--;
+ }
+}
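
As context for the option parsing added above, a hedged usage sketch of the memblock test runner follows; the binary name "main" is an assumption based on the memblock harness elsewhere in this series, not shown in this hunk, while the -m/-v flags come straight from long_opts above:

	$ ./main -v              # verbose: print the memblock function under test, each test name, and pass/fail
	$ ./main --movable-node  # simulate the "movable_node" kernel parameter for hotplug-aware tests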
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
index 619054d03219..3e7f23d341d7 100644
--- a/tools/testing/memblock/tests/common.h
+++ b/tools/testing/memblock/tests/common.h
@@ -7,9 +7,49 @@
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/printk.h>
+#include <../selftests/kselftest.h>
#define MEM_SIZE SZ_16K
+/**
+ * ASSERT_EQ():
+ * Check the condition
+ * @_expected == @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_EQ(_expected, _seen) do { \
+ if ((_expected) != (_seen)) \
+ test_fail(); \
+ assert((_expected) == (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_NE():
+ * Check the condition
+ * @_expected != @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_NE(_expected, _seen) do { \
+ if ((_expected) == (_seen)) \
+ test_fail(); \
+ assert((_expected) != (_seen)); \
+} while (0)
+
+/**
+ * ASSERT_LT():
+ * Check the condition
+ * @_expected < @_seen
+ * If false, print failed test message (if in VERBOSE mode) and then assert
+ */
+#define ASSERT_LT(_expected, _seen) do { \
+ if ((_expected) >= (_seen)) \
+ test_fail(); \
+ assert((_expected) < (_seen)); \
+} while (0)
+
+#define PREFIX_PUSH() prefix_push(__func__)
+
/*
* Available memory registered with memblock needs to be valid for allocs
* test to run. This is a convenience wrapper for memory allocated in
@@ -30,5 +70,19 @@ void reset_memblock_attributes(void);
void setup_memblock(void);
void dummy_physical_memory_init(void);
void dummy_physical_memory_cleanup(void);
+void parse_args(int argc, char **argv);
+
+void test_fail(void);
+void test_pass(void);
+void test_print(const char *fmt, ...);
+void prefix_reset(void);
+void prefix_push(const char *prefix);
+void prefix_pop(void);
+
+static inline void test_pass_pop(void)
+{
+ test_pass();
+ prefix_pop();
+}
#endif
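
To make the new helpers easier to follow, here is a minimal illustrative sketch (not part of the patch) of how a memblock test and its wrapper are expected to compose PREFIX_PUSH(), the ASSERT_*() macros, test_pass_pop() and the prefix stack; the test name and region layout below are hypothetical:

	/* Hypothetical example test: reserve one region and verify it. */
	static int example_reserve_simple_check(void)
	{
		struct memblock_region *rgn = &memblock.reserved.regions[0];
		struct region r = {
			.base = SZ_2G,
			.size = SZ_4M
		};

		PREFIX_PUSH();                  /* pushes __func__ onto the prefix stack */

		reset_memblock_regions();
		memblock_reserve(r.base, r.size);

		/* On mismatch, prints "<prefixes>: failed" in verbose mode, then asserts. */
		ASSERT_EQ(rgn->base, r.base);
		ASSERT_EQ(rgn->size, r.size);

		test_pass_pop();                /* reports "passed" and pops __func__ */

		return 0;
	}

	/* Hypothetical wrapper, mirroring memblock_add_checks() and friends. */
	static int example_checks(void)
	{
		prefix_reset();
		prefix_push("example_api");
		test_print("Running %s tests...\n", "example_api");

		example_reserve_simple_check();

		prefix_pop();

		return 0;
	}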
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index a33874b081b6..e89685bd587c 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -28,6 +28,7 @@
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
+#include "bpf_iter_sockmap.skel.h"
static int duration;
@@ -67,6 +68,50 @@ free_link:
bpf_link__destroy(link);
}
+static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
+ struct bpf_map *map)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ union bpf_iter_link_info linfo;
+ struct bpf_link *link;
+ char buf[16] = {};
+ int iter_fd, len;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = bpf_map__fd(map);
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(prog, &opts);
+ if (!ASSERT_OK_PTR(link, "attach_map_iter"))
+ return;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ /* Close link and map fd prematurely */
+ bpf_link__destroy(link);
+ bpf_object__destroy_skeleton(*skel);
+ *skel = NULL;
+
+ /* Try to let map free work to run first if map is freed */
+ usleep(100);
+ /* Memory used by both the sock map and the sock local storage map is
+ * freed after two synchronize_rcu() calls, so wait for it
+ */
+ kern_sync_rcu();
+ kern_sync_rcu();
+
+ /* Read after both map fd and link fd are closed */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ ASSERT_GE(len, 0, "read_iterator");
+
+ close(iter_fd);
+}
+
static int read_fd_into_buffer(int fd, char *buf, int size)
{
int bufleft = size;
@@ -634,6 +679,12 @@ static void test_bpf_hash_map(void)
goto out;
}
+ /* Sleepable program is prohibited for hash map iterator */
+ linfo.map.map_fd = map_fd;
+ link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
+ if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
+ goto out;
+
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
@@ -827,6 +878,20 @@ out:
bpf_iter_bpf_array_map__destroy(skel);
}
+static void test_bpf_array_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_array_map *skel;
+
+ skel = bpf_iter_bpf_array_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
+ skel->maps.arraymap1);
+
+ bpf_iter_bpf_array_map__destroy(skel);
+}
+
static void test_bpf_percpu_array_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1009,6 +1074,20 @@ out:
bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
+static void test_bpf_sk_stoarge_map_iter_fd(void)
+{
+ struct bpf_iter_bpf_sk_storage_map *skel;
+
+ skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
+ skel->maps.sk_stg_map);
+
+ bpf_iter_bpf_sk_storage_map__destroy(skel);
+}
+
static void test_bpf_sk_storage_map(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1044,7 +1123,15 @@ static void test_bpf_sk_storage_map(void)
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
- link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
+ link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
+ err = libbpf_get_error(link);
+ if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
+ if (!err)
+ bpf_link__destroy(link);
+ goto out;
+ }
+
+ link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
if (!ASSERT_OK_PTR(link, "attach_iter"))
goto out;
@@ -1052,6 +1139,7 @@ static void test_bpf_sk_storage_map(void)
if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
+ skel->bss->to_add_val = time(NULL);
/* do some tests */
while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
;
@@ -1065,6 +1153,13 @@ static void test_bpf_sk_storage_map(void)
if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
+ for (i = 0; i < num_sockets; i++) {
+ err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
+ if (!ASSERT_OK(err, "map_lookup") ||
+ !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
+ break;
+ }
+
close_iter:
close(iter_fd);
free_link:
@@ -1217,6 +1312,19 @@ out:
bpf_iter_task_vma__destroy(skel);
}
+void test_bpf_sockmap_map_iter_fd(void)
+{
+ struct bpf_iter_sockmap *skel;
+
+ skel = bpf_iter_sockmap__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
+ return;
+
+ do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
+
+ bpf_iter_sockmap__destroy(skel);
+}
+
void test_bpf_iter(void)
{
if (test__start_subtest("btf_id_or_null"))
@@ -1267,10 +1375,14 @@ void test_bpf_iter(void)
test_bpf_percpu_hash_map();
if (test__start_subtest("bpf_array_map"))
test_bpf_array_map();
+ if (test__start_subtest("bpf_array_map_iter_fd"))
+ test_bpf_array_map_iter_fd();
if (test__start_subtest("bpf_percpu_array_map"))
test_bpf_percpu_array_map();
if (test__start_subtest("bpf_sk_storage_map"))
test_bpf_sk_storage_map();
+ if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
+ test_bpf_sk_stoarge_map_iter_fd();
if (test__start_subtest("bpf_sk_storage_delete"))
test_bpf_sk_storage_delete();
if (test__start_subtest("bpf_sk_storage_get"))
@@ -1283,4 +1395,6 @@ void test_bpf_iter(void)
test_link_iter();
if (test__start_subtest("ksym"))
test_ksym_iter();
+ if (test__start_subtest("bpf_sockmap_map_iter_fd"))
+ test_bpf_sockmap_map_iter_fd();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 02bb8cbf9194..da860b07abb5 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include <network_helpers.h>
#include <bpf/btf.h>
+#include "bind4_prog.skel.h"
typedef int (*test_cb)(struct bpf_object *obj);
@@ -407,6 +408,98 @@ static void test_func_replace_global_func(void)
prog_name, false, NULL);
}
+static int find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *btf;
+ int ret;
+
+ ret = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ if (ret)
+ return ret;
+
+ if (!info.btf_id)
+ return -EINVAL;
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ ret = libbpf_get_error(btf);
+ if (ret)
+ return ret;
+
+ ret = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+ btf__free(btf);
+ return ret;
+}
+
+static int load_fentry(int attach_prog_fd, int attach_btf_id)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .attach_prog_fd = attach_prog_fd,
+ .attach_btf_id = attach_btf_id,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ return bpf_prog_load(BPF_PROG_TYPE_TRACING,
+ "bind4_fentry",
+ "GPL",
+ insns,
+ ARRAY_SIZE(insns),
+ &opts);
+}
+
+static void test_fentry_to_cgroup_bpf(void)
+{
+ struct bind4_prog *skel = NULL;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int cgroup_fd = -1;
+ int fentry_fd = -1;
+ int btf_id;
+
+ cgroup_fd = test__join_cgroup("/fentry_to_cgroup_bpf");
+ if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd"))
+ return;
+
+ skel = bind4_prog__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel"))
+ goto cleanup;
+
+ skel->links.bind_v4_prog = bpf_program__attach_cgroup(skel->progs.bind_v4_prog, cgroup_fd);
+ if (!ASSERT_OK_PTR(skel->links.bind_v4_prog, "bpf_program__attach_cgroup"))
+ goto cleanup;
+
+ btf_id = find_prog_btf_id("bind_v4_prog", bpf_program__fd(skel->progs.bind_v4_prog));
+ if (!ASSERT_GE(btf_id, 0, "find_prog_btf_id"))
+ goto cleanup;
+
+ fentry_fd = load_fentry(bpf_program__fd(skel->progs.bind_v4_prog), btf_id);
+ if (!ASSERT_GE(fentry_fd, 0, "load_fentry"))
+ goto cleanup;
+
+ /* Make sure bpf_obj_get_info_by_fd works correctly when attaching
+ * to another BPF program.
+ */
+
+ ASSERT_OK(bpf_obj_get_info_by_fd(fentry_fd, &info, &info_len),
+ "bpf_obj_get_info_by_fd");
+
+ ASSERT_EQ(info.btf_id, 0, "info.btf_id");
+ ASSERT_EQ(info.attach_btf_id, btf_id, "info.attach_btf_id");
+ ASSERT_GT(info.attach_btf_obj_id, 0, "info.attach_btf_obj_id");
+
+cleanup:
+ if (cgroup_fd >= 0)
+ close(cgroup_fd);
+ if (fentry_fd >= 0)
+ close(fentry_fd);
+ bind4_prog__destroy(skel);
+}
+
/* NOTE: affect other tests, must run in serial mode */
void serial_test_fexit_bpf2bpf(void)
{
@@ -430,4 +523,6 @@ void serial_test_fexit_bpf2bpf(void)
test_fmod_ret_freplace();
if (test__start_subtest("func_replace_global_func"))
test_func_replace_global_func();
+ if (test__start_subtest("fentry_to_cgroup_bpf"))
+ test_fentry_to_cgroup_bpf();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/lru_bug.c b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
new file mode 100644
index 000000000000..3c7822390827
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lru_bug.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "lru_bug.skel.h"
+
+void test_lru_bug(void)
+{
+ struct lru_bug *skel;
+ int ret;
+
+ skel = lru_bug__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "lru_bug__open_and_load"))
+ return;
+ ret = lru_bug__attach(skel);
+ if (!ASSERT_OK(ret, "lru_bug__attach"))
+ goto end;
+ usleep(1);
+ ASSERT_OK(skel->data->result, "prealloc_lru_pop doesn't call check_and_init_map_value");
+end:
+ lru_bug__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 0aa3cd34cbe3..d7a69217fb68 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -112,3 +112,12 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
return 0;
}
+
+SEC("iter.s/bpf_map_elem")
+int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx)
+{
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index 6b70ccaba301..c7b8e006b171 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -16,19 +16,37 @@ struct {
__u32 val_sum = 0;
__u32 ipv6_sk_count = 0;
+__u32 to_add_val = 0;
SEC("iter/bpf_sk_storage_map")
-int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
{
struct sock *sk = ctx->sk;
__u32 *val = ctx->value;
- if (sk == (void *)0 || val == (void *)0)
+ if (sk == NULL || val == NULL)
return 0;
if (sk->sk_family == AF_INET6)
ipv6_sk_count++;
val_sum += *val;
+
+ *val += to_add_val;
+
+ return 0;
+}
+
+SEC("iter/bpf_sk_storage_map")
+int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 *val = ctx->value;
+
+ if (sk == NULL || val == NULL)
+ return 0;
+
+ *(val + 1) = 0xdeadbeef;
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c
new file mode 100644
index 000000000000..687081a724b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lru_bug.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct task_struct __kptr *ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_value);
+} lru_map SEC(".maps");
+
+int pid = 0;
+int result = 1;
+
+SEC("fentry/bpf_ktime_get_ns")
+int printk(void *ctx)
+{
+ struct map_value v = {};
+
+ if (pid == bpf_get_current_task_btf()->pid)
+ bpf_map_update_elem(&lru_map, &(int){0}, &v, 0);
+ return 0;
+}
+
+SEC("fentry/do_nanosleep")
+int nanosleep(void *ctx)
+{
+ struct map_value val = {}, *v;
+ struct task_struct *current;
+
+ bpf_map_update_elem(&lru_map, &(int){0}, &val, 0);
+ v = bpf_map_lookup_elem(&lru_map, &(int){0});
+ if (!v)
+ return 0;
+ bpf_map_delete_elem(&lru_map, &(int){0});
+ current = bpf_get_current_task_btf();
+ v->ptr = current;
+ pid = current->pid;
+ bpf_ktime_get_ns();
+ result = !v->ptr;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c7f47429d6cd..4c122f1b1737 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -4,6 +4,8 @@ include ../../../build/Build.include
all:
top_srcdir = ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
@@ -197,7 +199,8 @@ endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
- -I$(<D) -Iinclude/$(UNAME_M) -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
+ -I$(<D) -Iinclude/$(UNAME_M) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+ $(KHDR_INCLUDES)
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
@@ -206,7 +209,7 @@ no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
-
+LDLIBS += -ldl
LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# After inclusion, $(OUTPUT) is defined and
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index a54d4d05a058..fac248a43666 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -20,15 +20,7 @@
#include "processor.h"
#include "test_util.h"
-static __thread volatile struct rseq __rseq = {
- .cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
-};
-
-/*
- * Use an arbitrary, bogus signature for configuring rseq, this test does not
- * actually enter an rseq critical section.
- */
-#define RSEQ_SIG 0xdeadbeef
+#include "../rseq/rseq.c"
/*
* Any bug related to task migration is likely to be timing-dependent; perform
@@ -49,12 +41,16 @@ static void guest_code(void)
GUEST_SYNC(0);
}
-static void sys_rseq(int flags)
+/*
+ * We have to perform a direct system call for getcpu() because it's
+ * not available until glibc 2.29.
+ */
+static void sys_getcpu(unsigned *cpu)
{
int r;
- r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
- TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
+ r = syscall(__NR_getcpu, cpu, NULL, NULL);
+ TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)", errno, strerror(errno));
}
static int next_cpu(int cpu)
@@ -101,7 +97,7 @@ static void *migration_worker(void *__rseq_tid)
atomic_inc(&seq_cnt);
/*
- * Ensure the odd count is visible while sched_getcpu() isn't
+ * Ensure the odd count is visible while getcpu() isn't
* stable, i.e. while changing affinity is in-progress.
*/
smp_wmb();
@@ -142,10 +138,10 @@ static void *migration_worker(void *__rseq_tid)
* check completes.
*
* 3. To ensure the read-side makes efficient forward progress,
- * e.g. if sched_getcpu() involves a syscall. Stalling the
- * read-side means the test will spend more time waiting for
- * sched_getcpu() to stabilize and less time trying to hit
- * the timing-dependent bug.
+ * e.g. if getcpu() involves a syscall. Stalling the read-side
+ * means the test will spend more time waiting for getcpu()
+ * to stabilize and less time trying to hit the timing-dependent
+ * bug.
*
* Because any bug in this area is likely to be timing-dependent,
* run with a range of delays at 1us intervals from 1us to 10us
@@ -218,7 +214,9 @@ int main(int argc, char *argv[])
calc_min_max_cpu();
- sys_rseq(0);
+ r = rseq_register_current_thread();
+ TEST_ASSERT(!r, "rseq_register_current_thread failed, errno = %d (%s)",
+ errno, strerror(errno));
/*
* Create and run a dummy VM that immediately exits to userspace via
@@ -238,9 +236,9 @@ int main(int argc, char *argv[])
/*
* Verify rseq's CPU matches sched's CPU. Ensure migration
- * doesn't occur between sched_getcpu() and reading the rseq
- * cpu_id by rereading both if the sequence count changes, or
- * if the count is odd (migration in-progress).
+ * doesn't occur between getcpu() and reading the rseq cpu_id
+ * by rereading both if the sequence count changes, or if the
+ * count is odd (migration in-progress).
*/
do {
/*
@@ -250,13 +248,13 @@ int main(int argc, char *argv[])
snapshot = atomic_read(&seq_cnt) & ~1;
/*
- * Ensure reading sched_getcpu() and rseq.cpu_id
- * complete in a single "no migration" window, i.e. are
- * not reordered across the seq_cnt reads.
+ * Ensure calling getcpu() and reading rseq.cpu_id complete
+ * in a single "no migration" window, i.e. are not reordered
+ * across the seq_cnt reads.
*/
smp_rmb();
- cpu = sched_getcpu();
- rseq_cpu = READ_ONCE(__rseq.cpu_id);
+ sys_getcpu(&cpu);
+ rseq_cpu = rseq_current_cpu_raw();
smp_rmb();
} while (snapshot != atomic_read(&seq_cnt));
@@ -267,9 +265,9 @@ int main(int argc, char *argv[])
/*
* Sanity check that the test was able to enter the guest a reasonable
* number of times, e.g. didn't get stalled too often/long waiting for
- * sched_getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a
- * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
- * than migrations given the 1us+ delay in the migration task.
+ * getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a fairly
+ * conservative ratio on x86-64, which can do _more_ KVM_RUNs than
+ * migrations given the 1us+ delay in the migration task.
*/
TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
"Only performed %d KVM_RUNs, task stalled too much?\n", i);
@@ -278,7 +276,7 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);
- sys_rseq(RSEQ_FLAG_UNREGISTER);
+ rseq_unregister_current_thread();
return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index 6ec901dab61e..069589c52f41 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -59,6 +59,7 @@ int main(int argc, char *argv[])
int ret;
union cpuid10_eax eax;
union perf_capabilities host_cap;
+ uint64_t val;
host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
@@ -91,11 +92,17 @@ int main(int argc, char *argv[])
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
- /* testcase 3, check invalid LBR format is rejected */
- /* Note, on Arch LBR capable platforms, LBR_FMT in perf capability msr is 0x3f,
- * to avoid the failure, use a true invalid format 0x30 for the test. */
- ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+ /*
+ * Testcase 3, check that an "invalid" LBR format is rejected. Only an
+ * exact match of the host's format (and 0/disabled) is allowed.
+ */
+ for (val = 1; val <= PMU_CAP_LBR_FMT; val++) {
+ if (val == (host_cap.capabilities & PMU_CAP_LBR_FMT))
+ continue;
+
+ ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, val);
+ TEST_ASSERT(!ret, "Bad LBR FMT = 0x%lx didn't fail", val);
+ }
printf("Completed perf capability tests.\n");
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 892306bdb47d..0e5751af6247 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -38,4 +38,5 @@ ioam6_parser
toeplitz
tun
cmsg_sender
-unix_connect
\ No newline at end of file
+unix_connect
+tap
\ No newline at end of file
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index e2dfef8b78a7..c0ee2955fe54 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -57,7 +57,7 @@ TEST_GEN_FILES += ipsec
TEST_GEN_FILES += ioam6_parser
TEST_GEN_FILES += gro
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun tap
TEST_GEN_FILES += toeplitz
TEST_GEN_FILES += cmsg_sender
TEST_GEN_FILES += stress_reuseport_listen
diff --git a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
index a15d21dc035a..56eb83d1a3bd 100755
--- a/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/custom_multipath_hash.sh
@@ -181,37 +181,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:4::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:4::2-2001:db8:4::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -226,13 +232,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:4::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:4::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:4::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
index a73f52efcb6c..0446db9c6f74 100755
--- a/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh
@@ -276,37 +276,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -321,13 +327,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
index 8fea2c2e0b25..d40183b4eccc 100755
--- a/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
+++ b/tools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh
@@ -278,37 +278,43 @@ ping_ipv6()
send_src_ipv4()
{
- $MZ $h1 -q -p 64 -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A "198.51.100.2-198.51.100.253" -B 203.0.113.2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B "203.0.113.2-203.0.113.253" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_src_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp4()
{
- $MZ $h1 -q -p 64 -A 198.51.100.2 -B 203.0.113.2 \
+ ip vrf exec v$h1 $MZ $h1 -q -p 64 \
+ -A 198.51.100.2 -B 203.0.113.2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
send_src_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A "2001:db8:1::2-2001:db8:1::fd" -B 2001:db8:2::2 \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
send_dst_ipv6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B "2001:db8:2::2-2001:db8:2::fd" \
-d 1msec -c 50 -t udp "sp=20000,dp=30000"
}
@@ -323,13 +329,15 @@ send_flowlabel()
send_src_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=0-32768,dp=30000"
}
send_dst_udp6()
{
- $MZ -6 $h1 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
+ ip vrf exec v$h1 $MZ -6 $h1 -q -p 64 \
+ -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=20000,dp=0-32768"
}
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index e2ea6c126c99..24d4e9cb617e 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -553,6 +553,18 @@ static void set_nonblock(int fd, bool nonblock)
fcntl(fd, F_SETFL, flags & ~O_NONBLOCK);
}
+static void shut_wr(int fd)
+{
+ /* Close our write side, eventually give some time
+ * for address notification and/or checking
+ * the current status
+ */
+ if (cfg_wait)
+ usleep(cfg_wait);
+
+ shutdown(fd, SHUT_WR);
+}
+
static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after_out)
{
struct pollfd fds = {
@@ -630,14 +642,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd, bool *in_closed_after
/* ... and peer also closed already */
break;
- /* ... but we still receive.
- * Close our write side, ev. give some time
- * for address notification and/or checking
- * the current status
- */
- if (cfg_wait)
- usleep(cfg_wait);
- shutdown(peerfd, SHUT_WR);
+ shut_wr(peerfd);
} else {
if (errno == EINTR)
continue;
@@ -767,7 +772,7 @@ static int copyfd_io_mmap(int infd, int peerfd, int outfd,
if (err)
return err;
- shutdown(peerfd, SHUT_WR);
+ shut_wr(peerfd);
err = do_recvfile(peerfd, outfd);
*in_closed_after_out = true;
@@ -791,6 +796,9 @@ static int copyfd_io_sendfile(int infd, int peerfd, int outfd,
err = do_sendfile(infd, peerfd, size);
if (err)
return err;
+
+ shut_wr(peerfd);
+
err = do_recvfile(peerfd, outfd);
*in_closed_after_out = true;
}
diff --git a/tools/testing/selftests/net/tap.c b/tools/testing/selftests/net/tap.c
new file mode 100644
index 000000000000..247c3b3ac1c9
--- /dev/null
+++ b/tools/testing/selftests/net/tap.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <linux/virtio_net.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include "../kselftest_harness.h"
+
+static const char param_dev_tap_name[] = "xmacvtap0";
+static const char param_dev_dummy_name[] = "xdummy0";
+static unsigned char param_hwaddr_src[] = { 0x00, 0xfe, 0x98, 0x14, 0x22, 0x42 };
+static unsigned char param_hwaddr_dest[] = {
+ 0x00, 0xfe, 0x98, 0x94, 0xd2, 0x43
+};
+
+#define MAX_RTNL_PAYLOAD (2048)
+#define PKT_DATA 0xCB
+#define TEST_PACKET_SZ (sizeof(struct virtio_net_hdr) + ETH_HLEN + ETH_MAX_MTU)
+
+static struct rtattr *rtattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned short len)
+{
+ struct rtattr *rta =
+ (struct rtattr *)((uint8_t *)nh + RTA_ALIGN(nh->nlmsg_len));
+ rta->rta_type = type;
+ rta->rta_len = RTA_LENGTH(len);
+ nh->nlmsg_len = RTA_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+ return rta;
+}
+
+static struct rtattr *rtattr_begin(struct nlmsghdr *nh, unsigned short type)
+{
+ return rtattr_add(nh, type, 0);
+}
+
+static void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr)
+{
+ uint8_t *end = (uint8_t *)nh + nh->nlmsg_len;
+
+ attr->rta_len = end - (uint8_t *)attr;
+}
+
+static struct rtattr *rtattr_add_str(struct nlmsghdr *nh, unsigned short type,
+ const char *s)
+{
+ struct rtattr *rta = rtattr_add(nh, type, strlen(s));
+
+ memcpy(RTA_DATA(rta), s, strlen(s));
+ return rta;
+}
+
+static struct rtattr *rtattr_add_strsz(struct nlmsghdr *nh, unsigned short type,
+ const char *s)
+{
+ struct rtattr *rta = rtattr_add(nh, type, strlen(s) + 1);
+
+ strcpy(RTA_DATA(rta), s);
+ return rta;
+}
+
+static struct rtattr *rtattr_add_any(struct nlmsghdr *nh, unsigned short type,
+ const void *arr, size_t len)
+{
+ struct rtattr *rta = rtattr_add(nh, type, len);
+
+ memcpy(RTA_DATA(rta), arr, len);
+ return rta;
+}
+
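
The rtattr_* helpers above append route attributes directly behind the current nlmsg_len. Nesting works by emitting an empty container first with rtattr_begin() and backfilling its rta_len with rtattr_end() once all children are in place. A usage sketch of that nesting, assuming the helpers above are in scope; it mirrors what dev_create() below does for IFLA_LINKINFO:

static void fill_linkinfo(struct nlmsghdr *nh, const char *kind)
{
	struct rtattr *link_info;

	/* container header; its final length is unknown until the children exist */
	link_info = rtattr_begin(nh, IFLA_LINKINFO);

	/* child attribute, appended after the container header */
	rtattr_add_strsz(nh, IFLA_INFO_KIND, kind);

	/* patch the container's rta_len to cover everything appended since */
	rtattr_end(nh, link_info);
}
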
+static int dev_create(const char *dev, const char *link_type,
+ int (*fill_rtattr)(struct nlmsghdr *nh),
+ int (*fill_info_data)(struct nlmsghdr *nh))
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ unsigned char data[MAX_RTNL_PAYLOAD];
+ } req;
+ struct rtattr *link_info, *info_data;
+ int ret, rtnl;
+
+ rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+ if (rtnl < 0) {
+ fprintf(stderr, "%s: socket %s\n", __func__, strerror(errno));
+ return 1;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
+ req.nh.nlmsg_type = RTM_NEWLINK;
+
+ req.info.ifi_family = AF_UNSPEC;
+ req.info.ifi_type = 1;
+ req.info.ifi_index = 0;
+ req.info.ifi_flags = IFF_BROADCAST | IFF_UP;
+ req.info.ifi_change = 0xffffffff;
+
+ rtattr_add_str(&req.nh, IFLA_IFNAME, dev);
+
+ if (fill_rtattr) {
+ ret = fill_rtattr(&req.nh);
+ if (ret)
+ return ret;
+ }
+
+ link_info = rtattr_begin(&req.nh, IFLA_LINKINFO);
+
+ rtattr_add_strsz(&req.nh, IFLA_INFO_KIND, link_type);
+
+ if (fill_info_data) {
+ info_data = rtattr_begin(&req.nh, IFLA_INFO_DATA);
+ ret = fill_info_data(&req.nh);
+ if (ret)
+ return ret;
+ rtattr_end(&req.nh, info_data);
+ }
+
+ rtattr_end(&req.nh, link_info);
+
+ ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+ if (ret < 0)
+ fprintf(stderr, "%s: send %s\n", __func__, strerror(errno));
+ ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+ close(rtnl);
+ return ret;
+}
+
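
dev_create() above and dev_delete() below only check that the full netlink message was written; neither reads an acknowledgement back. A hedged sketch of how an ACK could be requested and inspected, not part of the patch; the sender would additionally set NLM_F_ACK in nlmsg_flags:

static int rtnl_read_ack(int rtnl)
{
	struct {
		struct nlmsghdr nh;
		struct nlmsgerr err;
		unsigned char pad[256];
	} resp;
	ssize_t n = recv(rtnl, &resp, sizeof(resp), 0);

	if (n < (ssize_t)NLMSG_LENGTH(sizeof(resp.err)))
		return -1;
	if (resp.nh.nlmsg_type != NLMSG_ERROR)
		return -1;

	return resp.err.error;	/* 0 on success, negative errno on failure */
}
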
+static int dev_delete(const char *dev)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ unsigned char data[MAX_RTNL_PAYLOAD];
+ } req;
+ int ret, rtnl;
+
+ rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+ if (rtnl < 0) {
+ fprintf(stderr, "%s: socket %s\n", __func__, strerror(errno));
+ return 1;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_DELLINK;
+
+ req.info.ifi_family = AF_UNSPEC;
+
+ rtattr_add_str(&req.nh, IFLA_IFNAME, dev);
+
+ ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+ if (ret < 0)
+ fprintf(stderr, "%s: send %s\n", __func__, strerror(errno));
+
+ ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+ close(rtnl);
+ return ret;
+}
+
+static int macvtap_fill_rtattr(struct nlmsghdr *nh)
+{
+ int ifindex;
+
+ ifindex = if_nametoindex(param_dev_dummy_name);
+ if (ifindex == 0) {
+ fprintf(stderr, "%s: ifindex %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ rtattr_add_any(nh, IFLA_LINK, &ifindex, sizeof(ifindex));
+ rtattr_add_any(nh, IFLA_ADDRESS, param_hwaddr_src, ETH_ALEN);
+
+ return 0;
+}
+
+static int opentap(const char *devname)
+{
+ int ifindex;
+ char buf[256];
+ int fd;
+ struct ifreq ifr;
+
+ ifindex = if_nametoindex(devname);
+ if (ifindex == 0) {
+ fprintf(stderr, "%s: ifindex %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ sprintf(buf, "/dev/tap%d", ifindex);
+ fd = open(buf, O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ fprintf(stderr, "%s: open %s\n", __func__, strerror(errno));
+ return -errno;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ strcpy(ifr.ifr_name, devname);
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR | IFF_MULTI_QUEUE;
+ if (ioctl(fd, TUNSETIFF, &ifr, sizeof(ifr)) < 0)
+ return -errno;
+ return fd;
+}
+
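
macvtap exposes one character device per link, /dev/tap<ifindex>, which is why opentap() resolves the ifindex before opening. Because the fd is configured with IFF_VNET_HDR, every frame written to or read from it carries a struct virtio_net_hdr prefix. A small sketch of reading a frame back and splitting off that prefix, illustrative only:

static ssize_t read_tap_frame(int fd, uint8_t *buf, size_t len,
			      struct virtio_net_hdr *vh)
{
	ssize_t n = read(fd, buf, len);

	if (n < (ssize_t)sizeof(*vh))
		return -1;

	memcpy(vh, buf, sizeof(*vh));	/* virtio header comes first */
	return n - sizeof(*vh);		/* the rest is the Ethernet frame */
}
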
+size_t build_eth(uint8_t *buf, uint16_t proto)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+
+ eth->h_proto = htons(proto);
+ memcpy(eth->h_source, param_hwaddr_src, ETH_ALEN);
+ memcpy(eth->h_dest, param_hwaddr_dest, ETH_ALEN);
+
+ return ETH_HLEN;
+}
+
+static uint32_t add_csum(const uint8_t *buf, int len)
+{
+ uint32_t sum = 0;
+ uint16_t *sbuf = (uint16_t *)buf;
+
+ while (len > 1) {
+ sum += *sbuf++;
+ len -= 2;
+ }
+
+ if (len)
+ sum += *(uint8_t *)sbuf;
+
+ return sum;
+}
+
+static uint16_t finish_ip_csum(uint32_t sum)
+{
+ uint16_t lo = sum & 0xffff;
+ uint16_t hi = sum >> 16;
+
+ return ~(lo + hi);
+}
+
+static uint16_t build_ip_csum(const uint8_t *buf, int len,
+ uint32_t sum)
+{
+ sum += add_csum(buf, len);
+ return finish_ip_csum(sum);
+}
+
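
add_csum(), finish_ip_csum() and build_ip_csum() together implement the standard Internet checksum (RFC 1071): sum the data as 16-bit words, fold the carries back into the low 16 bits, and take the one's complement. A compact standalone sketch of the same computation, folding until no carry remains:

static uint16_t inet_csum(const void *data, int len)
{
	const uint16_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit words */
		sum += *p++;
		len -= 2;
	}
	if (len)			/* trailing odd byte, if any */
		sum += *(const uint8_t *)p;

	while (sum >> 16)		/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return ~sum;			/* one's complement */
}
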
+static int build_ipv4_header(uint8_t *buf, int payload_len)
+{
+ struct iphdr *iph = (struct iphdr *)buf;
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 8;
+ iph->tot_len =
+ htons(sizeof(*iph) + sizeof(struct udphdr) + payload_len);
+ iph->id = htons(1337);
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = htonl((172 << 24) | (17 << 16) | 2);
+ iph->daddr = htonl((172 << 24) | (17 << 16) | 1);
+ iph->check = build_ip_csum(buf, iph->ihl << 2, 0);
+
+ return iph->ihl << 2;
+}
+
+static int build_udp_packet(uint8_t *buf, int payload_len, bool csum_off)
+{
+ const int ip4alen = sizeof(uint32_t);
+ struct udphdr *udph = (struct udphdr *)buf;
+ int len = sizeof(*udph) + payload_len;
+ uint32_t sum = 0;
+
+ udph->source = htons(22);
+ udph->dest = htons(58822);
+ udph->len = htons(len);
+
+ memset(buf + sizeof(struct udphdr), PKT_DATA, payload_len);
+
+ sum = add_csum(buf - 2 * ip4alen, 2 * ip4alen);
+ sum += htons(IPPROTO_UDP) + udph->len;
+
+ if (!csum_off)
+ sum += add_csum(buf, len);
+
+ udph->check = finish_ip_csum(sum);
+
+ return sizeof(*udph) + payload_len;
+}
+
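
The UDP checksum also covers a pseudo-header. build_udp_packet() picks up the IPv4 source and destination addresses by summing the two 32-bit words that immediately precede the UDP header in the frame (buf - 2 * ip4alen), then adds the protocol and UDP length; with csum_off the payload is excluded, leaving a partial checksum for the stack to complete when VIRTIO_NET_HDR_F_NEEDS_CSUM is set. For reference, the IPv4 pseudo-header fields being summed (RFC 768 layout, struct name illustrative):

struct udp4_pseudo_hdr {
	uint32_t saddr;		/* IPv4 source address */
	uint32_t daddr;		/* IPv4 destination address */
	uint8_t  zero;		/* always 0 */
	uint8_t  protocol;	/* IPPROTO_UDP (17) */
	uint16_t len;		/* UDP length, network byte order */
};
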
+size_t build_test_packet_valid_udp_gso(uint8_t *buf, size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
+ vh->hdr_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr);
+ vh->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vh->csum_start = ETH_HLEN + sizeof(struct iphdr);
+ vh->csum_offset = __builtin_offsetof(struct udphdr, check);
+ vh->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ vh->gso_size = ETH_DATA_LEN - sizeof(struct iphdr);
+ cur += sizeof(*vh);
+
+ cur += build_eth(cur, ETH_P_IP);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, true);
+
+ return cur - buf;
+}
+
+size_t build_test_packet_valid_udp_csum(uint8_t *buf, size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
+ vh->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+ vh->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+ cur += sizeof(*vh);
+
+ cur += build_eth(cur, ETH_P_IP);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, false);
+
+ return cur - buf;
+}
+
+size_t build_test_packet_crash_tap_invalid_eth_proto(uint8_t *buf,
+ size_t payload_len)
+{
+ uint8_t *cur = buf;
+ struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
+
+ vh->hdr_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr);
+ vh->flags = 0;
+ vh->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ vh->gso_size = ETH_DATA_LEN - sizeof(struct iphdr);
+ cur += sizeof(*vh);
+
+ cur += build_eth(cur, 0);
+ cur += sizeof(struct iphdr) + sizeof(struct udphdr);
+ cur += build_ipv4_header(cur, payload_len);
+ cur += build_udp_packet(cur, payload_len, true);
+ cur += payload_len;
+
+ return cur - buf;
+}
+
+FIXTURE(tap)
+{
+ int fd;
+};
+
+FIXTURE_SETUP(tap)
+{
+ int ret;
+
+ ret = dev_create(param_dev_dummy_name, "dummy", NULL, NULL);
+ EXPECT_EQ(ret, 0);
+
+ ret = dev_create(param_dev_tap_name, "macvtap", macvtap_fill_rtattr,
+ NULL);
+ EXPECT_EQ(ret, 0);
+
+ self->fd = opentap(param_dev_tap_name);
+ ASSERT_GE(self->fd, 0);
+}
+
+FIXTURE_TEARDOWN(tap)
+{
+ int ret;
+
+ if (self->fd != -1)
+ close(self->fd);
+
+ ret = dev_delete(param_dev_tap_name);
+ EXPECT_EQ(ret, 0);
+
+ ret = dev_delete(param_dev_dummy_name);
+ EXPECT_EQ(ret, 0);
+}
+
+TEST_F(tap, test_packet_valid_udp_gso)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_valid_udp_gso(pkt, 1021);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, off);
+}
+
+TEST_F(tap, test_packet_valid_udp_csum)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_valid_udp_csum(pkt, 1024);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, off);
+}
+
+TEST_F(tap, test_packet_crash_tap_invalid_eth_proto)
+{
+ uint8_t pkt[TEST_PACKET_SZ];
+ size_t off;
+ int ret;
+
+ memset(pkt, 0, sizeof(pkt));
+ off = build_test_packet_crash_tap_invalid_eth_proto(pkt, 1024);
+ ret = write(self->fd, pkt, off);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_HARNESS_MAIN
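
The test file is built on the kselftest harness: FIXTURE() declares per-test state reachable through self, FIXTURE_SETUP()/FIXTURE_TEARDOWN() run around every TEST_F(), and TEST_HARNESS_MAIN supplies main(). A stripped-down skeleton of that pattern, names illustrative:

#include <unistd.h>
#include "../kselftest_harness.h"

FIXTURE(example)
{
	int fd;
};

FIXTURE_SETUP(example)
{
	self->fd = -1;			/* acquire per-test resources here */
}

FIXTURE_TEARDOWN(example)
{
	if (self->fd != -1)		/* release them after each TEST_F */
		close(self->fd);
}

TEST_F(example, fd_starts_closed)
{
	ASSERT_EQ(self->fd, -1);	/* every TEST_F sees a fresh fixture */
}

TEST_HARNESS_MAIN
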
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
index f1affd12c4b1..a7f62ad4f661 100755
--- a/tools/testing/selftests/netfilter/nft_trans_stress.sh
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -9,8 +9,27 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-testns=testns1
+testns=testns-$(mktemp -u "XXXXXXXX")
+
tables="foo bar baz quux"
+global_ret=0
+eret=0
+lret=0
+
+check_result()
+{
+ local r=$1
+ local OK="PASS"
+
+ if [ $r -ne 0 ] ;then
+ OK="FAIL"
+ global_ret=$r
+ fi
+
+ echo "$OK: nft $2 test returned $r"
+
+ eret=0
+}
nft --version > /dev/null 2>&1
if [ $? -ne 0 ];then
@@ -59,16 +78,66 @@ done)
sleep 1
+ip netns exec "$testns" nft -f "$tmp"
for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
for table in $tables;do
- randsleep=$((RANDOM%10))
+ randsleep=$((RANDOM%2))
sleep $randsleep
- ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+ ip netns exec "$testns" nft delete table inet $table
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
done
-randsleep=$((RANDOM%10))
-sleep $randsleep
+check_result $eret "add/delete"
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp") | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+done
+
+check_result $eret "reload"
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp"
+ echo "insert rule inet foo INPUT meta nftrace set 1"
+ echo "insert rule inet foo OUTPUT meta nftrace set 1"
+ ) | ip netns exec "$testns" nft -f /dev/stdin
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+
+ (echo "flush ruleset"; cat "$tmp"
+ ) | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=$lret
+ fi
+done
+
+check_result $eret "add/delete with nftrace enabled"
+
+echo "insert rule inet foo INPUT meta nftrace set 1" >> $tmp
+echo "insert rule inet foo OUTPUT meta nftrace set 1" >> $tmp
+
+for i in $(seq 1 10) ; do
+ (echo "flush ruleset"; cat "$tmp") | ip netns exec "$testns" nft -f /dev/stdin
+
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ eret=1
+ fi
+done
+
+check_result $eret "reload with nftrace enabled"
pkill -9 ping
@@ -76,3 +145,5 @@ wait
rm -f "$tmp"
ip netns del "$testns"
+
+exit $global_ret
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index b00b1bfd9d8e..cb1108bc9249 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <dirent.h>
#include <libintl.h>
+#include <limits.h>
#include <ctype.h>
#include <time.h>
#include <syslog.h>
@@ -33,9 +34,9 @@ int sysfs_set_ulong(char *path, char *filename, unsigned long val)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "w");
if (!fd) {
@@ -57,9 +58,9 @@ static int sysfs_get_ulong(char *path, char *filename, unsigned long *p_ulong)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
@@ -76,9 +77,9 @@ static int sysfs_get_string(char *path, char *filename, char *str)
{
FILE *fd;
int ret = -1;
- char filepath[256];
+ char filepath[PATH_MAX + 2]; /* NUL and '/' */
- snprintf(filepath, 256, "%s/%s", path, filename);
+ snprintf(filepath, sizeof(filepath), "%s/%s", path, filename);
fd = fopen(filepath, "r");
if (!fd) {
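
The tmon changes size the path buffers from PATH_MAX and pass sizeof(filepath) to snprintf() instead of repeating a hard-coded 256, so the bound always tracks the buffer. snprintf() still truncates silently; a sketch of a companion truncation check, not part of the patch:

#include <limits.h>
#include <stdio.h>

/* Join dir and file into out; return -1 if the result did not fit. */
static int join_path(char *out, size_t outsz, const char *dir, const char *file)
{
	int n = snprintf(out, outsz, "%s/%s", dir, file);

	return (n < 0 || (size_t)n >= outsz) ? -1 : 0;
}
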
@@ -199,8 +200,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
{
unsigned long trip_instance = 0;
char cdev_name_linked[256];
- char cdev_name[256];
- char cdev_trip_name[256];
+ char cdev_name[PATH_MAX];
+ char cdev_trip_name[PATH_MAX];
int cdev_id;
if (nl->d_type == DT_LNK) {
@@ -213,7 +214,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
return -EINVAL;
}
/* find the link to real cooling device record binding */
- snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name);
+ snprintf(cdev_name, sizeof(cdev_name) - 2, "%s/%s",
+ tz_name, nl->d_name);
memset(cdev_name_linked, 0, sizeof(cdev_name_linked));
if (readlink(cdev_name, cdev_name_linked,
sizeof(cdev_name_linked) - 1) != -1) {
@@ -226,8 +228,8 @@ static int find_tzone_cdev(struct dirent *nl, char *tz_name,
/* find the trip point in which the cdev is binded to
* in this tzone
*/
- snprintf(cdev_trip_name, 256, "%s%s", nl->d_name,
- "_trip_point");
+ snprintf(cdev_trip_name, sizeof(cdev_trip_name) - 1,
+ "%s%s", nl->d_name, "_trip_point");
sysfs_get_ulong(tz_name, cdev_trip_name,
&trip_instance);
/* validate trip point range, e.g. trip could return -1