path: root/tools/testing/selftests/mm/hugepage-vmemmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * A test case of using hugepage memory in a user application via the
 * mmap system call with the MAP_HUGETLB flag.  Before running this program,
 * make sure the administrator has reserved enough default sized huge
 * pages to cover the single huge page allocation.
 */
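
/*
 * For example, reserving default sized huge pages before running the test
 * might look like this (a count of 2 is only illustrative; one page is
 * enough here):
 *
 *	echo 2 > /proc/sys/vm/nr_hugepages
 *
 * The test also generally needs to run as root: /proc/kpageflags is only
 * readable by root, and /proc/self/pagemap hides page frame numbers from
 * unprivileged readers.
 */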
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"

/*
 * Bit positions of the flags reported by /proc/kpageflags (see
 * Documentation/admin-guide/mm/pagemap.rst): KPF_COMPOUND_HEAD,
 * KPF_COMPOUND_TAIL and KPF_HUGE.
 */
#define PAGE_COMPOUND_HEAD	(1UL << 15)
#define PAGE_COMPOUND_TAIL	(1UL << 16)
#define PAGE_HUGE		(1UL << 17)

#define HEAD_PAGE_FLAGS		(PAGE_COMPOUND_HEAD | PAGE_HUGE)
#define TAIL_PAGE_FLAGS		(PAGE_COMPOUND_TAIL | PAGE_HUGE)

/* Bits 0-54 of a /proc/self/pagemap entry hold the page frame number. */
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		~((1UL << PM_PFRAME_BITS) - 1)

static size_t pagesize;
static size_t maplength;

static void write_bytes(char *addr, size_t length)
{
	unsigned long i;

	for (i = 0; i < length; i++)
		*(addr + i) = (char)i;
}

static unsigned long virt_to_pfn(void *addr)
{
	int fd;
	unsigned long pagemap;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0)
		return -1UL;

	/* Each virtual page has one 64-bit pagemap entry; seek to ours. */
	lseek(fd, (unsigned long)addr / pagesize * sizeof(pagemap), SEEK_SET);
	if (read(fd, &pagemap, sizeof(pagemap)) != sizeof(pagemap)) {
		close(fd);
		return -1UL;
	}
	close(fd);

	/* Bits 0-54 hold the PFN; it reads back as 0 without CAP_SYS_ADMIN. */
	return pagemap & ~PM_PFRAME_MASK;
}

static int check_page_flags(unsigned long pfn)
{
	int fd, i;
	unsigned long pageflags;

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return -1;

	lseek(fd, pfn * sizeof(pageflags), SEEK_SET);

	read(fd, &pageflags, sizeof(pageflags));
	if ((pageflags & HEAD_PAGE_FLAGS) != HEAD_PAGE_FLAGS) {
		close(fd);
		printf("Head page flags (%lx) is invalid\n", pageflags);
		return -1;
	}

	/*
	 * Pages other than the first page must be tail pages and must not be
	 * head pages; this also verifies that the kernel has correctly set the
	 * fake page heads to tail while hugetlb_free_vmemmap is enabled.
	 */
	for (i = 1; i < maplength / pagesize; i++) {
		read(fd, &pageflags, sizeof(pageflags));
		if ((pageflags & TAIL_PAGE_FLAGS) != TAIL_PAGE_FLAGS ||
		    (pageflags & HEAD_PAGE_FLAGS) == HEAD_PAGE_FLAGS) {
			close(fd);
			printf("Tail page flags (%lx) is invalid\n", pageflags);
			return -1;
		}
	}

	close(fd);

	return 0;
}

int main(int argc, char **argv)
{
	void *addr;
	unsigned long pfn;

	pagesize  = psize();
	maplength = default_huge_page_size();
	if (!maplength) {
		printf("Unable to determine huge page size\n");
		exit(1);
	}

	addr = mmap(NULL, maplength, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}

	/* Fault the mapping in to trigger allocation of the HugeTLB page. */
	write_bytes(addr, maplength);

	pfn = virt_to_pfn(addr);
	if (pfn == -1UL) {
		munmap(addr, maplength);
		perror("virt_to_pfn");
		exit(1);
	}

	printf("Returned address is %p whose pfn is %lx\n", addr, pfn);

	if (check_page_flags(pfn) < 0) {
		munmap(addr, maplength);
		perror("check_page_flags");
		exit(1);
	}

	/* munmap() length of MAP_HUGETLB memory must be hugepage aligned */
	if (munmap(addr, maplength)) {
		perror("munmap");
		exit(1);
	}

	return 0;
}
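
/*
 * A minimal sketch of building and running this test standalone from a
 * kernel source tree (paths and targets assume the usual in-tree kselftest
 * build):
 *
 *	make -C tools/testing/selftests TARGETS=mm
 *	sudo ./tools/testing/selftests/mm/hugepage-vmemmap
 */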