Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/base.c          17
-rw-r--r--  fs/proc/fd.c             2
-rw-r--r--  fs/proc/generic.c        2
-rw-r--r--  fs/proc/inode.c          2
-rw-r--r--  fs/proc/kcore.c         32
-rw-r--r--  fs/proc/meminfo.c       13
-rw-r--r--  fs/proc/proc_net.c       3
-rw-r--r--  fs/proc/proc_sysctl.c   94
-rw-r--r--  fs/proc/root.c           3
-rw-r--r--  fs/proc/self.c           2
-rw-r--r--  fs/proc/task_mmu.c      34
-rw-r--r--  fs/proc/task_nommu.c    15
-rw-r--r--  fs/proc/thread_self.c    2
-rw-r--r--  fs/proc/vmcore.c         2
14 files changed, 109 insertions(+), 114 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 05452c3b9872..ffd54617c354 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1902,7 +1902,7 @@ struct inode *proc_pid_make_inode(struct super_block *sb,
ei = PROC_I(inode);
inode->i_mode = mode;
inode->i_ino = get_next_ino();
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_op = &proc_def_inode_operations;
/*
@@ -1966,7 +1966,7 @@ int pid_getattr(struct mnt_idmap *idmap, const struct path *path,
struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->uid = GLOBAL_ROOT_UID;
stat->gid = GLOBAL_ROOT_GID;
@@ -2817,7 +2817,7 @@ static int proc_##LSM##_attr_dir_iterate(struct file *filp, \
\
static const struct file_operations proc_##LSM##_attr_dir_ops = { \
.read = generic_read_dir, \
- .iterate = proc_##LSM##_attr_dir_iterate, \
+ .iterate_shared = proc_##LSM##_attr_dir_iterate, \
.llseek = default_llseek, \
}; \
\
@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+ seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
@@ -3583,7 +3584,8 @@ static int proc_tid_comm_permission(struct mnt_idmap *idmap,
}
static const struct inode_operations proc_tid_comm_inode_operations = {
- .permission = proc_tid_comm_permission,
+ .setattr = proc_setattr,
+ .permission = proc_tid_comm_permission,
};
/*
@@ -3813,11 +3815,10 @@ static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
/* If we haven't found our starting place yet start
* with the leader and walk nr threads forward.
*/
- pos = task = task->group_leader;
- do {
+ for_each_thread(task, pos) {
if (!nr--)
goto found;
- } while_each_thread(task, pos);
+ }
fail:
pos = NULL;
goto out;
@@ -3899,7 +3900,7 @@ static int proc_task_getattr(struct mnt_idmap *idmap,
{
struct inode *inode = d_inode(path->dentry);
struct task_struct *p = get_proc_task(inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (p) {
stat->nlink += get_nr_threads(p);
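Note on the generic_fillattr() conversions repeated throughout this series:
the VFS helper now takes the STATX request mask as an extra argument, so
implementations can avoid work for attributes the caller did not request. A
minimal sketch of a getattr op against the new signature (proc_example_getattr
is a hypothetical name, not part of this diff):

    static int proc_example_getattr(struct mnt_idmap *idmap,
                                    const struct path *path, struct kstat *stat,
                                    u32 request_mask, unsigned int query_flags)
    {
            struct inode *inode = d_inode(path->dentry);

            /* request_mask is now forwarded to generic_fillattr() */
            generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
            return 0;
    }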
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index b3140deebbbf..6276b3938842 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -352,7 +352,7 @@ static int proc_fd_getattr(struct mnt_idmap *idmap,
struct inode *inode = d_inode(path->dentry);
int rv = 0;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
/* If it's a directory, put the number of open fds there */
if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 42ae38ff6e7e..775ce0bcf08c 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -146,7 +146,7 @@ static int proc_getattr(struct mnt_idmap *idmap,
}
}
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 67b09a1d9433..532dc9d240f7 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -660,7 +660,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_private = de->data;
inode->i_ino = de->low_ino;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
PROC_I(inode)->pde = de;
if (is_empty_pde(de)) {
make_empty_dir_inode(inode);
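The current_time() conversions here and in base.c, self.c, thread_self.c and
proc_sysctl.c follow the VFS switch to ctime accessor functions:
inode_set_ctime_current() stamps i_ctime and returns the new timestamp, so
the chained atime/mtime assignment survives unchanged. Before and after,
taken from the hunks above:

    /* before: i_ctime assigned like any other timestamp */
    inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

    /* after: the accessor sets i_ctime and its return value feeds the rest */
    inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);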
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 5d0cf59c4926..23fc24d16b31 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -199,7 +199,7 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
ent->addr = (unsigned long)page_to_virt(p);
ent->size = nr_pages << PAGE_SHIFT;
- if (!virt_addr_valid(ent->addr))
+ if (!virt_addr_valid((void *)ent->addr))
goto free_out;
/* cut not-mapped area. ....from ppc-32 code. */
@@ -309,6 +309,8 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
+ struct file *file = iocb->ki_filp;
+ char *buf = file->private_data;
loff_t *fpos = &iocb->ki_pos;
size_t phdrs_offset, notes_offset, data_offset;
size_t page_offline_frozen = 1;
@@ -555,10 +557,21 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
case KCORE_VMEMMAP:
case KCORE_TEXT:
/*
- * We use _copy_to_iter() to bypass usermode hardening
- * which would otherwise prevent this operation.
+ * Sadly we must use a bounce buffer here to be able to
+ * make use of copy_from_kernel_nofault(), as these
+ * memory regions might not always be mapped on all
+ * architectures.
*/
- if (_copy_to_iter((char *)start, tsz, iter) != tsz) {
+ if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+ if (iov_iter_zero(tsz, iter) != tsz) {
+ ret = -EFAULT;
+ goto out;
+ }
+ /*
+ * We know the bounce buffer is safe to copy from, so
+ * use _copy_to_iter() directly.
+ */
+ } else if (_copy_to_iter(buf, tsz, iter) != tsz) {
ret = -EFAULT;
goto out;
}
@@ -595,6 +608,10 @@ static int open_kcore(struct inode *inode, struct file *filp)
if (ret)
return ret;
+ filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!filp->private_data)
+ return -ENOMEM;
+
if (kcore_need_update)
kcore_update_ram();
if (i_size_read(inode) != proc_root_kcore->size) {
@@ -605,9 +622,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
return 0;
}
+static int release_kcore(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
static const struct proc_ops kcore_proc_ops = {
.proc_read_iter = read_kcore_iter,
.proc_open = open_kcore,
+ .proc_release = release_kcore,
.proc_lseek = default_llseek,
};
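The kcore read path now follows a probe-then-copy pattern: the per-open
bounce buffer lets copy_from_kernel_nofault() reject regions that are not
mapped on the running architecture, and such regions degrade to zeroes
instead of an error. Condensed sketch of the flow (buf is the PAGE_SIZE
buffer allocated in open_kcore(); error handling simplified):

    if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
            /* region not mapped on this arch: hand userspace zeroes */
            if (iov_iter_zero(tsz, iter) != tsz)
                    return -EFAULT;
    } else if (_copy_to_iter(buf, tsz, iter) != tsz) {
            /* bounce buffer is ordinary kernel memory, safe to copy out */
            return -EFAULT;
    }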
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8dca4d6d96c7..45af9a989d40 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -17,6 +17,7 @@
#ifdef CONFIG_CMA
#include <linux/cma.h>
#endif
+#include <linux/zswap.h>
#include <asm/page.h>
#include "internal.h"
@@ -132,17 +133,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "VmallocChunk: ", 0ul);
show_val_kb(m, "Percpu: ", pcpu_nr_pages());
-#ifdef CONFIG_MEMTEST
- if (early_memtest_done) {
- unsigned long early_memtest_bad_size_kb;
-
- early_memtest_bad_size_kb = early_memtest_bad_size>>10;
- if (early_memtest_bad_size && !early_memtest_bad_size_kb)
- early_memtest_bad_size_kb = 1;
- /* When 0 is reported, it means there actually was a successful test */
- seq_printf(m, "EarlyMemtestBad: %5lu kB\n", early_memtest_bad_size_kb);
- }
-#endif
+ memtest_report_meminfo(m);
#ifdef CONFIG_MEMORY_FAILURE
seq_printf(m, "HardwareCorrupted: %5lu kB\n",
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index a0c0419872e3..2ba31b6d68c0 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -308,7 +308,7 @@ static int proc_tgid_net_getattr(struct mnt_idmap *idmap,
net = get_proc_task_net(inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (net != NULL) {
stat->nlink = net->proc_net->nlink;
@@ -321,6 +321,7 @@ static int proc_tgid_net_getattr(struct mnt_idmap *idmap,
const struct inode_operations proc_net_inode_operations = {
.lookup = proc_tgid_net_lookup,
.getattr = proc_tgid_net_getattr,
+ .setattr = proc_setattr,
};
static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 5ea42653126e..c88854df0b62 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -19,8 +19,9 @@
#include <linux/kmemleak.h>
#include "internal.h"
-#define list_for_each_table_entry(entry, table) \
- for ((entry) = (table); (entry)->procname; (entry)++)
+#define list_for_each_table_entry(entry, header) \
+ entry = header->ctl_table; \
+ for (size_t i = 0 ; i < header->ctl_table_size && entry->procname; ++i, entry++)
static const struct dentry_operations proc_sys_dentry_operations;
static const struct file_operations proc_sys_file_operations;
@@ -43,7 +44,7 @@ static struct ctl_table sysctl_mount_point[] = {
*/
struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
- return register_sysctl(path, sysctl_mount_point);
+ return register_sysctl_sz(path, sysctl_mount_point, 0);
}
EXPORT_SYMBOL(register_sysctl_mount_point);
@@ -188,9 +189,10 @@ static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry)
static void init_header(struct ctl_table_header *head,
struct ctl_table_root *root, struct ctl_table_set *set,
- struct ctl_node *node, struct ctl_table *table)
+ struct ctl_node *node, struct ctl_table *table, size_t table_size)
{
head->ctl_table = table;
+ head->ctl_table_size = table_size;
head->ctl_table_arg = table;
head->used = 0;
head->count = 1;
@@ -204,7 +206,7 @@ static void init_header(struct ctl_table_header *head,
if (node) {
struct ctl_table *entry;
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, head) {
node->header = head;
node++;
}
@@ -215,7 +217,7 @@ static void erase_header(struct ctl_table_header *head)
{
struct ctl_table *entry;
- list_for_each_table_entry(entry, head->ctl_table)
+ list_for_each_table_entry(entry, head)
erase_entry(head, entry);
}
@@ -242,7 +244,7 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
err = insert_links(header);
if (err)
goto fail_links;
- list_for_each_table_entry(entry, header->ctl_table) {
+ list_for_each_table_entry(entry, header) {
err = insert_entry(header, entry);
if (err)
goto fail;
@@ -463,7 +465,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
head->count++;
spin_unlock(&sysctl_lock);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = table->mode;
if (!S_ISDIR(table->mode)) {
inode->i_mode |= S_IFREG;
@@ -849,7 +851,7 @@ static int proc_sys_getattr(struct mnt_idmap *idmap,
if (IS_ERR(head))
return PTR_ERR(head);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (table)
stat->mode = (stat->mode & S_IFMT) | table->mode;
@@ -973,7 +975,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
memcpy(new_name, name, namelen);
table[0].procname = new_name;
table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO;
- init_header(&new->header, set->dir.header.root, set, node, table);
+ init_header(&new->header, set->dir.header.root, set, node, table, 1);
return new;
}
@@ -1125,11 +1127,11 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table)
return err;
}
-static int sysctl_check_table(const char *path, struct ctl_table *table)
+static int sysctl_check_table(const char *path, struct ctl_table_header *header)
{
struct ctl_table *entry;
int err = 0;
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, header) {
if ((entry->proc_handler == proc_dostring) ||
(entry->proc_handler == proc_dobool) ||
(entry->proc_handler == proc_dointvec) ||
@@ -1159,8 +1161,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
return err;
}
-static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
- struct ctl_table_root *link_root)
+static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_header *head)
{
struct ctl_table *link_table, *entry, *link;
struct ctl_table_header *links;
@@ -1170,7 +1171,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
name_bytes = 0;
nr_entries = 0;
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, head) {
nr_entries++;
name_bytes += strlen(entry->procname) + 1;
}
@@ -1189,31 +1190,33 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
link_name = (char *)&link_table[nr_entries + 1];
link = link_table;
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, head) {
int len = strlen(entry->procname) + 1;
memcpy(link_name, entry->procname, len);
link->procname = link_name;
link->mode = S_IFLNK|S_IRWXUGO;
- link->data = link_root;
+ link->data = head->root;
link_name += len;
link++;
}
- init_header(links, dir->header.root, dir->header.set, node, link_table);
+ init_header(links, dir->header.root, dir->header.set, node, link_table,
+ head->ctl_table_size);
links->nreg = nr_entries;
return links;
}
static bool get_links(struct ctl_dir *dir,
- struct ctl_table *table, struct ctl_table_root *link_root)
+ struct ctl_table_header *header,
+ struct ctl_table_root *link_root)
{
- struct ctl_table_header *head;
+ struct ctl_table_header *tmp_head;
struct ctl_table *entry, *link;
/* Are there links available for every entry in table? */
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, header) {
const char *procname = entry->procname;
- link = find_entry(&head, dir, procname, strlen(procname));
+ link = find_entry(&tmp_head, dir, procname, strlen(procname));
if (!link)
return false;
if (S_ISDIR(link->mode) && S_ISDIR(entry->mode))
@@ -1224,10 +1227,10 @@ static bool get_links(struct ctl_dir *dir,
}
/* The checks passed. Increase the registration count on the links */
- list_for_each_table_entry(entry, table) {
+ list_for_each_table_entry(entry, header) {
const char *procname = entry->procname;
- link = find_entry(&head, dir, procname, strlen(procname));
- head->nreg++;
+ link = find_entry(&tmp_head, dir, procname, strlen(procname));
+ tmp_head->nreg++;
}
return true;
}
@@ -1246,13 +1249,13 @@ static int insert_links(struct ctl_table_header *head)
if (IS_ERR(core_parent))
return 0;
- if (get_links(core_parent, head->ctl_table, head->root))
+ if (get_links(core_parent, head, head->root))
return 0;
core_parent->header.nreg++;
spin_unlock(&sysctl_lock);
- links = new_links(core_parent, head->ctl_table, head->root);
+ links = new_links(core_parent, head);
spin_lock(&sysctl_lock);
err = -ENOMEM;
@@ -1260,7 +1263,7 @@ static int insert_links(struct ctl_table_header *head)
goto out;
err = 0;
- if (get_links(core_parent, head->ctl_table, head->root)) {
+ if (get_links(core_parent, head, head->root)) {
kfree(links);
goto out;
}
@@ -1310,6 +1313,7 @@ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path)
* should not be free'd after registration. So it should not be
* used on stack. It can either be a global or dynamically allocated
* by the caller and free'd later after sysctl unregistration.
* @table_size: The number of elements in table
*
* Register a sysctl table hierarchy. @table should be a filled in ctl_table
* array. A completely 0 filled entry terminates the table.
@@ -1352,26 +1356,21 @@ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path)
*/
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table)
+ const char *path, struct ctl_table *table, size_t table_size)
{
struct ctl_table_root *root = set->dir.header.root;
struct ctl_table_header *header;
struct ctl_dir *dir;
- struct ctl_table *entry;
struct ctl_node *node;
- int nr_entries = 0;
-
- list_for_each_table_entry(entry, table)
- nr_entries++;
header = kzalloc(sizeof(struct ctl_table_header) +
- sizeof(struct ctl_node)*nr_entries, GFP_KERNEL_ACCOUNT);
+ sizeof(struct ctl_node)*table_size, GFP_KERNEL_ACCOUNT);
if (!header)
return NULL;
node = (struct ctl_node *)(header + 1);
- init_header(header, root, set, node, table);
- if (sysctl_check_table(path, table))
+ init_header(header, root, set, node, table, table_size);
+ if (sysctl_check_table(path, header))
goto fail;
spin_lock(&sysctl_lock);
@@ -1401,7 +1400,7 @@ fail:
}
/**
- * register_sysctl - register a sysctl table
+ * register_sysctl_sz - register a sysctl table
* @path: The path to the directory the sysctl table is in. If the path
* doesn't exist we will create it for you.
* @table: the table structure. The caller must ensure the life of the @table
@@ -1411,18 +1410,20 @@ fail:
* to call unregister_sysctl_table() and can instead use something like
* register_sysctl_init() which does not care for the result of the sysctl
* registration.
+ * @table_size: The number of elements in table.
*
* Register a sysctl table. @table should be a filled in ctl_table
* array. A completely 0 filled entry terminates the table.
*
* See __register_sysctl_table for more details.
*/
-struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
+struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+ size_t table_size)
{
return __register_sysctl_table(&sysctl_table_root.default_set,
- path, table);
+ path, table, table_size);
}
-EXPORT_SYMBOL(register_sysctl);
+EXPORT_SYMBOL(register_sysctl_sz);
/**
* __register_sysctl_init() - register sysctl table to path
@@ -1433,6 +1434,7 @@ EXPORT_SYMBOL(register_sysctl);
* lifetime use of the sysctl.
* @table_name: The name of sysctl table, only used for log printing when
* registration fails
+ * @table_size: The number of elements in table
*
* The sysctl interface is used by userspace to query or modify at runtime
* a predefined value set on a variable. These variables however have default
@@ -1445,12 +1447,12 @@ EXPORT_SYMBOL(register_sysctl);
* Context: if your base directory does not exist it will be created for you.
*/
void __init __register_sysctl_init(const char *path, struct ctl_table *table,
- const char *table_name)
+ const char *table_name, size_t table_size)
{
- struct ctl_table_header *hdr = register_sysctl(path, table);
+ struct ctl_table_header *hdr = register_sysctl_sz(path, table, table_size);
if (unlikely(!hdr)) {
- pr_err("failed when register_sysctl %s to %s\n", table_name, path);
+ pr_err("failed when register_sysctl_sz %s to %s\n", table_name, path);
return;
}
kmemleak_not_leak(hdr);
@@ -1471,7 +1473,7 @@ static void put_links(struct ctl_table_header *header)
if (IS_ERR(core_parent))
return;
- list_for_each_table_entry(entry, header->ctl_table) {
+ list_for_each_table_entry(entry, header) {
struct ctl_table_header *link_head;
struct ctl_table *link;
const char *name = entry->procname;
@@ -1535,7 +1537,7 @@ void setup_sysctl_set(struct ctl_table_set *set,
{
memset(set, 0, sizeof(*set));
set->is_seen = is_seen;
- init_header(&set->dir.header, root, set, NULL, root_table);
+ init_header(&set->dir.header, root, set, NULL, root_table, 1);
}
void retire_sysctl_set(struct ctl_table_set *set)
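With the element count threaded through registration, callers of the new
register_sysctl_sz() pass the table size explicitly, typically via
ARRAY_SIZE(); the old register_sysctl() name survives as a wrapper that does
exactly that. A hypothetical caller (my_sysctls, example_value and
example_init are illustrative names):

    static int example_value;

    static struct ctl_table my_sysctls[] = {
            {
                    .procname       = "example",
                    .data           = &example_value,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec,
            },
            { }     /* sentinel; the size bound now also stops the walk */
    };

    static int __init example_init(void)
    {
            struct ctl_table_header *hdr;

            hdr = register_sysctl_sz("kernel/example", my_sysctls,
                                     ARRAY_SIZE(my_sysctls));
            return hdr ? 0 : -ENOMEM;
    }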
diff --git a/fs/proc/root.c b/fs/proc/root.c
index a86e65a608da..9191248f2dac 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -314,7 +314,8 @@ static int proc_root_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- generic_fillattr(&nop_mnt_idmap, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
+ stat);
stat->nlink = proc_root.nlink + nr_processes();
return 0;
}
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 72cd69bcaf4a..ecc4da8d265e 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -46,7 +46,7 @@ int proc_setup_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = self_inum;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 507cd4e59d07..15ddf4653a19 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -236,21 +236,6 @@ static int do_maps_open(struct inode *inode, struct file *file,
sizeof(struct proc_maps_private));
}
-/*
- * Indicate if the VMA is a stack for the given task; for
- * /proc/PID/maps that is the stack of the main task.
- */
-static int is_stack(struct vm_area_struct *vma)
-{
- /*
- * We make no effort to guess what a given thread considers to be
- * its "stack". It's not even well-defined for programs written
- * in languages like Go.
- */
- return vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack;
-}
-
static void show_vma_header_prefix(struct seq_file *m,
unsigned long start, unsigned long end,
vm_flags_t flags, unsigned long long pgoff,
@@ -327,13 +312,12 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
goto done;
}
- if (vma->vm_start <= mm->brk &&
- vma->vm_end >= mm->start_brk) {
+ if (vma_is_initial_heap(vma)) {
name = "[heap]";
goto done;
}
- if (is_stack(vma)) {
+ if (vma_is_initial_stack(vma)) {
name = "[stack]";
goto done;
}
@@ -587,8 +571,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
bool migration = false;
if (pmd_present(*pmd)) {
- /* FOLL_DUMP will return -EFAULT on huge zero page */
- page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+ page = vm_normal_page_pmd(vma, addr, *pmd);
} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
@@ -758,12 +741,14 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
static const struct mm_walk_ops smaps_walk_ops = {
.pmd_entry = smaps_pte_range,
.hugetlb_entry = smaps_hugetlb_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static const struct mm_walk_ops smaps_shmem_walk_ops = {
.pmd_entry = smaps_pte_range,
.hugetlb_entry = smaps_hugetlb_range,
.pte_hole = smaps_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -870,7 +855,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
- seq_printf(m, "THPeligible: %d\n",
+ seq_printf(m, "THPeligible: %8u\n",
hugepage_vma_check(vma, vma->vm_flags, true, false, true));
if (arch_pkeys_enabled())
@@ -1245,6 +1230,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
static const struct mm_walk_ops clear_refs_walk_ops = {
.pmd_entry = clear_refs_pte_range,
.test_walk = clear_refs_test_walk,
+ .walk_lock = PGWALK_WRLOCK,
};
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
@@ -1622,6 +1608,7 @@ static const struct mm_walk_ops pagemap_ops = {
.pmd_entry = pagemap_pmd_range,
.pte_hole = pagemap_pte_hole,
.hugetlb_entry = pagemap_hugetlb_range,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -1935,6 +1922,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
static const struct mm_walk_ops show_numa_ops = {
.hugetlb_entry = gather_hugetlb_stats,
.pmd_entry = gather_pte_stats,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -1971,9 +1959,9 @@ static int show_numa_map(struct seq_file *m, void *v)
if (file) {
seq_puts(m, " file=");
seq_file_path(m, file, "\n\t= ");
- } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
+ } else if (vma_is_initial_heap(vma)) {
seq_puts(m, " heap");
- } else if (is_stack(vma)) {
+ } else if (vma_is_initial_stack(vma)) {
seq_puts(m, " stack");
}
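The new .walk_lock member documents and enforces the mmap_lock state each
page-table walker expects: the read-only walkers here declare PGWALK_RDLOCK,
while clear_refs, which write-protects PTEs, declares PGWALK_WRLOCK. A
minimal sketch (example_pte_range is a hypothetical callback):

    static const struct mm_walk_ops example_walk_ops = {
            .pmd_entry      = example_pte_range,
            /* the walk core checks mmap_lock is held for read */
            .walk_lock      = PGWALK_RDLOCK,
    };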
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 2c8b62265981..a8ac0dd8041e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -121,19 +121,6 @@ unsigned long task_statm(struct mm_struct *mm,
return size;
}
-static int is_stack(struct vm_area_struct *vma)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- /*
- * We make no effort to guess what a given thread considers to be
- * its "stack". It's not even well-defined for programs written
- * in languages like Go.
- */
- return vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack;
-}
-
/*
* display a single VMA to a sequenced file
*/
@@ -171,7 +158,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "");
- } else if (mm && is_stack(vma)) {
+ } else if (mm && vma_is_initial_stack(vma)) {
seq_pad(m, ' ');
seq_puts(m, "[stack]");
}
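Both per-file is_stack() copies, and the open-coded heap check in
show_map_vma()/show_numa_map(), give way to shared predicates. A hedged
reconstruction from the removed code (the real definitions live in a common
header, not in this diff):

    static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
    {
            /* same heuristic as before: the VMA overlapping
             * mm->start_stack is the main thread's stack */
            return vma->vm_start <= vma->vm_mm->start_stack &&
                   vma->vm_end >= vma->vm_mm->start_stack;
    }

    static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
    {
            /* a VMA overlapping the [start_brk, brk] region */
            return vma->vm_start <= vma->vm_mm->brk &&
                   vma->vm_end >= vma->vm_mm->start_brk;
    }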
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index a553273fbd41..63ac1f93289f 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -46,7 +46,7 @@ int proc_setup_thread_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = thread_self_inum;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index cb80a7703d58..1fb213f379a5 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -132,7 +132,7 @@ ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
u64 *ppos, bool encrypted)
{
unsigned long pfn, offset;
- size_t nr_bytes;
+ ssize_t nr_bytes;
ssize_t read = 0, tmp;
int idx;
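The size_t to ssize_t change matters because nr_bytes is compared against
signed byte counts later in the function: in a mixed signed/unsigned
comparison the signed operand converts to unsigned, so a negative error
value would compare as a huge positive count. A minimal illustration
(handle_short_read is hypothetical):

    ssize_t tmp = -EFAULT;          /* -14 */
    size_t nr_bytes = 64;

    if (tmp < nr_bytes)             /* false: tmp converts to SIZE_MAX - 13 */
            handle_short_read();    /* never reached */

Declared ssize_t, nr_bytes keeps the comparison signed and the error check
behaves as intended.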