Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/array.c       2
-rw-r--r--  fs/proc/kcore.c      81
-rw-r--r--  fs/proc/namespaces.c  2
-rw-r--r--  fs/proc/task_mmu.c    2
-rw-r--r--  fs/proc/vmcore.c     58
5 files changed, 74 insertions, 71 deletions
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 55ed3510d2bb..d6a0369caa93 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -500,7 +500,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* a program is not able to use ptrace(2) in that case. It is
* safe because the task has stopped executing permanently.
*/
- if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
+ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) {
if (try_get_task_stack(task)) {
eip = KSTK_EIP(task);
esp = KSTK_ESP(task);
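
The array.c hunk widens the flag mask so that a task sitting in the post-coredump window (PF_POSTCOREDUMP, set once the dump has completed but before the task exits) also reports eip/esp in /proc/<pid>/stat. A minimal sketch of the resulting test, assuming the PF_* bits from <linux/sched.h>; the helper name is hypothetical, the kernel open-codes this check:

#include <linux/sched.h>

/*
 * All three flags imply the task has permanently stopped executing
 * user code, so exposing KSTK_EIP()/KSTK_ESP() cannot leak live
 * register state to a ptrace-capable reader.
 */
static bool stat_may_report_regs(struct task_struct *task, bool permitted)
{
	return permitted &&
	       (task->flags & (PF_EXITING | PF_DUMPCORE | PF_POSTCOREDUMP));
}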
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e376f48c4b8b..1cb33771bf9f 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -65,7 +65,11 @@ static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
#endif
static LIST_HEAD(kclist_head);
-static DECLARE_RWSEM(kclist_lock);
+static int kcore_nphdr;
+static size_t kcore_phdrs_len;
+static size_t kcore_notes_len;
+static size_t kcore_data_offset;
+DEFINE_STATIC_PERCPU_RWSEM(kclist_lock);
static int kcore_need_update = 1;
/*
@@ -101,33 +105,32 @@ void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
list_add_tail(&new->list, &kclist_head);
}
-static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
- size_t *data_offset)
+static void update_kcore_size(void)
{
size_t try, size;
struct kcore_list *m;
- *nphdr = 1; /* PT_NOTE */
+ kcore_nphdr = 1; /* PT_NOTE */
size = 0;
list_for_each_entry(m, &kclist_head, list) {
try = kc_vaddr_to_offset((size_t)m->addr + m->size);
if (try > size)
size = try;
- *nphdr = *nphdr + 1;
+ kcore_nphdr++;
}
- *phdrs_len = *nphdr * sizeof(struct elf_phdr);
- *notes_len = (4 * sizeof(struct elf_note) +
- 3 * ALIGN(sizeof(CORE_STR), 4) +
- VMCOREINFO_NOTE_NAME_BYTES +
- ALIGN(sizeof(struct elf_prstatus), 4) +
- ALIGN(sizeof(struct elf_prpsinfo), 4) +
- ALIGN(arch_task_struct_size, 4) +
- ALIGN(vmcoreinfo_size, 4));
- *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
- *notes_len);
- return *data_offset + size;
+ kcore_phdrs_len = kcore_nphdr * sizeof(struct elf_phdr);
+ kcore_notes_len = (4 * sizeof(struct elf_note) +
+ 3 * ALIGN(sizeof(CORE_STR), 4) +
+ VMCOREINFO_NOTE_NAME_BYTES +
+ ALIGN(sizeof(struct elf_prstatus), 4) +
+ ALIGN(sizeof(struct elf_prpsinfo), 4) +
+ ALIGN(arch_task_struct_size, 4) +
+ ALIGN(vmcoreinfo_size, 4));
+ kcore_data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + kcore_phdrs_len +
+ kcore_notes_len);
+ proc_root_kcore->size = kcore_data_offset + size;
}
#ifdef CONFIG_HIGHMEM
@@ -270,12 +273,10 @@ static int kcore_update_ram(void)
{
LIST_HEAD(list);
LIST_HEAD(garbage);
- int nphdr;
- size_t phdrs_len, notes_len, data_offset;
struct kcore_list *tmp, *pos;
int ret = 0;
- down_write(&kclist_lock);
+ percpu_down_write(&kclist_lock);
if (!xchg(&kcore_need_update, 0))
goto out;
@@ -293,11 +294,10 @@ static int kcore_update_ram(void)
}
list_splice_tail(&list, &kclist_head);
- proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
- &data_offset);
+ update_kcore_size();
out:
- up_write(&kclist_lock);
+ percpu_up_write(&kclist_lock);
list_for_each_entry_safe(pos, tmp, &garbage, list) {
list_del(&pos->list);
kfree(pos);
@@ -326,27 +326,24 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
struct file *file = iocb->ki_filp;
char *buf = file->private_data;
loff_t *fpos = &iocb->ki_pos;
- size_t phdrs_offset, notes_offset, data_offset;
+ size_t phdrs_offset, notes_offset;
size_t page_offline_frozen = 1;
- size_t phdrs_len, notes_len;
struct kcore_list *m;
size_t tsz;
- int nphdr;
unsigned long start;
size_t buflen = iov_iter_count(iter);
size_t orig_buflen = buflen;
int ret = 0;
- down_read(&kclist_lock);
+ percpu_down_read(&kclist_lock);
/*
* Don't race against drivers that set PageOffline() and expect no
* further page access.
*/
page_offline_freeze();
- get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
phdrs_offset = sizeof(struct elfhdr);
- notes_offset = phdrs_offset + phdrs_len;
+ notes_offset = phdrs_offset + kcore_phdrs_len;
/* ELF file header. */
if (buflen && *fpos < sizeof(struct elfhdr)) {
@@ -368,7 +365,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
.e_flags = ELF_CORE_EFLAGS,
.e_ehsize = sizeof(struct elfhdr),
.e_phentsize = sizeof(struct elf_phdr),
- .e_phnum = nphdr,
+ .e_phnum = kcore_nphdr,
};
tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
@@ -382,10 +379,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
}
/* ELF program headers. */
- if (buflen && *fpos < phdrs_offset + phdrs_len) {
+ if (buflen && *fpos < phdrs_offset + kcore_phdrs_len) {
struct elf_phdr *phdrs, *phdr;
- phdrs = kzalloc(phdrs_len, GFP_KERNEL);
+ phdrs = kzalloc(kcore_phdrs_len, GFP_KERNEL);
if (!phdrs) {
ret = -ENOMEM;
goto out;
@@ -393,13 +390,14 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
phdrs[0].p_type = PT_NOTE;
phdrs[0].p_offset = notes_offset;
- phdrs[0].p_filesz = notes_len;
+ phdrs[0].p_filesz = kcore_notes_len;
phdr = &phdrs[1];
list_for_each_entry(m, &kclist_head, list) {
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
+ phdr->p_offset = kc_vaddr_to_offset(m->addr)
+ + kcore_data_offset;
phdr->p_vaddr = (size_t)m->addr;
if (m->type == KCORE_RAM)
phdr->p_paddr = __pa(m->addr);
@@ -412,7 +410,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
phdr++;
}
- tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
+ tsz = min_t(size_t, buflen,
+ phdrs_offset + kcore_phdrs_len - *fpos);
if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
iter) != tsz) {
kfree(phdrs);
@@ -426,7 +425,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
}
/* ELF note segment. */
- if (buflen && *fpos < notes_offset + notes_len) {
+ if (buflen && *fpos < notes_offset + kcore_notes_len) {
struct elf_prstatus prstatus = {};
struct elf_prpsinfo prpsinfo = {
.pr_sname = 'R',
@@ -438,7 +437,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
strscpy(prpsinfo.pr_psargs, saved_command_line,
sizeof(prpsinfo.pr_psargs));
- notes = kzalloc(notes_len, GFP_KERNEL);
+ notes = kzalloc(kcore_notes_len, GFP_KERNEL);
if (!notes) {
ret = -ENOMEM;
goto out;
@@ -459,9 +458,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
*/
append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
vmcoreinfo_data,
- min(vmcoreinfo_size, notes_len - i));
+ min(vmcoreinfo_size, kcore_notes_len - i));
- tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
+ tsz = min_t(size_t, buflen,
+ notes_offset + kcore_notes_len - *fpos);
if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
kfree(notes);
ret = -EFAULT;
@@ -477,7 +477,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
* Check to see if our file offset matches with any of
* the addresses in the elf_phdr on our list.
*/
- start = kc_offset_to_vaddr(*fpos - data_offset);
+ start = kc_offset_to_vaddr(*fpos - kcore_data_offset);
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
@@ -626,7 +626,7 @@ skip:
out:
page_offline_thaw();
- up_read(&kclist_lock);
+ percpu_up_read(&kclist_lock);
if (ret)
return ret;
return orig_buflen - buflen;
@@ -663,6 +663,7 @@ static int release_kcore(struct inode *inode, struct file *file)
}
static const struct proc_ops kcore_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_read_iter = read_kcore_iter,
.proc_open = open_kcore,
.proc_release = release_kcore,
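
Taken together, the kcore.c changes cache the ELF layout (kcore_nphdr, kcore_phdrs_len, kcore_notes_len, kcore_data_offset) in globals recomputed only by update_kcore_size() under the write lock, convert kclist_lock to a per-CPU rw_semaphore so the hot read path takes only a cheap per-CPU counter, and mark the proc entry PROC_ENTRY_PERMANENT. A minimal sketch of the locking pattern, assuming <linux/percpu-rwsem.h>; the two functions are illustrative stand-ins for kcore_update_ram() and read_kcore_iter():

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(kclist_lock);

/* Writer side: rare, pays the full synchronization cost. */
static void writer_path(void)
{
	percpu_down_write(&kclist_lock);
	/* ... splice kclist_head, call update_kcore_size() ... */
	percpu_up_write(&kclist_lock);
}

/* Reader side: every read_kcore_iter() call, now nearly free. */
static void reader_path(void)
{
	percpu_down_read(&kclist_lock);
	/* ... consume kcore_nphdr and the cached offsets ... */
	percpu_up_read(&kclist_lock);
}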
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 8e159fc78c0a..c610224faf10 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -83,7 +83,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
res = ns_get_name(name, sizeof(name), task, ns_ops);
if (res >= 0)
- res = readlink_copy(buffer, buflen, name);
+ res = readlink_copy(buffer, buflen, name, strlen(name));
}
put_task_struct(task);
return res;
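
The namespaces.c change follows a VFS-wide switch of readlink_copy() to a four-argument form that takes the link length explicitly instead of computing strlen() internally; ns_get_name() fills name with a NUL-terminated string, so strlen(name) is passed. A hypothetical compatibility wrapper showing the old behavior expressed via the new signature (the real series updates each caller instead):

/* Hypothetical wrapper: old three-argument semantics on top of the
 * new readlink_copy(buffer, buflen, link, linklen) signature. */
static int readlink_copy_str(char __user *buffer, int buflen, const char *link)
{
	return readlink_copy(buffer, buflen, link, strlen(link));
}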
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 38a5a3e9cba2..f02cd362309a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
}
for (; addr != end; addr += PAGE_SIZE, idx++) {
- unsigned long cur_flags = flags;
+ u64 cur_flags = flags;
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&
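
The task_mmu.c fix matters on 32-bit kernels, where unsigned long is 32 bits wide: the pagemap flags are defined with BIT_ULL() and several sit above bit 31, so accumulating them in an unsigned long silently truncated the high half. A minimal sketch of the truncation, using PM_* definitions as in fs/proc/task_mmu.c:

#include <linux/bits.h>
#include <linux/types.h>

#define PM_SOFT_DIRTY	BIT_ULL(55)	/* as defined in fs/proc/task_mmu.c */
#define PM_PRESENT	BIT_ULL(63)

/* On a 32-bit kernel the first assignment drops bit 55 entirely. */
unsigned long truncated = PM_SOFT_DIRTY;	/* becomes 0 */
u64 cur_flags = PM_SOFT_DIRTY | PM_PRESENT;	/* correct on all arches */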
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index b4521b096058..658bf199d424 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -404,6 +404,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
if (!iov_iter_count(iter))
return acc;
}
+
+ cond_resched();
}
return acc;
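
The added cond_resched() sits at the bottom of the per-segment loop in __read_vmcore(): reading a large vmcore otherwise keeps a CPU busy in the kernel for long stretches, which can trigger soft-lockup warnings on non-preemptible kernels. A minimal sketch of the pattern, with hypothetical chunk helpers standing in for the vmcore copy logic:

#include <linux/sched.h>

/* Long copy loops should yield between chunks; cond_resched() is
 * cheap and a no-op unless a reschedule is actually pending. */
while (have_more_chunks()) {	/* hypothetical helper */
	copy_one_chunk();	/* hypothetical helper */
	cond_resched();
}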
@@ -414,6 +416,34 @@ static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
return __read_vmcore(iter, &iocb->ki_pos);
}
+/**
+ * vmcore_alloc_buf - allocate buffer in vmalloc memory
+ * @size: size of buffer
+ *
+ * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
+ * the buffer to user-space by means of remap_vmalloc_range().
+ *
+ * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
+ * disabled and there's no need to allow users to mmap the buffer.
+ */
+static inline char *vmcore_alloc_buf(size_t size)
+{
+#ifdef CONFIG_MMU
+ return vmalloc_user(size);
+#else
+ return vzalloc(size);
+#endif
+}
+
+/*
+ * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
+ * essential for mmap_vmcore() in order to map physically
+ * non-contiguous objects (ELF header, ELF note segment and memory
+ * regions in the 1st kernel pointed to by PT_LOAD entries) into
+ * virtually contiguous user-space in ELF layout.
+ */
+#ifdef CONFIG_MMU
+
/*
* The vmcore fault handler uses the page cache and fills data using the
* standard __read_vmcore() function.
@@ -457,34 +487,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
#endif
}
-/**
- * vmcore_alloc_buf - allocate buffer in vmalloc memory
- * @size: size of buffer
- *
- * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
- * the buffer to user-space by means of remap_vmalloc_range().
- *
- * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
- * disabled and there's no need to allow users to mmap the buffer.
- */
-static inline char *vmcore_alloc_buf(size_t size)
-{
-#ifdef CONFIG_MMU
- return vmalloc_user(size);
-#else
- return vzalloc(size);
-#endif
-}
-
-/*
- * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
- * essential for mmap_vmcore() in order to map physically
- * non-contiguous objects (ELF header, ELF note segment and memory
- * regions in the 1st kernel pointed to by PT_LOAD entries) into
- * virtually contiguous user-space in ELF layout.
- */
-#ifdef CONFIG_MMU
-
static const struct vm_operations_struct vmcore_mmap_ops = {
.fault = mmap_vmcore_fault,
};