/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Microsoft Corporation.
*
* Authors:
* Beau Belgrave <beaub@linux.microsoft.com>
*/
#ifndef _LINUX_USER_EVENTS_H
#define _LINUX_USER_EVENTS_H
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/mm_types.h>
#include <linux/workqueue.h>
#include <uapi/linux/user_events.h>
#ifdef CONFIG_USER_EVENTS
/*
 * Per-mm bookkeeping for user_events: ties a process address space to the
 * enabler entries registered within it.
 */
struct user_event_mm {
	/* Link into the global list of tracked mms (list head elsewhere) */
	struct list_head mms_link;
	/* Enabler entries registered for this mm */
	struct list_head enablers;
	/* The address space this entry tracks */
	struct mm_struct *mm;
	/* Used for one-shot lists, protected by event_mutex */
	struct user_event_mm *next;
	/* Lifetime of this struct itself */
	refcount_t refcnt;
	/* Number of tasks sharing this mm (incremented on CLONE_VM fork) */
	refcount_t tasks;
	/* Deferred RCU teardown work (see workqueue.h) */
	struct rcu_work put_rwork;
};
/*
 * Implemented in the user_events tracer; dup copies old_mm's enabler state
 * into a fresh user_event_mm for task t, remove drops t's association.
 */
extern void user_event_mm_dup(struct task_struct *t,
			      struct user_event_mm *old_mm);
extern void user_event_mm_remove(struct task_struct *t);
/*
 * Propagate the current task's user_event_mm to a newly forked task.
 *
 * CLONE_VM children share the parent's address space, so they share the
 * same user_event_mm and only bump its task count; otherwise the enabler
 * state is duplicated into a fresh user_event_mm for the child.
 * No-op when there is no task or the parent has no user_event_mm.
 */
static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
	struct user_event_mm *mm;

	if (!t || !current->user_event_mm)
		return;

	mm = current->user_event_mm;

	if (!(clone_flags & CLONE_VM)) {
		user_event_mm_dup(t, mm);
		return;
	}

	/* Shared address space: share the tracking struct as well */
	t->user_event_mm = mm;
	refcount_inc(&mm->tasks);
}
/*
 * Drop the task's user_event_mm association on exec; the new image must
 * not inherit the old address space's enabler state.
 */
static inline void user_events_execve(struct task_struct *t)
{
	if (t && t->user_event_mm)
		user_event_mm_remove(t);
}
/*
 * Drop the task's user_event_mm association when the task exits.
 */
static inline void user_events_exit(struct task_struct *t)
{
	if (t && t->user_event_mm)
		user_event_mm_remove(t);
}
#else
/*
 * Stubs for !CONFIG_USER_EVENTS builds: the fork/execve/exit hooks compile
 * away to nothing so callers need no conditional compilation.
 */
static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
}
static inline void user_events_execve(struct task_struct *t)
{
}
static inline void user_events_exit(struct task_struct *t)
{
}
#endif /* CONFIG_USER_EVENTS */
#endif /* _LINUX_USER_EVENTS_H */