/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
int __init i915_globals_init(void)
{
        int err;

        err = i915_global_active_init();
        if (err)
                return err;

        err = i915_global_request_init();
        if (err)
                goto err_active;

        err = i915_global_scheduler_init();
        if (err)
                goto err_request;

        return 0;

err_request:
        i915_global_request_exit();
err_active:
        i915_global_active_exit();
        return err;
}
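
/*
 * Release as much unused slab memory as possible. Called from the
 * deferred park worker after the last user has parked and an RCU
 * grace period has elapsed.
 */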
static void i915_globals_shrink(void)
{
        /*
         * kmem_cache_shrink() discards empty slabs and reorders partially
         * filled slabs to prioritise allocating from the mostly full slabs,
         * with the aim of reducing fragmentation.
         */
        i915_global_active_shrink();
        i915_global_request_shrink();
        i915_global_scheduler_shrink();
}
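
/*
 * active counts the current users of the global caches; epoch is bumped
 * on every park/unpark transition so that a shrink already queued can
 * detect that the driver woke up in the meantime and turn into a no-op.
 */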
static atomic_t active;
static atomic_t epoch;

struct park_work {
        struct rcu_work work;
        int epoch;
};
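
/*
 * RCU work callback, run after a grace period has elapsed since parking.
 * Only shrink if the epoch is unchanged, i.e. nothing unparked us while
 * the work was in flight.
 */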
static void __i915_globals_park(struct work_struct *work)
{
        struct park_work *wrk = container_of(work, typeof(*wrk), work.work);

        /* Confirm nothing woke up in the last grace period */
        if (wrk->epoch == atomic_read(&epoch))
                i915_globals_shrink();

        kfree(wrk);
}

void i915_globals_park(void)
{
        struct park_work *wrk;

        /*
         * Defer shrinking the global slab caches (and other work) until
         * after an RCU grace period has completed with no activity. This
         * is to try and reduce the latency impact on the consumers caused
         * by us shrinking the caches at the same time as they are trying
         * to allocate, with the assumption being that if we idle long
         * enough for an RCU grace period to elapse since the last use, it
         * is likely to be longer until we need the caches again.
         */
        if (!atomic_dec_and_test(&active))
                return;

        wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
        if (!wrk)
                return;

        wrk->epoch = atomic_inc_return(&epoch);
        INIT_RCU_WORK(&wrk->work, __i915_globals_park);
        queue_rcu_work(system_wq, &wrk->work);
}
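
/*
 * Mark the global caches as busy again. Bumping epoch first invalidates
 * any park work still queued, so it will skip the shrink when it runs.
 */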
void i915_globals_unpark(void)
{
        atomic_inc(&epoch);
        atomic_inc(&active);
}
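
/*
 * Module unload: flush any park work still in flight before tearing down
 * the caches it would touch, then destroy the caches themselves.
 */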
void __exit i915_globals_exit(void)
{
        /* Flush any residual park_work */
        rcu_barrier();
        flush_scheduled_work();

        i915_global_scheduler_exit();
        i915_global_request_exit();
        i915_global_active_exit();

        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
        rcu_barrier();
}