eevdf.mypatch
[peterz: rebase, squash]
Signed-off-by: Parth Shah <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
include/linux/sched.h | 1 +
include/uapi/linux/sched.h | 4 +++-
include/uapi/linux/sched/types.h | 19 +++++++++++++++++++
init/init_task.c | 3 ++-
kernel/sched/core.c | 27 ++++++++++++++++++++++++++-
kernel/sched/debug.c | 1 +
tools/include/uapi/linux/sched.h | 4 +++-
7 files changed, 55 insertions(+), 4 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -784,6 +784,7 @@ struct task_struct {
int static_prio;
int normal_prio;
unsigned int rt_priority;
+ int latency_prio;
struct sched_entity se;
struct sched_rt_entity rt;
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -132,6 +132,7 @@ struct clone_args {
#define SCHED_FLAG_KEEP_PARAMS 0x10
#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
+#define SCHED_FLAG_LATENCY_NICE 0x80
#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \
SCHED_FLAG_KEEP_PARAMS)
@@ -143,6 +144,7 @@ struct clone_args {
SCHED_FLAG_RECLAIM | \
SCHED_FLAG_DL_OVERRUN | \
SCHED_FLAG_KEEP_ALL | \
- SCHED_FLAG_UTIL_CLAMP)
+ SCHED_FLAG_UTIL_CLAMP | \
+ SCHED_FLAG_LATENCY_NICE)
#endif /* _UAPI_LINUX_SCHED_H */
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -10,6 +10,7 @@ struct sched_param {
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
#define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */
+#define SCHED_ATTR_SIZE_VER2 60 /* add: latency_nice */
/*
* Extended scheduling parameters data structure.
@@ -98,6 +99,22 @@ struct sched_param {
* scheduled on a CPU with no more capacity than the specified value.
*
* A task utilization boundary can be reset by setting the attribute to -1.
+ *
+ * Latency Tolerance Attributes
+ * ===========================
+ *
+ * A subset of sched_attr attributes allows specifying the relative latency
+ * requirements of a task with respect to the other tasks running/queued in
+ * the system.
+ *
+ * @ sched_latency_nice task's latency_nice value
+ *
+ * The latency_nice of a task can take any value in the range
+ * [MIN_LATENCY_NICE..MAX_LATENCY_NICE].
+ *
+ * A task whose latency_nice is MIN_LATENCY_NICE is understood to require
+ * lower latency than a task with a higher latency_nice value.
*/
struct sched_attr {
__u32 size;
@@ -120,6 +137,8 @@ struct sched_attr {
__u32 sched_util_min;
__u32 sched_util_max;
+ /* latency requirement hints */
+ __s32 sched_latency_nice;
};
#endif /* _UAPI_LINUX_SCHED_TYPES_H */
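A usage sketch, not part of the patch: glibc does not wrap sched_setattr(), so userspace normally declares its own copy of struct sched_attr and issues the raw syscall. The struct below mirrors the VER2 layout documented above; the local type definition and the chosen latency value are illustrative assumptions, not something this patch mandates.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_FLAG_LATENCY_NICE	0x80	/* from the uapi change above */

/* Local copy of the VER2 layout; adjust if the final uapi differs. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	/* SCHED_DEADLINE parameters */
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
	/* utilization clamps (VER1) */
	uint32_t sched_util_min;
	uint32_t sched_util_max;
	/* latency requirement hint (VER2) */
	int32_t  sched_latency_nice;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);	/* at least SCHED_ATTR_SIZE_VER2 (60) */
	attr.sched_policy = 0;		/* SCHED_OTHER */
	attr.sched_flags = SCHED_FLAG_LATENCY_NICE;
	attr.sched_latency_nice = -20;	/* strongest latency hint */

	/* pid 0 targets the calling task; the last argument is the flags word. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
		perror("sched_setattr");
		return 1;
	}
	return 0;
}

The validation added in __sched_setscheduler() below restricts sched_latency_nice to the regular nice range, MIN_NICE (-20) through MAX_NICE (19); anything outside that range is rejected with -EINVAL.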
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -78,6 +78,7 @@ struct task_struct init_task
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
+ .latency_prio = DEFAULT_PRIO,
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.user_cpus_ptr = NULL,
@@ -89,7 +90,7 @@ struct task_struct init_task
.fn = do_no_restart_syscall,
},
.se = {
- .group_node = LIST_HEAD_INIT(init_task.se.group_node),
+ .group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
.rt = {
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
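The DEFAULT_PRIO used for init_task above and the NICE_TO_PRIO(0) default set in sched_fork() below are the same value: latency_prio keeps the latency hint on the kernel's priority scale, reusing the regular nice<->prio conversion macros from include/linux/sched/prio.h. A standalone sanity check of that mapping follows; the macro values are quoted from the kernel header, while the test program itself is only illustrative.

#include <assert.h>

/* Reference definitions from include/linux/sched/prio.h (not part of this patch). */
#define MAX_RT_PRIO		100
#define NICE_WIDTH		40
#define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)	/* 120 */
#define NICE_TO_PRIO(nice)	((nice) + DEFAULT_PRIO)
#define PRIO_TO_NICE(prio)	((prio) - DEFAULT_PRIO)

int main(void)
{
	/* latency_nice 0 maps to DEFAULT_PRIO, the init_task value above. */
	assert(NICE_TO_PRIO(0) == DEFAULT_PRIO);

	/* The full latency_nice range -20..19 is stored as latency_prio 100..139. */
	assert(NICE_TO_PRIO(-20) == 100);
	assert(NICE_TO_PRIO(19) == 139);
	assert(PRIO_TO_NICE(139) == 19);

	return 0;
}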
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4684,6 +4684,8 @@ int sched_fork(unsigned long clone_flags
p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
+ p->latency_prio = NICE_TO_PRIO(0);
+
/*
* We don't need the reset flag anymore after the fork. It has
* fulfilled its duty:
@@ -7428,7 +7430,7 @@ static struct task_struct *find_process_
#define SETPARAM_POLICY -1
static void __setscheduler_params(struct task_struct *p,
- const struct sched_attr *attr)
+ const struct sched_attr *attr)
{
int policy = attr->sched_policy;
@@ -7452,6 +7454,13 @@ static void __setscheduler_params(struct
set_load_weight(p, true);
}
+static void __setscheduler_latency(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE)
+ p->latency_prio = NICE_TO_PRIO(attr->sched_latency_nice);
+}
+
/*
* Check the target process has a UID that matches the current process's:
*/
@@ -7592,6 +7601,13 @@ static int __sched_setscheduler(struct t
return retval;
}
+ if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE) {
+ if (attr->sched_latency_nice > MAX_NICE)
+ return -EINVAL;
+ if (attr->sched_latency_nice < MIN_NICE)
+ return -EINVAL;
+ }
+
if (pi)
cpuset_read_lock();
@@ -7626,6 +7642,9 @@ static int __sched_setscheduler(struct t
goto change;
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
goto change;
+ if (attr->sched_flags & SCHED_FLAG_LATENCY_NICE &&
+ attr->sched_latency_nice != PRIO_TO_NICE(p->latency_prio))
+ goto change;
p->sched_reset_on_fork = reset_on_fork;
retval = 0;
@@ -7714,6 +7733,7 @@ static int __sched_setscheduler(struct t
__setscheduler_params(p, attr);
__setscheduler_prio(p, newprio);
}
+ __setscheduler_latency(p, attr);
__setscheduler_uclamp(p, attr);
if (queued) {
@@ -7924,6 +7944,9 @@ static int sched_copy_attr(struct sched_
size < SCHED_ATTR_SIZE_VER1)
return -EINVAL;
+ if ((attr->sched_flags & SCHED_FLAG_LATENCY_NICE) &&
+ size < SCHED_ATTR_SIZE_VER2)
+ return -EINVAL;
/*
* XXX: Do we want to be lenient like existing syscalls; or do we want
* to be strict and return an error on out-of-bounds values?
@@ -8161,6 +8184,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pi
get_params(p, &kattr);
kattr.sched_flags &= SCHED_FLAG_ALL;
+ kattr.sched_latency_nice = PRIO_TO_NICE(p->latency_prio);
+
#ifdef CONFIG_UCLAMP_TASK
/*
* This could race with another potential updater, but this is fine
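To read the hint back, the sched_getattr() hunk above converts the stored latency_prio to a nice value before the attributes are copied out. A minimal read-back sketch, repeating the local struct sched_attr from the earlier sched_setattr() example (again an illustrative assumption, not part of the patch):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Same local VER2 layout as in the sched_setattr() sketch. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
	uint32_t sched_util_min;
	uint32_t sched_util_max;
	int32_t  sched_latency_nice;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));

	/* sched_getattr(pid, uattr, usize, flags); pid 0 means the calling task. */
	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) != 0) {
		perror("sched_getattr");
		return 1;
	}

	/* PRIO_TO_NICE(p->latency_prio), as filled in by the hunk above. */
	printf("latency_nice = %d\n", attr.sched_latency_nice);
	return 0;
}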
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -1043,6 +1043,7 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
+ P(latency_prio);
if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
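With the debug hook above, latency_prio also becomes visible in /proc/<pid>/sched. A small sketch that pulls the field out of the proc file, assuming the name printed by the P() macro is the bare string "latency_prio":

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Print only the latency_prio line added by the debug.c change. */
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "latency_prio"))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}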
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -132,6 +132,7 @@ struct clone_args {
#define SCHED_FLAG_KEEP_PARAMS 0x10
#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
+#define SCHED_FLAG_LATENCY_NICE 0x80
#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \
SCHED_FLAG_KEEP_PARAMS)
@@ -143,6 +144,7 @@ struct clone_args {
SCHED_FLAG_RECLAIM | \
SCHED_FLAG_DL_OVERRUN | \
SCHED_FLAG_KEEP_ALL | \
- SCHED_FLAG_UTIL_CLAMP)
+ SCHED_FLAG_UTIL_CLAMP | \
+ SCHED_FLAG_LATENCY_NICE)
#endif /* _UAPI_LINUX_SCHED_H */