diff -ur a/init/Kconfig b/init/Kconfig
--- a/init/Kconfig 2022-04-20 17:30:09.904751692 -0500
+++ b/init/Kconfig 2022-04-20 17:32:51.173376197 -0500
@@ -814,35 +814,6 @@
menu "Scheduler features"
-menuconfig SCHED_ALT
- bool "Alternative CPU Schedulers"
- default y
- help
- This feature enable alternative CPU scheduler"
-
-if SCHED_ALT
-
-choice
- prompt "Alternative CPU Scheduler"
- default SCHED_BMQ
-
-config SCHED_BMQ
- bool "BMQ CPU scheduler"
- help
- The BitMap Queue CPU scheduler for excellent interactivity and
- responsiveness on the desktop and solid scalability on normal
- hardware and commodity servers.
-
-config SCHED_PDS
- bool "PDS CPU scheduler"
- help
- The Priority and Deadline based Skip list multiple queue CPU
- Scheduler.
-
-endchoice
-
-endif
-
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
@@ -893,6 +864,35 @@
If in doubt, use the default value.
+menuconfig SCHED_ALT
+ bool "Alternative CPU Schedulers"
+ default y
+ help
+ This feature enable alternative CPU scheduler"
+
+if SCHED_ALT
+
+choice
+ prompt "Alternative CPU Scheduler"
+ default SCHED_BMQ
+
+config SCHED_BMQ
+ bool "BMQ CPU scheduler"
+ help
+ The BitMap Queue CPU scheduler for excellent interactivity and
+ responsiveness on the desktop and solid scalability on normal
+ hardware and commodity servers.
+
+config SCHED_PDS
+ bool "PDS CPU scheduler"
+ help
+ The Priority and Deadline based Skip list multiple queue CPU
+ Scheduler.
+
+endchoice
+
+endif
+
endmenu
#
diff -ur a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
--- a/kernel/sched/alt_core.c 2022-04-20 17:30:09.908751806 -0500
+++ b/kernel/sched/alt_core.c 2022-04-20 17:32:51.169376082 -0500
@@ -2954,7 +2954,7 @@
return 0;
}
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
struct rq *rq;
@@ -2997,6 +2997,13 @@
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
+void sched_post_fork(struct task_struct *p)
+{
+#ifdef CONFIG_UCLAMP_TASK
+ uclamp_post_fork(p);
+#endif
+}
+
#ifdef CONFIG_SCHEDSTATS
DEFINE_STATIC_KEY_FALSE(sched_schedstats);
diff -ur a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
--- a/kernel/sched/alt_sched.h 2022-04-20 17:30:09.908751806 -0500
+++ b/kernel/sched/alt_sched.h 2022-04-20 17:32:51.173376197 -0500
@@ -653,6 +653,20 @@
}
#endif
+static inline
+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long boost,
+ struct task_struct *p)
+{
+ return boost;
+}
+
+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
+
+static inline bool uclamp_is_used(void)
+{
+ return false;
+}
+
extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
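For context on the alt_core.c hunk: it appears to track the upstream sched_fork() rework (landed around v5.17) that split the old sched_post_fork(p, kargs) hook into sched_cgroup_fork(p, kargs), called early in copy_process() so the child can be queued before it becomes visible, and a slimmer sched_post_fork(p) that only finishes uclamp setup. A rough sketch of the prototypes kernel/fork.c now expects is below; it is paraphrased from the upstream headers as I recall them, not copied verbatim, so treat it as an assumption.

/*
 * Sketch of the post-rework scheduler hooks that copy_process() calls
 * (assumption: paraphrased from ~v5.17 include/linux/sched/task.h).
 */
struct task_struct;
struct kernel_clone_args;

/* Called once the cgroup is pinned, so the new task can be placed on the
 * correct runqueue before it becomes visible to the rest of the system. */
extern void sched_cgroup_fork(struct task_struct *p,
                              struct kernel_clone_args *kargs);

/* Called late in copy_process(); in mainline it only completes uclamp
 * setup, which is why the patch adds the small stub seen above. */
extern void sched_post_fork(struct task_struct *p);

The patch therefore renames Project C's existing two-argument function to sched_cgroup_fork() and supplies a new single-argument sched_post_fork(), keeping the alternative scheduler's entry points in step with what the rest of the kernel calls.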