# cat kernel/sched/fair.c.rej | paste
--- kernel/sched/fair.c
+++ kernel/sched/fair.c
@@ -67,28 +70,124 @@
  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  *
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (BORE  default SCHED_TUNABLESCALING_NONE = *1 constant)
+ * (EEVDF default SCHED_TUNABLESCALING_LOG  = *(1+ilog(ncpus)))
  */
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+#else // !CONFIG_SCHED_BORE
 unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  *
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (BORE  default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds)
+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-#ifdef CONFIG_CACHY
-unsigned int sysctl_sched_base_slice                   = 350000ULL;
-static unsigned int normalized_sysctl_sched_base_slice = 350000ULL;
-#else
+#ifdef CONFIG_SCHED_BORE
+unsigned int            sysctl_sched_base_slice = 1000000000ULL / HZ;
+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
+unsigned int        sysctl_sched_min_base_slice =    2000000ULL;
+#else // !CONFIG_SCHED_BORE
 unsigned int sysctl_sched_base_slice                   = 750000ULL;
 static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
-#endif
+#endif // CONFIG_SCHED_BORE
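
/*
 * [Not part of the patch] Worked example of the BORE base-slice default
 * above: the slice is one tick (1 sec / HZ), with
 * sysctl_sched_min_base_slice acting as a 2 ms floor once HZ is high
 * enough that a tick falls below it. A standalone userspace sketch;
 * the HZ values are assumptions for illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long hz_values[] = { 100, 250, 1000 };
        unsigned long long min_base_slice = 2000000ULL; /* default above */

        for (int i = 0; i < 3; i++) {
                unsigned long long tick = 1000000000ULL / hz_values[i];
                unsigned long long slice =
                        tick > min_base_slice ? tick : min_base_slice;
                printf("HZ=%-4llu tick=%8llu ns  effective slice=%8llu ns\n",
                       hz_values[i], tick, slice);
        }
        return 0;
}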
 
-#ifdef CONFIG_CACHY
-const_debug unsigned int sysctl_sched_migration_cost   = 300000UL;
-#else
 const_debug unsigned int sysctl_sched_migration_cost   = 500000UL;
-#endif
+
+#ifdef CONFIG_SCHED_BORE
+u8   __read_mostly sched_bore                   = 1;
+u8   __read_mostly sched_burst_smoothness_long  = 1;
+u8   __read_mostly sched_burst_smoothness_short = 0;
+u8   __read_mostly sched_burst_fork_atavistic   = 2;
+u8   __read_mostly sched_burst_penalty_offset   = 22;
+uint __read_mostly sched_burst_penalty_scale    = 1280;
+uint __read_mostly sched_burst_cache_lifetime   = 60000000;
+static int __maybe_unused sixty_four     = 64;
+static int __maybe_unused maxval_12_bits = 4095;
+
+#define MAX_BURST_PENALTY (39U << 2)
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+       u32 msb = fls64(v);
+       s32 excess_bits = msb - 9;
+       u8 fractional = (0 <= excess_bits) ? v >> excess_bits : v << -excess_bits;
+       return msb << 8 | fractional;
+}
+
+static inline u32 calc_burst_penalty(u64 burst_time) {
+       u32 greed, tolerance, penalty, scaled_penalty;
+
+       greed = log2plus1_u64_u32f8(burst_time);
+       tolerance = sched_burst_penalty_offset << 8;
+       penalty = max(0, (s32)greed - (s32)tolerance);
+       scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
+
+       return min(MAX_BURST_PENALTY, scaled_penalty);
+}
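
/*
 * [Not part of the patch] Standalone userspace re-derivation of the
 * 8.8 fixed-point log2 and penalty clamp above, using the default
 * tunables from this hunk (offset 22, scale 1280, cap 39 << 2 = 156).
 * An offset of 22 means bursts shorter than roughly 2^21 ns (~2.1 ms)
 * incur no penalty, and the cap keeps burst_score = penalty >> 2 within
 * the 0..39 table range used by update_burst_score() below. fls64() is
 * emulated with __builtin_clzll(); the burst lengths are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t fls64_emul(uint64_t v)
{
        return v ? 64 - __builtin_clzll(v) : 0;
}

static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
        uint32_t msb = fls64_emul(v);
        int32_t excess_bits = msb - 9;
        uint8_t fractional = (0 <= excess_bits) ? v >> excess_bits
                                                : v << -excess_bits;
        return msb << 8 | fractional;
}

int main(void)
{
        uint32_t offset = 22, scale = 1280, cap = 39U << 2;
        uint64_t bursts[] = { 1000000ULL, 100000000ULL, 10000000000ULL };

        for (int i = 0; i < 3; i++) {
                uint32_t greed = log2plus1_u64_u32f8(bursts[i]);
                int32_t raw = (int32_t)greed - (int32_t)(offset << 8);
                uint32_t penalty = raw < 0 ? 0 : raw;
                uint32_t scaled = penalty * scale >> 16;

                if (scaled > cap)
                        scaled = cap;
                printf("burst=%11llu ns  log2+1=%6.2f  penalty=%3u\n",
                       (unsigned long long)bursts[i], greed / 256.0, scaled);
        }
        return 0;
}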
+
+static inline u64 scale_slice(u64 delta, struct sched_entity *se) {
+       return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->burst_score], 22);
+}
+
+static inline u64 __unscale_slice(u64 delta, u8 score) {
+       return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
+}
+
+static inline u64 unscale_slice(u64 delta, struct sched_entity *se) {
+       return __unscale_slice(delta, se->burst_score);
+}
+
+static void reweight_task_fair(struct rq *rq, struct task_struct *p, int prio);
+
+static void update_burst_score(struct sched_entity *se) {
+       if (!entity_is_task(se)) return;
+       struct task_struct *p = task_of(se);
+       u8 prio = p->static_prio - MAX_RT_PRIO;
+       u8 prev_prio = min(39, prio + se->burst_score);
+
+       se->burst_score = se->burst_penalty >> 2;
+
+       u8 new_prio = min(39, prio + se->burst_score);
+       if (new_prio != prev_prio)
+               reweight_task_fair(task_rq(p), p, new_prio);
+}
+
+static void update_burst_penalty(struct sched_entity *se) {
+       se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
+       se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
+       update_burst_score(se);
+}
+
+static inline u32 binary_smooth(u32 new, u32 old) {
+       int increment = new - old;
+       return (0 <= increment) ?
+               old + ( increment >> (int)sched_burst_smoothness_long) :
+               old - (-increment >> (int)sched_burst_smoothness_short);
+}
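
/*
 * [Not part of the patch] Trace of binary_smooth() with the default
 * smoothness values above (long = 1, short = 0): the smoothed penalty
 * climbs by half of the remaining gap per step, but drops are taken in
 * full immediately. Standalone userspace sketch; the sample sequence is
 * an assumption for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t smooth(uint32_t next, uint32_t old, int up, int down)
{
        int32_t increment = next - old;

        return (0 <= increment) ? old + ( increment >> up)
                                : old - (-increment >> down);
}

int main(void)
{
        uint32_t samples[] = { 120, 120, 120, 30, 120 };
        uint32_t value = 0;

        for (int i = 0; i < 5; i++) {
                value = smooth(samples[i], value, 1, 0);
                printf("sample=%3u -> smoothed=%3u\n", samples[i], value);
        }
        return 0;   /* prints 60, 90, 105, 30, 75 */
}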
+
+static void restart_burst(struct sched_entity *se) {
+       se->burst_penalty = se->prev_burst_penalty =
+               binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
+       se->curr_burst_penalty = 0;
+       se->burst_time = 0;
+       update_burst_score(se);
+}
+
+static void restart_burst_rescale_deadline(struct sched_entity *se) {
+       s64 vscaled, wremain, vremain = se->deadline - se->vruntime;
+       u8 prev_score = se->burst_score;
+       restart_burst(se);
+       if (prev_score > se->burst_score) {
+               wremain = __unscale_slice(abs(vremain), prev_score);
+               vscaled = scale_slice(wremain, se);
+               if (unlikely(vremain < 0))
+                       vscaled = -vscaled;
+               se->deadline = se->vruntime + vscaled;
+       }
+}
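
/*
 * [Not part of the patch] Sketch of the deadline rescale above. When a
 * task's burst_score falls, the remaining virtual slack
 * (deadline - vruntime) was earned at the old weight, so it is unscaled
 * to wall time at the old score and rescaled at the new one. Standalone
 * userspace sketch: mul_u64_u32_shr() is emulated with __int128, the
 * two weights are the mainline sched_prio_to_weight[] entries for table
 * indices 25 and 20 (nice +5 and nice 0), and wmult is derived as
 * 2^32 / weight instead of using the kernel's precomputed table.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t b, int shift)
{
        return (uint64_t)((unsigned __int128)a * b >> shift);
}

int main(void)
{
        uint32_t weight_prev = 335;     /* table index 25 (old score) */
        uint32_t weight_new = 1024;     /* table index 20 (new score) */
        uint32_t wmult_new = (uint32_t)(((uint64_t)1 << 32) / weight_new);
        int64_t vremain = 3000000;      /* 3 ms of virtual slack */

        /* __unscale_slice(): virtual time -> wall time at the old score */
        uint64_t wremain = mul_u64_u32_shr(vremain, weight_prev, 10);
        /* scale_slice(): wall time -> virtual time at the new score */
        uint64_t vscaled = mul_u64_u32_shr(wremain, wmult_new, 22);

        printf("vremain=%lld -> wremain=%llu -> vscaled=%llu\n",
               (long long)vremain, (unsigned long long)wremain,
               (unsigned long long)vscaled);
        return 0;
}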
+#endif // CONFIG_SCHED_BORE
 
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
@@ -139,12 +238,8 @@ int __weak arch_asym_cpu_priority(int cpu)
  *
  * (default: 5 msec, units: microseconds)
  */
-#ifdef CONFIG_CACHY
-static unsigned int sysctl_sched_cfs_bandwidth_slice           = 3000UL;
-#else
 static unsigned int sysctl_sched_cfs_bandwidth_slice           = 5000UL;
 #endif
-#endif
 
 #ifdef CONFIG_NUMA_BALANCING
 /* Restrict the NUMA promotion throughput (MB/s) for each target node. */