Xenomai 3.0.8
sched.h
/*
 * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>

/* Sched status flags */
#define XNRESCHED   0x10000000   /* Needs rescheduling */
#define XNINSW      0x20000000   /* In context switch */
#define XNINTCK     0x40000000   /* In master tick handler context */

/* Sched local flags */
#define XNIDLE      0x00010000   /* Idle (no outstanding timer) */
#define XNHTICK     0x00008000   /* Host tick pending */
#define XNINIRQ     0x00004000   /* In IRQ handling context */
#define XNHDEFER    0x00002000   /* Host tick deferred */

struct xnsched_rt {
        xnsched_queue_t runnable;   /* Runnable thread queue. */
};

/*
 * Scheduling information structure.
 */
struct xnsched {
        /* Scheduler specific status bitmask. */
        unsigned long status;
        /* Scheduler specific local flags bitmask. */
        unsigned long lflags;
        /* Current thread. */
        struct xnthread *curr;
#ifdef CONFIG_SMP
        /* Owner CPU id. */
        int cpu;
        /* Mask of CPUs needing rescheduling. */
        cpumask_t resched;
#endif
        /* Context of built-in real-time class. */
        struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
        /* Context of weak scheduling class. */
        struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
        /* Context of TP class. */
        struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        /* Context of sporadic scheduling class. */
        struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        /* Context of runtime quota scheduling. */
        struct xnsched_quota quota;
#endif
        /* Interrupt nesting level. */
        volatile unsigned inesting;
        /* Host timer. */
        struct xntimer htimer;
        /* Round-robin timer. */
        struct xntimer rrbtimer;
        /* Root thread control block. */
        struct xnthread rootcb;
#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
        struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
        /* Thread owning the current FPU context. */
        struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
        /* Watchdog timer object. */
        struct xntimer wdtimer;
        /* Watchdog tick count. */
        int wdcount;
#endif
#ifdef CONFIG_XENO_OPT_STATS
        /* Last account switch date (ticks). */
        xnticks_t last_account_switch;
        /* Currently active account. */
        xnstat_exectime_t *current_account;
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif

union xnsched_policy_param;

struct xnsched_class {
        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnsched *sched);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        int (*sched_chkparam)(struct xnthread *thread,
                              const union xnsched_policy_param *p);
        void (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
        void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
        int nthreads;
        struct xnsched_class *next;
        int weight;
        int policy;
        const char *name;
};
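
/*
 * Illustrative sketch (not part of the original header): a scheduling
 * class plugs into the core by filling in the handler table above.
 * The "myclass" names below are hypothetical; the in-tree policies
 * (RT, weak, TP, sporadic, quota) are the reference implementations.
 *
 *      static struct xnsched_class xnsched_class_myclass = {
 *              .sched_init       = myclass_init,
 *              .sched_enqueue    = myclass_enqueue,
 *              .sched_dequeue    = myclass_dequeue,
 *              .sched_requeue    = myclass_requeue,
 *              .sched_pick       = myclass_pick,
 *              .sched_setparam   = myclass_setparam,
 *              .sched_getparam   = myclass_getparam,
 *              .sched_trackprio  = myclass_trackprio,
 *              .weight           = XNSCHED_CLASS_WEIGHT(2),
 *              .name             = "myclass",
 *      };
 *
 * Handlers the core tests before calling (e.g. sched_chkparam,
 * sched_declare, sched_forget, sched_kick, sched_tick) may be left
 * NULL, as the inline helpers further down show.
 */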

#define XNSCHED_CLASS_WEIGHT(n)   (n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO   0x80000000

#define xnsched_for_each_thread(__thread)   \
        list_for_each_entry(__thread, &nkthreadq, glink)

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
        return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
        return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
        return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
        /* IRQs off */
        return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
        return xnsched_current()->curr;
}

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
        sched->status |= XNRESCHED;
}

#define xnsched_realtime_domain cobalt_pipeline.domain

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
        struct xnsched *current_sched = xnsched_current();

        if (current_sched == sched)
                current_sched->status |= XNRESCHED;
        else if (!xnsched_resched_p(sched)) {
                cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
                sched->status |= XNRESCHED;
                current_sched->status |= XNRESCHED;
        }
}

#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
        xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
        return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
        return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)              \
        for_each_online_cpu(cpu)                \
                if (xnsched_supported_cpu(cpu)) \

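/*
 * Illustrative usage sketch (not in the original header): the macro
 * expands to a loop header, so it is used like for_each_online_cpu(),
 * e.g. to visit each per-CPU scheduler slot Cobalt may run on:
 *
 *      int cpu;
 *
 *      for_each_realtime_cpu(cpu)
 *              pr_info("sched slot for CPU %d: %p\n",
 *                      cpu, xnsched_struct(cpu));
 */
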
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
        /*
         * NOTE: Since ___xnsched_run() won't run immediately if an
         * escalation to primary domain is needed, we won't use
         * critical scheduler information before we actually run in
         * primary mode; therefore we can first test the scheduler
         * status then escalate.
         *
         * Running in the primary domain means that no Linux-triggered
         * CPU migration may occur from that point either. Finally,
         * since migration is always a self-directed operation for
         * Xenomai threads, we can safely read the scheduler state
         * bits without holding the nklock.
         *
         * Said differently, if we race here because of a CPU
         * migration, it must have been Linux-triggered because we run
         * in secondary mode; in which case we will escalate to the
         * primary domain, then unwind the current call frame without
         * running the rescheduling procedure in
         * ___xnsched_run(). Therefore, the scheduler slot
         * (i.e. "sched") will be either valid, or unused.
         */
        if (((sched->status|sched->lflags) &
             (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
                return 0;

        return ___xnsched_run(sched);
}

/* The rescheduling procedure. */
static inline int xnsched_run(void)
{
        struct xnsched *sched = xnsched_current();
        /*
         * No rescheduling is possible if:
         *
         * - the current thread holds the scheduler lock (checked
         *   right below);
         * - an ISR context is active;
         * - we are caught in the middle of an unlocked context switch
         *   (the last two are checked in __xnsched_run()).
         */
        smp_rmb();
        if (unlikely(sched->curr->lock_count > 0))
                return 0;

        return __xnsched_run(sched);
}
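
/*
 * Illustrative usage sketch (not part of the original header):
 * xnsched_run() is typically invoked right after an operation which
 * may have changed the scheduler state, e.g. resuming a blocked
 * thread. The caller below is hypothetical; xnlock_*() and
 * xnthread_resume() are the regular Cobalt primitives.
 *
 *      spl_t s;
 *
 *      xnlock_get_irqsave(&nklock, s);
 *      xnthread_resume(thread, XNDELAY);
 *      xnsched_run();
 *      xnlock_put_irqrestore(&nklock, s);
 */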

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
        return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
        return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
        return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
        return !xnsched_unblockable_p();
}

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
        XENO_BUG_ON(COBALT, !hard_irqs_disabled());
        return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return 0;
}

#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
        sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/* Rotate a scheduler runqueue. */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_thread(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_thread(thread);
        xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        ret = xnsched_quota_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
        struct xnthread *curr = sched->curr;
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PIP boosts, and
         * does not hold the scheduler lock.
         */
        if (sched_class == curr->base_class &&
            sched_class->sched_tick &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
            curr->lock_count == 0)
                sched_class->sched_tick(sched);
}

static inline int xnsched_chkparam(struct xnsched_class *sched_class,
                                   struct xnthread *thread,
                                   const union xnsched_policy_param *p)
{
        if (sched_class->sched_chkparam)
                return sched_class->sched_chkparam(thread, p);

        return 0;
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
                                  struct xnthread *thread,
                                  const union xnsched_policy_param *p)
{
        int ret;

        if (sched_class->sched_declare) {
                ret = sched_class->sched_declare(thread, p);
                if (ret)
                        return ret;
        }
        if (sched_class != thread->base_class)
                sched_class->nthreads++;

        return 0;
}

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        thread->sched_class->sched_setparam(thread, p);
        thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
        thread->wprio = thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        xnthread_set_info(thread, XNKICKED);

        if (sched_class->sched_kick)
                sched_class->sched_kick(thread);

        xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_setparam(thread, p);
        else
                __xnsched_idle_setparam(thread, p);

        thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_getparam(thread, p);
        else
                __xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_trackprio(thread, p);
        else
                __xnsched_idle_trackprio(thread, p);

        thread->wprio = thread->cprio + sched_class->weight;
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        xnthread_set_info(thread, XNKICKED);
        xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */