Xenomai  3.0.8
thread.h
1 /*
2  * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_THREAD_H
20 #define _COBALT_KERNEL_THREAD_H
21 
22 #include <linux/wait.h>
23 #include <linux/sched.h>
24 #include <linux/sched/rt.h>
25 #include <cobalt/kernel/list.h>
26 #include <cobalt/kernel/stat.h>
27 #include <cobalt/kernel/timer.h>
28 #include <cobalt/kernel/registry.h>
29 #include <cobalt/kernel/schedparam.h>
30 #include <cobalt/kernel/trace.h>
31 #include <cobalt/kernel/synch.h>
32 #include <cobalt/uapi/kernel/thread.h>
33 #include <cobalt/uapi/signal.h>
34 #include <asm/xenomai/machine.h>
35 #include <asm/xenomai/thread.h>
36 
/* Union of all blocking state bits: a thread carrying any of these is
   not runnable (suspended, pending on a resource, delayed, dormant,
   relaxed to Linux, migrating, or held). */
#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD)
/* Mode bits a thread may toggle at runtime (see xnthread_set_mode()):
   round-robin, SIGDEBUG warnings, lock-break trap. */
#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB)

/* Forward declarations; full definitions live in sibling headers. */
struct xnthread;
struct xnsched;
struct xnselector;
struct xnsched_class;
struct xnsched_tpslot;
struct xnthread_personality;
struct completion;
51 
/* Creation attributes passed to xnthread_init()/__xnthread_init(). */
struct xnthread_init_attr {
	struct xnthread_personality *personality; /* Interface personality the thread starts in */
	cpumask_t affinity;	/* CPUs the thread may run on */
	int flags;		/* Initial state bits (XN* flags) */
	const char *name;	/* Symbolic name, copied into the TCB */
};
58 
/* Start attributes passed to xnthread_start(). */
struct xnthread_start_attr {
	int mode;			/* Initial mode bits (XNTHREAD_MODE_BITS subset) */
	void (*entry)(void *cookie);	/* Thread entry routine */
	void *cookie;			/* Opaque argument handed to @entry */
};
64 
/* Minimal wait descriptor a blocked thread registers via
   xnthread_prepare_wait(); the waker flips @posted through
   xnthread_complete_wait(). */
struct xnthread_wait_context {
	int posted;	/* Non-zero once the wait has been satisfied */
};
68 
/*
 * An interface personality: a set of optional callbacks a Cobalt
 * extension (e.g. the POSIX skin) installs to be notified of thread
 * life-cycle events. Callbacks returning a personality pointer may
 * hand over to another (stacked) personality; see
 * xnthread_run_handler_stack().
 */
struct xnthread_personality {
	const char *name;	/* Human-readable personality name */
	unsigned int magic;	/* Identification magic */
	int xid;		/* Extension slot index */
	atomic_t refcnt;	/* Live-thread reference count */
	struct {
		void *(*attach_process)(void);		/* Process binds to this personality */
		void (*detach_process)(void *arg);	/* Process unbinds; @arg from attach */
		void (*map_thread)(struct xnthread *thread);	/* Thread shadow-mapped over a task */
		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
							    int dest_cpu);	/* CPU migration notice */
		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
	} ops;
	struct module *module;	/* Owning kernel module, for refcounting */
};
87 
/*
 * The Cobalt core thread control block (TCB). Field order and
 * membership must not change: this layout is shared with the rest of
 * the core.
 */
struct xnthread {
	struct xnarchtcb tcb;		/* Architecture-dependent block */

	__u32 state;			/* Thread state flags (XNSUSP, XNPEND, ...) */
	__u32 info;			/* Thread information flags */
	__u32 local_info;		/* Local thread information flags */

	struct xnsched *sched;		/* Thread scheduler */
	struct xnsched_class *sched_class; /* Current scheduling class */
	struct xnsched_class *base_class;  /* Base scheduling class */

#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
	struct list_head tp_link;	/* Link in per-sched TP thread queue */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	struct xnsched_quota_group *quota; /* Quota scheduling group. */
	struct list_head quota_expired;
	struct list_head quota_next;
#endif
	cpumask_t affinity;		/* Processor affinity. */

	int bprio;			/* Base priority (before PIP boost) */

	int cprio;			/* Current priority */

	/* Weighted priority — presumably cprio plus the scheduling
	   class weight; confirm against sched-core. */
	int wprio;

	int lock_count;			/* Scheduler lock nesting count */

	/* Holder in the run queue — NOTE(review): queue linkage
	   inferred from core usage; confirm. */
	struct list_head rlink;

	/* Holder in a wait (pend) queue — confirm against xnsynch. */
	struct list_head plink;

	/* Holder in the global thread queue. */
	struct list_head glink;

	/* xnsynch objects this thread owns that other threads have
	   claimed (priority inheritance) — confirm against xnsynch. */
	struct list_head claimq;

	struct xnsynch *wchan;		/* Resource the thread pends on */

	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */

	int res_count;			/* Held resources count */

	struct xntimer rtimer;		/* Resource timer */

	struct xntimer ptimer;		/* Periodic timer */

	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */

	struct xnthread_wait_context *wcontext;	/* Active wait context. */

	struct {
		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
		xnstat_counter_t xsc;	/* Xenomai syscalls */
		xnstat_counter_t pf;	/* Number of page faults */
		xnstat_exectime_t account;    /* Execution time accounting entity */
		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
	} stat;

	struct xnselector *selector;	/* For select. */

	xnhandle_t handle;		/* Handle in registry */

	char name[XNOBJECT_NAME_LEN];	/* Symbolic name of thread */

	void (*entry)(void *cookie);	/* Thread entry routine */
	void *cookie;			/* Cookie to pass to the entry routine */

	/* Shared memory window visible from user space; mirrored by
	   xnthread_sync_window() and friends. */
	struct xnthread_user_window *u_window;

	struct xnthread_personality *personality; /* Current interface personality */

	struct completion exited;	/* Signaled on thread exit (for xnthread_join) */

#ifdef CONFIG_XENO_OPT_DEBUG
	const char *exe_path;		/* Executable path */
	u32 proghash;			/* Hash value for exe_path */
#endif
};
192 
193 static inline int xnthread_get_state(const struct xnthread *thread)
194 {
195  return thread->state;
196 }
197 
198 static inline int xnthread_test_state(struct xnthread *thread, int bits)
199 {
200  return thread->state & bits;
201 }
202 
203 static inline void xnthread_set_state(struct xnthread *thread, int bits)
204 {
205  thread->state |= bits;
206 }
207 
208 static inline void xnthread_clear_state(struct xnthread *thread, int bits)
209 {
210  thread->state &= ~bits;
211 }
212 
213 static inline int xnthread_test_info(struct xnthread *thread, int bits)
214 {
215  return thread->info & bits;
216 }
217 
218 static inline void xnthread_set_info(struct xnthread *thread, int bits)
219 {
220  thread->info |= bits;
221 }
222 
223 static inline void xnthread_clear_info(struct xnthread *thread, int bits)
224 {
225  thread->info &= ~bits;
226 }
227 
228 static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
229 {
230  return curr->local_info & bits;
231 }
232 
233 static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
234 {
235  curr->local_info |= bits;
236 }
237 
238 static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
239 {
240  curr->local_info &= ~bits;
241 }
242 
243 static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
244 {
245  return &thread->tcb;
246 }
247 
248 static inline int xnthread_base_priority(const struct xnthread *thread)
249 {
250  return thread->bprio;
251 }
252 
253 static inline int xnthread_current_priority(const struct xnthread *thread)
254 {
255  return thread->cprio;
256 }
257 
258 static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
259 {
260  return xnthread_archtcb(thread)->core.host_task;
261 }
262 
263 static inline pid_t xnthread_host_pid(struct xnthread *thread)
264 {
265  if (xnthread_test_state(thread, XNROOT))
266  return 0;
267 
268  return task_pid_nr(xnthread_host_task(thread));
269 }
270 
/* Walk the xnsynch objects claimed from @__thread (PIP boosters);
   NOTE(review): assumes 'link' is the xnsynch holder member — confirm
   against synch.h. */
#define xnthread_for_each_claimed(__pos, __thread) \
	list_for_each_entry(__pos, &(__thread)->claimq, link)

/* Same walk, safe against removal of the current entry. */
#define xnthread_for_each_claimed_safe(__pos, __tmp, __thread) \
	list_for_each_entry_safe(__pos, __tmp, &(__thread)->claimq, link)

/* Invoke personality handler __h for thread __t, if the current
   personality implements it. Single-shot: only the thread's current
   personality is consulted. */
#define xnthread_run_handler(__t, __h, __a...) \
	do { \
		struct xnthread_personality *__p__ = (__t)->personality; \
		if ((__p__)->ops.__h) \
			(__p__)->ops.__h(__t, ##__a); \
	} while (0)
283 
/* Invoke handler __h through the whole personality stack: each handler
   may return the next personality to consult, or NULL to stop. A
   missing handler also stops the walk. */
#define xnthread_run_handler_stack(__t, __h, __a...) \
	do { \
		struct xnthread_personality *__p__ = (__t)->personality; \
		do { \
			if ((__p__)->ops.__h == NULL) \
				break; \
			__p__ = (__p__)->ops.__h(__t, ##__a); \
		} while (__p__); \
	} while (0)
293 
294 static inline
295 struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
296 {
297  return thread->wcontext;
298 }
299 
300 static inline
301 int xnthread_register(struct xnthread *thread, const char *name)
302 {
303  return xnregistry_enter(name, thread, &thread->handle, NULL);
304 }
305 
306 static inline
307 struct xnthread *xnthread_lookup(xnhandle_t threadh)
308 {
309  struct xnthread *thread = xnregistry_lookup(threadh, NULL);
310  return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
311 }
312 
313 static inline void xnthread_sync_window(struct xnthread *thread)
314 {
315  if (thread->u_window) {
316  thread->u_window->state = thread->state;
317  thread->u_window->info = thread->info;
318  }
319 }
320 
321 static inline
322 void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
323 {
324  if (thread->u_window) {
325  thread->u_window->state = thread->state & ~state_bits;
326  thread->u_window->info = thread->info;
327  }
328 }
329 
330 static inline
331 void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
332 {
333  if (thread->u_window) {
334  thread->u_window->state = thread->state | state_bits;
335  thread->u_window->info = thread->info;
336  }
337 }
338 
339 static inline int normalize_priority(int prio)
340 {
341  return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
342 }
343 
/* Low-level TCB setup: bind @thread to @sched under @sched_class with
   @sched_param; used by xnthread_init(). */
int __xnthread_init(struct xnthread *thread,
		    const struct xnthread_init_attr *attr,
		    struct xnsched *sched,
		    struct xnsched_class *sched_class,
		    const union xnsched_policy_param *sched_param);

/* Act on a pending cancellation request for the current thread. */
void __xnthread_test_cancel(struct xnthread *curr);

/* Release the resources attached to an exiting thread. */
void __xnthread_cleanup(struct xnthread *curr);

/* Drop the TCB of a thread that never ran. */
void __xnthread_discard(struct xnthread *thread);
355 
371 static inline struct xnthread *xnthread_current(void)
372 {
373  return ipipe_current_threadinfo()->thread;
374 }
375 
387 static inline struct xnthread *xnthread_from_task(struct task_struct *p)
388 {
389  return ipipe_task_threadinfo(p)->thread;
390 }
391 
401 static inline void xnthread_test_cancel(void)
402 {
403  struct xnthread *curr = xnthread_current();
404 
405  if (curr && xnthread_test_info(curr, XNCANCELD))
406  __xnthread_test_cancel(curr);
407 }
408 
409 static inline
410 void xnthread_complete_wait(struct xnthread_wait_context *wc)
411 {
412  wc->posted = 1;
413 }
414 
415 static inline
416 int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
417 {
418  return wc->posted;
419 }
420 
#ifdef CONFIG_XENO_ARCH_FPU
/* Hand the FPU over to the incoming thread on @sched. */
void xnthread_switch_fpu(struct xnsched *sched);
#else
/* No FPU support: nothing to switch. */
static inline void xnthread_switch_fpu(struct xnsched *sched) { }
#endif /* CONFIG_XENO_ARCH_FPU */
426 
/* Initialize the TCB of a shadow (user-backed) thread. */
void xnthread_init_shadow_tcb(struct xnthread *thread);

/* Initialize the TCB of a per-CPU root (Linux/IDLE) thread. */
void xnthread_init_root_tcb(struct xnthread *thread);

/* Remove @thread from the registry (reverse of xnthread_register()). */
void xnthread_deregister(struct xnthread *thread);

/* Render @status state bits as text into @buf (at most @size bytes). */
char *xnthread_format_status(unsigned long status,
			     char *buf, int size);

/* Switch the reference clock @thread's timers are based on. */
int xnthread_set_clock(struct xnthread *thread,
		       struct xnclock *newclock);

/* Time left before @thread's pending timeout elapses, relative to @ns. */
xnticks_t xnthread_get_timeout(struct xnthread *thread,
			       xnticks_t ns);

/* Current release period of @thread, zero when aperiodic. */
xnticks_t xnthread_get_period(struct xnthread *thread);

/* Register @wc as the current thread's active wait context. */
void xnthread_prepare_wait(struct xnthread_wait_context *wc);

/* Initialize a new thread (high-level entry over __xnthread_init()). */
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param);

/* Start a newly created thread with the given start attributes. */
int xnthread_start(struct xnthread *thread,
		   const struct xnthread_start_attr *attr);

/* Change the control mode bits of the current thread. */
int xnthread_set_mode(int clrmask,
		      int setmask);

/* Suspend @thread for reason @mask, optionally bounded by @timeout
   interpreted per @timeout_mode, pending on @wchan if non-NULL. */
void xnthread_suspend(struct xnthread *thread,
		      int mask,
		      xnticks_t timeout,
		      xntmode_t timeout_mode,
		      struct xnsynch *wchan);

/* Resume @thread by clearing the condition bits in @mask. */
void xnthread_resume(struct xnthread *thread,
		     int mask);

/* Forcibly unblock @thread from any blocking call. */
int xnthread_unblock(struct xnthread *thread);

/* Make @thread periodic: first release at @idate (per @timeout_mode),
   then every @period ns. */
int xnthread_set_periodic(struct xnthread *thread,
			  xnticks_t idate,
			  xntmode_t timeout_mode,
			  xnticks_t period);

/* Wait for the next periodic release point; overrun count returned
   through @overruns_r when non-NULL. */
int xnthread_wait_period(unsigned long *overruns_r);

/* Set the round-robin time slice (@quantum ns) for @thread. */
int xnthread_set_slice(struct xnthread *thread,
		       xnticks_t quantum);

/* Request cancellation of @thread. */
void xnthread_cancel(struct xnthread *thread);

/* Wait for @thread to terminate. */
int xnthread_join(struct xnthread *thread, bool uninterruptible);

/* Migrate the current Linux task to the Xenomai (primary) domain. */
int xnthread_harden(void);

/* Switch the current shadow back to the Linux (secondary) domain;
   @notify controls SIGDEBUG delivery for @reason. */
void xnthread_relax(int notify, int reason);

/* Kick @thread out of primary mode (low-level, nklock held — presumably;
   confirm against thread.c). */
void __xnthread_kick(struct xnthread *thread);

/* Kick @thread out of primary mode. */
void xnthread_kick(struct xnthread *thread);

/* Demote @thread to the weakest scheduling class (low-level variant). */
void __xnthread_demote(struct xnthread *thread);

/* Demote @thread to the weakest scheduling class. */
void xnthread_demote(struct xnthread *thread);

/* Deliver Cobalt signal @sig with argument @arg to @thread. */
void xnthread_signal(struct xnthread *thread,
		     int sig, int arg);

/* Pin an emerging thread on its initial CPU. */
void xnthread_pin_initial(struct xnthread *thread);

/* Create a shadow thread context over the current kernel task;
   @done is completed when the mapping is in place. */
int xnthread_map(struct xnthread *thread,
		 struct completion *done);

/* Send @thread to the mayday trap for @reason. */
void xnthread_call_mayday(struct xnthread *thread, int reason);
503 
504 static inline void xnthread_get_resource(struct xnthread *thread)
505 {
506  if (xnthread_test_state(thread, XNWEAK|XNDEBUG))
507  thread->res_count++;
508 }
509 
510 static inline int xnthread_put_resource(struct xnthread *thread)
511 {
512  if (xnthread_test_state(thread, XNWEAK) ||
513  IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
514  if (unlikely(thread->res_count == 0)) {
515  if (xnthread_test_state(thread, XNWARN))
516  xnthread_signal(thread, SIGDEBUG,
517  SIGDEBUG_RESCNT_IMBALANCE);
518  return -EPERM;
519  }
520  thread->res_count--;
521  }
522 
523  return 0;
524 }
525 
#ifdef CONFIG_SMP

/* Move a non-running @thread over to @sched (another CPU's scheduler). */
void xnthread_migrate_passive(struct xnthread *thread,
			      struct xnsched *sched);
#else

/* Uniprocessor: passive migration is a no-op. */
static inline void xnthread_migrate_passive(struct xnthread *thread,
					    struct xnsched *sched)
{ }

#endif
537 
/* Low-level scheduling parameter change (caller handles locking and
   rescheduling). */
int __xnthread_set_schedparam(struct xnthread *thread,
			      struct xnsched_class *sched_class,
			      const union xnsched_policy_param *sched_param);

/* Change the base scheduling class/parameters of @thread. */
int xnthread_set_schedparam(struct xnthread *thread,
			    struct xnsched_class *sched_class,
			    const union xnsched_policy_param *sched_param);

/* Cancel all threads matching @mask, waiting up to @grace for them. */
int xnthread_killall(int grace, int mask);

/* The built-in Cobalt core personality. */
extern struct xnthread_personality xenomai_personality;
552 #endif /* !_COBALT_KERNEL_THREAD_H */
void xnthread_relax(int notify, int reason)
Switch a shadow thread back to the Linux domain.
Definition: thread.c:2015
void xnthread_cancel(struct xnthread *thread)
Cancel a thread.
Definition: thread.c:1500
#define XNCANCELD
Cancellation request is pending.
Definition: thread.h:72
static inline void xnthread_test_cancel(void)
Introduce a thread cancellation point.
Definition: thread.h:401
#define XNWARN
Issue SIGDEBUG on error detection.
Definition: thread.h:46
#define XNDEBUG
User-level debugging enabled.
Definition: thread.h:53
int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode)
Register a real-time object.
Definition: registry.c:630
void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
Suspend a thread.
Definition: thread.c:850
#define XNWEAK
Non real-time shadow (from the WEAK class)
Definition: thread.h:49
int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
Set thread time-slicing information.
Definition: thread.c:1441
#define XNROOT
Root thread (that is, Linux/IDLE)
Definition: thread.h:48
static inline struct xnthread * xnthread_current(void)
Retrieve the current Cobalt core TCB.
Definition: thread.h:371
int xnthread_wait_period(unsigned long *overruns_r)
Wait for the next periodic release point.
Definition: thread.c:1365
Scheduling information structure.
Definition: sched.h:58
int xnthread_map(struct xnthread *thread, struct completion *done)
Create a shadow thread context over a kernel task.
Definition: thread.c:2430
int xnthread_set_mode(int clrmask, int setmask)
Change control mode of the current thread.
Definition: thread.c:756
static void * xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
Find a real-time object into the registry.
Definition: registry.h:175
int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Initialize a new thread.
Definition: thread.c:592
int xnthread_harden(void)
Migrate a Linux task to the Xenomai domain.
Definition: thread.c:1905
int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
Make a thread periodic.
Definition: thread.c:1280
int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr)
Start a newly created thread.
Definition: thread.c:665
Copyright © 2011 Gilles Chanteperdrix gilles.chanteperdrix@xenomai.org.
Definition: atomic.h:24
int xnthread_unblock(struct xnthread *thread)
Unblock a thread.
Definition: thread.c:1189
static struct xnthread * xnthread_from_task(struct task_struct *p)
Retrieve the Cobalt core TCB attached to a Linux task.
Definition: thread.h:387
int xnthread_join(struct xnthread *thread, bool uninterruptible)
Join with a terminated thread.
Definition: thread.c:1629
int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Change the base scheduling parameters of a thread.
Definition: thread.c:1803
void xnthread_resume(struct xnthread *thread, int mask)
Resume a thread.
Definition: thread.c:1071