Roomba Tank
CSC 460 Project 3
os.c
Go to the documentation of this file.
#include <avr/interrupt.h>
#include <avr/io.h>
#include <stdlib.h>
#include <string.h>

#include "os.h"
#include "queue.h"
6 
//Comment out the following line to remove debugging code from compiled version.
#define DEBUG

/* Entry task supplied by the application; created in main(). */
extern void a_main();

/*===========
 * RTOS Internal
 *===========
 */

/* Context-switch entry points implemented in assembly. */
extern void CSwitch();
extern void Exit_Kernel(); /* this is the same as CSwitch() */

/* Forward declarations for routines referenced before their definitions. */
void Task_Terminate(void);
static void Dispatch();
static void Kernel_Unlock_Mutex();

/* Assembly stub that traps from task context into the kernel. */
extern void Enter_Kernel();

/* Process descriptor table: one slot per possible task. */
static PD Process[MAXTHREAD];

/* Mutex descriptor table. */
static MTX Mutex[MAXMUTEX];

/* Event descriptor table. */
static EVT Event[MAXEVENT];

/* The currently running (or most recently dispatched) task. */
volatile static PD* Cp;

/* Kernel stack pointer, saved/restored by the assembly context switch. */
volatile unsigned char *KernelSp;

/* Stack pointer of the task being switched in or out. */
volatile unsigned char *CurrentSp;

/* Nonzero once OS_Start() has launched the kernel loop. */
volatile static unsigned int KernelActive;

/* Number of live (non-DEAD) tasks. */
volatile static unsigned int Tasks;

/* Monotonic counter used to hand out process IDs. */
volatile static unsigned int pCount;

/* Number of initialized mutexes (also the next mutex id). */
volatile static unsigned int Mutexes;

/* Number of initialized events (also the next event id). */
volatile static unsigned int Events;

/* Incremented by the TIMER3 ISR each time its 1 s period elapses. */
volatile unsigned int tickOverflowCount = 0;

105 volatile int RQCount = 0;
106 
109 volatile int SQCount = 0;
110 
113 volatile int WQCount = 0;
114 
/**
 * Build an initial stack frame for task f inside descriptor p and mark it
 * READY on the ready queue.
 * @param p   descriptor slot to initialize (caller picks a DEAD one)
 * @param f   function the task will execute
 * @param py  base priority (lower number = higher priority, per MINPRIORITY use)
 * @param arg creation argument, later retrievable via Task_GetArg
 * @return the new task's PID (taken from the monotonic pCount counter)
 */
PID Kernel_Create_Task_At( volatile PD *p, voidfuncptr f, PRIORITY py, int arg ) {
 unsigned char *sp;

#ifdef DEBUG
 int counter = 0;
#endif

 /* AVR stacks grow downward: start at the last byte of the workspace. */
 sp = (unsigned char *) &(p->workSpace[WORKSPACE-1]);

 //Clear the contents of the workspace
 memset(&(p->workSpace),0,WORKSPACE);

 //Notice that we are placing the address (16-bit) of the functions
 //onto the stack in reverse byte order (least significant first, followed
 //by most significant). This is because the "return" assembly instructions
 //(rtn and rti) pop addresses off in BIG ENDIAN (most sig. first, least sig.
 //second), even though the AT90 is LITTLE ENDIAN machine.

 //Store terminate at the bottom of stack to protect against stack underrun.
 *(unsigned char *)sp-- = ((unsigned int)Task_Terminate) & 0xff;
 *(unsigned char *)sp-- = (((unsigned int)Task_Terminate) >> 8) & 0xff;

 //Place return address of function at bottom of stack
 *(unsigned char *)sp-- = ((unsigned int)f) & 0xff;
 *(unsigned char *)sp-- = (((unsigned int)f) >> 8) & 0xff;
 *(unsigned char *)sp-- = 0x00; // Fix 17 bit address problem for PC

#ifdef DEBUG
 //Fill stack with initial values for development debugging
 //Registers 0 -> 31 and the status register
 for (counter = 0; counter < 34; counter++) {
 *(unsigned char *)sp-- = counter;
 }
#else
 //Place stack pointer at top of stack
 sp = sp - 34;
#endif

 /* Record bookkeeping: saved SP, entry point, ids, and priorities. */
 p->sp = sp; /* stack pointer into the "workSpace" */
 p->code = f; /* function to be executed as a task */
 p->request = NONE;
 p->p = pCount; /* PID = next value of the monotonic counter */
 p->py = py;
 p->inheritedPy = py; /* effective priority starts at the base priority */
 p->arg = arg;
 p->suspended = 0;
 p->eWait = 99; /* 99 = "not waiting on any event" sentinel */

 Tasks++;
 pCount++;

 p->state = READY;

 enqueueRQ(&p, &ReadyQueue, &RQCount);

 return p->p;
}
177 
181 static PID Kernel_Create_Task( voidfuncptr f, PRIORITY py, int arg ) {
182  int x;
183 
184  if (Tasks == MAXTHREAD) return; /* Too many task! */
185 
186  /* find a DEAD PD that we can use */
187  for (x = 0; x < MAXTHREAD; x++) {
188  if (Process[x].state == DEAD) break;
189  }
190 
191  unsigned int p = Kernel_Create_Task_At( &(Process[x]), f, py, arg );
192 
193  return p;
194 }
195 
199 static void Kernel_Suspend_Task() {
200  int i;
201 
202  if(Cp->p == Cp->pidAction) {
203  Cp->suspended = 1;
204  }
205  else {
206  for(i = 0; i < MAXTHREAD; i++) {
207  if (Process[i].p == Cp->pidAction) break;
208  }
209 
210  if(i >= MAXTHREAD) {
211  return;
212  }
213 
214  Process[i].suspended = 1;
215  }
216 }
217 
221 static unsigned int Kernel_Resume_Task() {
222  int i;
223 
224  for(i = 0; i < MAXTHREAD; i++) {
225  if (Process[i].p == Cp->pidAction) break;
226  }
227 
228  if(i >= MAXTHREAD) {
229  return 0;
230  }
231 
232  if(Process[i].suspended == 1) {
233  Process[i].suspended = 0;
234  if(Process[i].inheritedPy < Cp->inheritedPy) {
235  return 1;
236  }
237  }
238 
239  return 0;
240 }
241 
/* Tear down the current task: release every mutex it owns, then mark its
 * descriptor DEAD so Kernel_Create_Task can reuse the slot. */
static void Kernel_Terminate_Task() {
 /* NOTE(review): priority is forced to 0 before the mutex sweep;
  * Kernel_Unlock_Mutex's TERMINATED branch copies Cp->inheritedPy to the
  * next owner, so waiters woken here inherit priority 0 — confirm intended. */
 Cp->inheritedPy = 0;
 Cp->py = 0;
 Cp->state = TERMINATED; /* Kernel_Unlock_Mutex branches on this state */

 int i;

 /* Force-release every mutex owned by the dying task. */
 for(i = 0; i < MAXMUTEX; i++) {
 if (Mutex[i].owner == Cp->p) {
 Cp->m = Mutex[i].m;
 Kernel_Unlock_Mutex();
 }
 }

 /* Reset the descriptor to its unused state. */
 Cp->state = DEAD;
 Cp->eWait = 99; /* "not waiting on any event" sentinel */
 Cp->inheritedPy = MINPRIORITY;
 Cp->py = MINPRIORITY;
 Cp->p = 0; /* PID 0 doubles as "no owner" in the mutex code */
 Tasks--;
}
266 
271  m->m = Mutexes;
272  m->state = FREE;
273  Mutexes++;
274 
275  return m->m;
276 }
277 
281 static MUTEX Kernel_Init_Mutex() {
282  int x;
283 
284  if (Mutexes == MAXMUTEX) return; // Too many mutexes!
285 
286  // find a Disabled mutex that we can use
287  for (x = 0; x < MAXMUTEX; x++) {
288  if (Mutex[x].state == DISABLED) break;
289  }
290 
291  unsigned int m = Kernel_Init_Mutex_At( &(Mutex[x]) );
292 
293  return m;
294 }
295 
/* Handle a MUTEX_LOCK request for mutex id Cp->m.
 * Supports recursive locking and basic priority inheritance.
 * @return 1 when the caller may proceed (lock acquired, recursively
 *         re-acquired, or the id is unknown — NOTE(review): proceeding on
 *         an unknown id looks deliberate but confirm), 0 when the caller
 *         has been blocked on the waiting queue. */
static unsigned int Kernel_Lock_Mutex() {
 int i,j;
 MUTEX m = Cp->m;

 /* Locate the descriptor for mutex id m. */
 for(i = 0; i < MAXMUTEX; i++) {
 if (Mutex[i].m == m) break;
 }

 if(i>=MAXMUTEX){
 return 1;
 }

 if(Mutex[i].state == FREE) {
 /* Uncontended: take ownership. */
 Mutex[i].state = LOCKED;
 Mutex[i].owner = Cp->p;
 Mutex[i].lockCount++;
 }
 else if (Mutex[i].owner == Cp->p) {
 /* Recursive lock by the current owner. */
 Mutex[i].lockCount++;
 }
 else {
 /* Contended: find the owner to apply priority inheritance. */
 for(j = 0; j < MAXTHREAD; j++) {
 if ((Process[j].p == Mutex[i].owner) && (Process[j].p != 0)) break;
 }

 /* NOTE(review): if the owner is not found, j == MAXTHREAD and the
  * read below goes past the Process array — verify this is unreachable
  * (a LOCKED mutex should always have a live owner). */
 if (Process[j].inheritedPy > Cp->inheritedPy) {
 Process[j].inheritedPy = Cp->inheritedPy; /* boost the owner */
 }

 Cp->state = BLOCKED_ON_MUTEX;
 enqueueWQ(&Cp, &WaitingQueue, &WQCount);

 return 0;
 }

 return 1;
}
336 
/* Handle a MUTEX_UNLOCK request (also called from Kernel_Terminate_Task).
 * Recursive unlocks decrement lockCount; on the final unlock, ownership is
 * handed to the first task waiting on this mutex (if any), which also
 * inherits the releasing task's effective priority. */
static void Kernel_Unlock_Mutex() {
 int i;
 MUTEX m = Cp->m;

 /* Locate the descriptor for mutex id m. */
 for(i = 0; i < MAXMUTEX; i++) {
 if (Mutex[i].m == m) break;
 }

 if(i >= MAXMUTEX){
 return; /* unknown mutex id */
 }

 if(Mutex[i].owner != Cp->p){
 return; /* only the owner may unlock */
 }
 else if (Cp->state == TERMINATED) {
 /* Forced release while the owner is terminating. */
 volatile PD* p = dequeueWQ(&WaitingQueue, &WQCount, m);
 if (p == NULL) {
 /* No waiter: the mutex simply becomes free. */
 Mutex[i].lockCount = 0;
 Mutex[i].state = FREE;
 Mutex[i].owner = 0;
 return;
 }
 else {
 /* Hand ownership to the first waiter. */
 Mutex[i].lockCount = 1;
 Mutex[i].owner = p->p;

 /* NOTE(review): the waiter inherits Cp->inheritedPy, which the
  * terminate path has already forced to 0 (top priority) — confirm
  * this boost is intended. */
 p->inheritedPy = Cp->inheritedPy;
 p->state = READY;

 Cp->inheritedPy = Cp->py; /* drop any inherited boost */

 Cp->state = READY;

 enqueueRQ(&p, &ReadyQueue, &RQCount);
 }
 }
 else if (Mutex[i].lockCount > 1) {
 /* Recursive unlock: mutex remains owned. */
 Mutex[i].lockCount--;
 }
 else {
 /* Final unlock by a live task. */
 volatile PD* p = dequeueWQ(&WaitingQueue, &WQCount, m);

 if(p == NULL){
 /* No waiter: free the mutex and drop any inherited boost. */
 Mutex[i].state = FREE;
 Mutex[i].lockCount = 0;
 Mutex[i].owner = 0;
 Cp->inheritedPy = Cp->py;
 }
 else {
 /* Transfer ownership and our effective priority to the waiter,
  * requeue both tasks, and let the scheduler pick the next one. */
 Mutex[i].lockCount = 1;
 Mutex[i].owner = p->p;

 p->inheritedPy = Cp->inheritedPy;
 p->state = READY;

 Cp->inheritedPy = Cp->py;

 Cp->state = READY;

 enqueueRQ(&p, &ReadyQueue, &RQCount);
 enqueueRQ(&Cp, &ReadyQueue, &RQCount);
 Dispatch();
 }
 }
}
406 
411  e->e = Events;
412  e->state = UNSIGNALLED;
413  e->p = NULL;
414 
415  Events++;
416 
417  return e->e;
418 }
419 
423 static EVENT Kernel_Init_Event() {
424  int x;
425 
426  if (Events == MAXEVENT) return; // Too many mutexes!
427 
428  // find a Disabled mutex that we can use
429  for (x = 0; x < MAXEVENT; x++) {
430  if (Event[x].state == INACTIVE) break;
431  }
432 
433  unsigned int e = Kernel_Init_Event_At( &(Event[x]) );
434 
435  return e;
436 }
437 
441 static unsigned int Kernel_Wait_Event() {
442  int i;
443  unsigned int e = Cp->eSend;
444 
445  for (i = 0; i < MAXEVENT; i++) {
446  if (Event[i].e == e) break;
447  }
448 
449  if (i >= MAXEVENT) {
450  return 0;
451  }
452 
453  if (Event[i].p == NULL) {
454  if (Event[i].state == SIGNALLED) {
455  Event[i].state = UNSIGNALLED;
456  return 0;
457  }
458  else {
459  Cp->eWait = e;
460  Event[i].p = Cp->p;
461  return 1;
462  }
463  }
464 
465  return 0;
466 }
467 
471 static void Kernel_Signal_Event() {
472  int i, j;
473  unsigned int e = Cp->eSend;
474 
475  for (i = 0; i < MAXEVENT; i++) {
476  if (Event[i].e == e) break;
477  }
478 
479  if (i >= MAXEVENT) {
480  return;
481  }
482 
483  for(j = 0; j < MAXTHREAD; j++) {
484  if (Process[j].eWait == e) break;
485  }
486 
487  if (j >= MAXTHREAD) {
488  Event[i].state = SIGNALLED;
489  }
490  else {
491  Process[j].state = READY;
492  Process[j].eWait = 99;
493 
494  Event[i].p = NULL;
495 
496  if ((Process[j].inheritedPy < Cp->inheritedPy) && (Process[j].suspended == 0)) {
497  Cp->state = READY;
498  enqueueRQ(&Cp, &ReadyQueue, &RQCount);
499  Dispatch();
500  }
501  }
502 }
503 
508 static void Dispatch() {
509  Cp = dequeueRQ(&ReadyQueue, &RQCount);
510 
511  if (Cp == NULL) {
512  OS_Abort();
513  }
514 
515  CurrentSp = Cp->sp;
516  Cp->state = RUNNING;
517 }
518 
/* The kernel's main loop: dispatch a task, switch to it via Exit_Kernel,
 * and when the task traps back in (Enter_Kernel) service whatever request
 * it recorded in Cp->request, then repeat forever. Never returns. */
static void Next_Kernel_Request() {
 Dispatch(); /* select a new task to run */

 unsigned int mutex_is_locked;
 unsigned int resumed;
 unsigned int waiting;

 while(1) {
 Cp->request = NONE; /* clear its request */

 /* activate this newly selected task */
 CurrentSp = Cp->sp;

 Exit_Kernel(); /* or CSwitch() */

 /* if this task makes a system call, it will return to here! */

 /* save the Cp's stack pointer */
 Cp->sp = CurrentSp;

 switch(Cp->request){
 case CREATE:
 /* Spawn a new task; its PID is returned via Cp->response. */
 Cp->response = Kernel_Create_Task( Cp->code, Cp->py, Cp->arg );
 break;
 case NEXT:
 case NONE:
 /* Voluntary yield (or timer-driven Task_Next): requeue, reschedule. */
 Cp->state = READY;
 enqueueRQ(&Cp, &ReadyQueue, &RQCount);
 Dispatch();
 break;
 case SLEEP:
 /* Park the caller on the sleep queue until its wake tick. */
 Cp->state = SLEEPING;
 enqueueSQ(&Cp, &SleepQueue, &SQCount);
 Dispatch();
 break;
 case SUSPEND:
 Kernel_Suspend_Task();
 /* Only reschedule when the caller suspended itself. */
 if(Cp->suspended) {
 Cp->state = READY;
 enqueueRQ(&Cp, &ReadyQueue, &RQCount);
 Dispatch();
 }
 break;
 case RESUME:
 /* Reschedule only when the resumed task outranks the caller. */
 resumed = Kernel_Resume_Task();
 if(resumed){
 Cp->state = READY;
 enqueueRQ(&Cp, &ReadyQueue, &RQCount);
 Dispatch();
 }
 break;
 case TERMINATE:
 /* deallocate all resources used by this task */
 Kernel_Terminate_Task();
 Dispatch();
 break;
 case MUTEX_INIT:
 Cp->response = Kernel_Init_Mutex();
 break;
 case MUTEX_LOCK:
 /* Return value 0 means the caller is now blocked on the mutex. */
 mutex_is_locked = Kernel_Lock_Mutex();
 if (!mutex_is_locked) {
 Dispatch();
 }
 break;
 case MUTEX_UNLOCK:
 Kernel_Unlock_Mutex();
 break;
 case EVENT_INIT:
 Cp->response = Kernel_Init_Event();
 break;
 case EVENT_WAIT:
 waiting = Kernel_Wait_Event();
 if (waiting) {
 Cp->state = WAITING_ON_EVENT;
 /* NOTE(review): the blocked task is placed on the READY queue
  * despite its WAITING_ON_EVENT state — presumably dequeueRQ or the
  * signal path accounts for this; verify against queue.c. */
 enqueueRQ(&Cp, &ReadyQueue, &RQCount);
 Dispatch();
 }
 break;
 case EVENT_SIGNAL:
 Kernel_Signal_Event();
 break;
 default:
 /* Houston! we have a problem! */
 break;
 }
 }
}
615 
616 /*================
617  * RTOS API and Stubs
618  *================
619  */
620 
624 void OS_Init() {
625  int x;
626 
627  Tasks = 0;
628  KernelActive = 0;
629  Mutexes = 0;
630  Events = 0;
631  pCount = 0;
632 
633  for (x = 0; x < MAXTHREAD; x++) {
634  memset(&(Process[x]),0,sizeof(PD));
635  Process[x].state = DEAD;
636  Process[x].eWait = 99;
637  Process[x].p = 0;
638  }
639 
640  for (x = 0; x < MAXMUTEX; x++) {
641  memset(&(Mutex[x]),0,sizeof(MTX));
642  Mutex[x].state = DISABLED;
643  }
644 
645  for (x = 0; x < MAXEVENT; x++) {
646  memset(&(Event[x]),0,sizeof(EVT));
647  Event[x].state = INACTIVE;
648  }
649 }
650 
654 void OS_Start() {
655  if ( (! KernelActive) && (Tasks > 0)) {
657 
658  KernelActive = 1;
659  Next_Kernel_Request();
660  /* SHOULD NEVER GET HERE!!! */
661  }
662 }
663 
/**
 * Fatal abort: terminate the RTOS with a nonzero status.
 * Fix: `exit` was called without including <stdlib.h>, relying on an
 * implicit declaration (invalid since C99); <stdlib.h> is added to the
 * include block. On AVR, exit() disables interrupts and loops forever.
 */
void OS_Abort() {
 exit(1);
}
670 
675  if(KernelActive) {
677  Cp->request = MUTEX_INIT;
678  Enter_Kernel();
679  return Cp->response;
680  }
681 }
682 
686 void Mutex_Lock(MUTEX m) {
687  if(KernelActive) {
689  Cp->request = MUTEX_LOCK;
690  Cp->m = m;
691  Enter_Kernel();
692  }
693 
694 }
695 
700  if(KernelActive) {
702  Cp->request = MUTEX_UNLOCK;
703  Cp->m = m;
704  Enter_Kernel();
705  }
706 }
707 
712  if(KernelActive) {
714  Cp->request = EVENT_INIT;
715  Enter_Kernel();
716  return Cp->response;
717  }
718 }
719 
723 void Event_Wait(EVENT e) {
724  if(KernelActive) {
726  Cp->request = EVENT_WAIT;
727  Cp->eSend = e;
728  Enter_Kernel();
729  }
730 }
731 
736  if(KernelActive) {
738  Cp->request = EVENT_SIGNAL;
739  Cp->eSend = e;
740  Enter_Kernel();
741  }
742 }
743 
748  unsigned int p;
749 
750  if (KernelActive) {
752  Cp->request = CREATE;
753  Cp->code = f;
754  Cp->py = py;
755  Cp->arg = arg;
756  Enter_Kernel();
757  p = Cp->response;
758  } else {
759  /* call the RTOS function directly */
760  p = Kernel_Create_Task( f, py, arg );
761  }
762  return p;
763 }
764 
768 void Task_Next() {
769  if (KernelActive) {
771  Cp->request = NEXT;
772  Enter_Kernel();
773  }
774 }
775 
779 void Task_Sleep(TICK t) {
780  if (KernelActive) {
782  Cp->request = SLEEP;
783  unsigned int clockTicks = TCNT3/625;
784  Cp->wakeTickOverflow = tickOverflowCount + ((t + clockTicks) / 100);
785  Cp->wakeTick = (t + clockTicks) % 100;
786  Enter_Kernel();
787  }
788 }
789 
793 void Task_Suspend(PID p) {
794  if (KernelActive) {
796  Cp->request = SUSPEND;
797  Cp->pidAction = p;
798  Enter_Kernel();
799  }
800 }
801 
805 void Task_Resume(PID p) {
806  if (KernelActive) {
808  Cp->request = RESUME;
809  Cp->pidAction = p;
810  Enter_Kernel();
811  }
812 }
813 
818  if (KernelActive) {
820  Cp -> request = TERMINATE;
821  Enter_Kernel();
822  /* never returns here! */
823  }
824 }
825 
829 int Task_GetArg(PID p) {
830  return (Cp->arg);
831 }
832 
/* Configure Timer1 and Timer3 in CTC mode with a /256 prescaler.
 * With OCR1A = 624 and OCR3A = 62499 the periods are in a 1:100 ratio
 * (625 vs 62500 counts) — a 10 ms tick and a 1 s epoch assuming a 16 MHz
 * clock; TODO confirm the clock frequency.
 * NOTE(review): several lines were stripped from this extraction (doxygen
 * gaps around lines 838-841, 844-866, 869) — most likely comments, possibly
 * also an Enable_Interrupt()/sei() call; verify against the original. */
void setup() {

 TCCR1A = 0; /* Timer1: no output-compare pin behavior */
 TCCR1B = 0;
 TCNT1 = 0; /* reset the counter */
 OCR1A = 624; /* compare match every 625 counts (the Task_Sleep tick) */
 TCCR1B |= (1 << WGM12); /* CTC mode: counter clears on compare match */
 TCCR1B |= (1 << CS12); /* prescaler /256 */
 TIMSK1 |= (1 << OCIE1A); /* enable compare-A IRQ (sleep-queue scan) */
 TCCR3A = 0; /* Timer3: same configuration, 100x longer period */
 TCCR3B = 0;
 TCNT3 = 0;
 OCR3A = 62499; /* compare match every 62500 counts (one epoch) */
 TCCR3B |= (1 << WGM32); /* CTC mode */
 TCCR3B |= (1 << CS32); /* prescaler /256 */
 TIMSK3 = (1 << OCIE3A); /* enable compare-A IRQ (tickOverflowCount) */

}
871 
/* Timer1 tick: wake any sleeping tasks whose wake time has arrived.
 * A task is due when its recorded overflow epoch has passed and its tick
 * offset is at or before the current position within Timer3's period. */
ISR(TIMER1_COMPA_vect) {

 volatile int i;

 /* NOTE(review): the loop counts indices down from the tail (SQCount-1)
  * but always tests SleepQueue[i] and dequeues the head — presumably the
  * sleep queue's ordering makes this consistent; verify against queue.c. */
 for (i = SQCount-1; i >= 0; i--) {
 if ((SleepQueue[i]->wakeTickOverflow <= tickOverflowCount) && (SleepQueue[i]->wakeTick <= (TCNT3/625))) {
 volatile PD *p = dequeue(&SleepQueue, &SQCount);
 p->state = READY;
 enqueueRQ(&p, &ReadyQueue, &RQCount);
 /* If the woken task outranks the current one, request a switch. */
 if (p->inheritedPy < Cp->inheritedPy) {
 Task_Next();
 }
 }
 else {
 break; /* remaining entries wake later */
 }
 }

 // Task_Next();
}
895 
899 ISR(TIMER3_COMPA_vect) {
900  tickOverflowCount += 1;
901 }
902 
/* Program entry: configure the timer hardware, reset the kernel tables,
 * create the application's first task, and hand control to the kernel. */
void main() {
 setup(); /* timer/interrupt hardware configuration */

 OS_Init(); /* reset all kernel descriptor tables */
 Task_Create(a_main, 0, 1); /* first task: a_main, priority 0, arg 1 */
 OS_Start(); /* enters the kernel loop; never returns */
}
913 
void Task_Next()
Definition: os.c:768
#define MINPRIORITY
Definition: os.h:9
Definition: os.h:44
Definition: os.h:54
void Event_Signal(EVENT e)
Definition: os.c:735
#define MAXEVENT
Definition: os.h:7
volatile unsigned int tickOverflowCount
Definition: os.c:101
Definition: os.h:37
ISR(TIMER1_COMPA_vect)
Definition: os.c:875
Definition: os.h:33
unsigned char workSpace[WORKSPACE]
Definition: os.h:106
Definition: os.h:32
void main()
Definition: os.c:906
void Task_Resume(PID p)
Definition: os.c:805
int x
Definition: base.c:16
PID pidAction
Definition: os.h:120
volatile PD * dequeue(volatile PD **Queue, volatile int *QCount)
Definition: queue.c:137
void OS_Start()
Definition: os.c:654
EVENT e
Definition: os.h:93
Definition: os.h:47
void CSwitch()
volatile unsigned char * CurrentSp
Definition: os.c:83
TICK wakeTick
Definition: os.h:115
void enqueueRQ(volatile PD **p, volatile PD **Queue, volatile int *QCount)
Definition: queue.c:57
MUTEX Mutex_Init()
Definition: os.c:674
void a_main()
Definition: base.c:317
PRIORITY inheritedPy
Definition: os.h:109
int Task_GetArg(PID p)
Definition: os.c:829
Definition: os.h:46
PROCESS_STATES state
Definition: os.h:107
volatile PD * dequeueRQ(volatile PD **Queue, volatile int *QCount)
Definition: queue.c:109
unsigned int suspended
Definition: os.h:119
PRIORITY py
Definition: os.h:108
volatile int SQCount
Definition: os.c:109
EVENT eWait
Definition: os.h:117
Definition: os.h:50
unsigned char * sp
Definition: os.h:105
unsigned int PID
Definition: os.h:21
#define MAXMUTEX
Definition: os.h:6
#define Enable_Interrupt()
Definition: os.h:17
TICK wakeTickOverflow
Definition: os.h:114
Definition: os.h:52
void Task_Terminate(void)
Definition: os.c:817
Definition: os.h:48
MUTEX_STATE state
Definition: os.h:74
Definition: os.h:64
#define Disable_Interrupt()
Definition: os.h:16
void OS_Abort()
Definition: os.c:667
unsigned int MUTEX
Definition: os.h:22
void Task_Sleep(TICK t)
Definition: os.c:779
Definition: os.h:63
#define MAXTHREAD
Definition: os.h:4
unsigned int lockCount
Definition: os.h:76
volatile PD * SleepQueue[MAXTHREAD]
Definition: os.c:108
#define NULL
Definition: os.h:13
volatile unsigned char * KernelSp
Definition: os.c:76
void Event_Wait(EVENT e)
Definition: os.c:723
MUTEX m
Definition: os.h:116
volatile PD * WaitingQueue[MAXTHREAD]
Definition: os.c:112
Definition: os.h:34
PID Task_Create(voidfuncptr f, PRIORITY py, int arg)
Definition: os.c:747
Definition: os.h:49
Definition: os.h:84
void Enter_Kernel()
PID owner
Definition: os.h:75
PID p
Definition: os.h:95
unsigned int PRIORITY
Definition: os.h:23
Definition: os.h:65
unsigned int response
Definition: os.h:113
Definition: os.h:45
unsigned int TICK
Definition: os.h:25
EVENT eSend
Definition: os.h:118
Definition: os.h:85
void enqueueSQ(volatile PD **p, volatile PD **Queue, volatile int *QCount)
Definition: queue.c:33
void(* voidfuncptr)(void)
Definition: os.h:19
MUTEX Kernel_Init_Mutex_At(volatile MTX *m)
Definition: os.c:270
EVENT_STATE state
Definition: os.h:94
Definition: os.h:31
PID Kernel_Create_Task_At(volatile PD *p, voidfuncptr f, PRIORITY py, int arg)
Definition: os.c:120
volatile int RQCount
Definition: os.c:105
KERNEL_REQUEST_TYPE request
Definition: os.h:112
EVENT Kernel_Init_Event_At(volatile EVT *e)
Definition: os.c:410
volatile PD * dequeueWQ(volatile PD **Queue, volatile int *QCount, MUTEX m)
Definition: queue.c:81
void enqueueWQ(volatile PD **p, volatile PD **Queue, volatile int *QCount)
Definition: queue.c:17
volatile int WQCount
Definition: os.c:113
void Mutex_Lock(MUTEX m)
Definition: os.c:686
#define WORKSPACE
Definition: os.h:5
void Mutex_Unlock(MUTEX m)
Definition: os.c:699
volatile PD * ReadyQueue[MAXTHREAD]
Definition: os.c:104
voidfuncptr code
Definition: os.h:111
void setup()
Definition: os.c:836
unsigned int EVENT
Definition: os.h:24
Definition: os.h:92
Definition: os.h:55
MUTEX m
Definition: os.h:73
void Task_Suspend(PID p)
Definition: os.c:793
Definition: os.h:51
void OS_Init()
Definition: os.c:624
Definition: os.h:83
Definition: os.h:72
void Exit_Kernel()
EVENT Event_Init()
Definition: os.c:711