#include <avr/interrupt.h>

static void Dispatch();
static void Kernel_Unlock_Mutex();

volatile static PD* Cp;                        /* descriptor of the currently running task */
volatile static unsigned int KernelActive;     /* nonzero once the kernel has taken control */
volatile static unsigned int Tasks;            /* number of live tasks */
volatile static unsigned int pCount;           /* process-ID counter */
volatile static unsigned int Mutexes;          /* number of initialized mutexes */
volatile static unsigned int Events;           /* number of initialized events */
/* Kernel_Create_Task_At: seed the new task's stack so that the dispatcher's
   first "return" enters f, and a return from f falls through into
   Task_Terminate. */
*(unsigned char *)sp-- = (((unsigned int)Task_Terminate) >> 8) & 0xff;
/* ... */
*(unsigned char *)sp-- = ((unsigned int)f) & 0xff;
*(unsigned char *)sp-- = (((unsigned int)f) >> 8) & 0xff;
*(unsigned char *)sp-- = 0x00;   /* third return-address byte: the ATmega2560 has a 3-byte PC */

/* placeholder images of the registers restored on the first context switch */
for (counter = 0; counter < 34; counter++) {
    *(unsigned char *)sp-- = counter;
}
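For reference, here is a minimal, host-compilable sketch of the same stack-seeding idea, assuming a fixed-size byte array per task and 16-bit code addresses; seed_stack, f_addr, term_addr, and WS_SIZE are illustrative names, not identifiers from the original source:

#define WS_SIZE 256                     /* stand-in for the kernel's WORKSPACE */

/* Lay out the initial frame in a plain byte array: the dispatcher's first
   "return" enters the task body, and a return from the body falls through
   into the terminate routine. */
static unsigned char *seed_stack(unsigned char *ws,
                                 unsigned int f_addr,
                                 unsigned int term_addr) {
    unsigned char *sp = ws + WS_SIZE - 1;

    *sp-- = term_addr & 0xff;           /* terminate routine, low byte  */
    *sp-- = (term_addr >> 8) & 0xff;    /* terminate routine, high byte */
    *sp-- = 0x00;                       /* 3rd PC byte (ATmega2560)     */

    *sp-- = f_addr & 0xff;              /* task body, low byte          */
    *sp-- = (f_addr >> 8) & 0xff;       /* task body, high byte         */
    *sp-- = 0x00;

    for (unsigned char c = 0; c < 34; c++)  /* apparently 32 registers, SREG, and one extra byte */
        *sp-- = c;

    return sp;                          /* becomes the task's saved SP  */
}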
/* Kernel_Create_Task: find a free (DEAD) process-descriptor slot */
if (Process[x].state == DEAD)
    break;
static void Kernel_Suspend_Task() {
    /* locate the task the requester named in pidAction */
    if (Process[i].p == Cp->pidAction)
        break;

static unsigned int Kernel_Resume_Task() {
    if (Process[i].p == Cp->pidAction)
        break;

    if (Process[i].suspended == 1) {
        /* resume, and request a dispatch if the resumed task outranks the
           current one (lower inheritedPy = higher priority) */
        if (Process[i].inheritedPy < Cp->inheritedPy) {
static void Kernel_Terminate_Task() {
    /* release any mutexes still held by the dying task */
    if (Mutex[i].owner == Cp->p) {
        Kernel_Unlock_Mutex();
static MUTEX Kernel_Init_Mutex() {
    /* find an unused (DISABLED) mutex slot */
    if (Mutex[x].state == DISABLED)
        break;
static unsigned int Kernel_Lock_Mutex() {
    /* look the handle up in the mutex table */
    if (Mutex[i].m == m)
        break;

    if (Mutex[i].state == FREE) {
        /* uncontended: the current task takes ownership */

    else if (Mutex[i].owner == Cp->p) {
        /* recursive lock by the owner */

    /* contended: find the owner's descriptor (for priority inheritance) */
    if ((Process[j].p == Mutex[i].owner) && (Process[j].p != 0))
        break;
static void Kernel_Unlock_Mutex() {
    if (Mutex[i].m == m)
        break;

    /* only the owner may unlock */
    if (Mutex[i].owner != Cp->p) {

    /* recursive unlock: just drop the nesting count */
    else if (Mutex[i].lockCount > 1) {
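The lock/unlock fragments above imply a recursive mutex with an owner field and a nesting count. The following is a minimal sketch of that state machine under the assumption that FREE/LOCKED and the field names mirror the kernel's MTX structure; lock_sketch and unlock_sketch are hypothetical names, and the blocking/priority-inheritance steps are only described in comments:

#define FREE   0
#define LOCKED 1

typedef struct {
    unsigned int state;      /* FREE or LOCKED */
    unsigned int owner;      /* PID of the holder, 0 if none */
    unsigned int lockCount;  /* recursion depth */
} MTX_SKETCH;

/* Returns 1 if the caller got (or already held) the lock, 0 if it must block. */
static unsigned int lock_sketch(MTX_SKETCH *mx, unsigned int caller_pid) {
    if (mx->state == FREE) {            /* uncontended: take ownership */
        mx->state = LOCKED;
        mx->owner = caller_pid;
        mx->lockCount = 1;
        return 1;
    }
    if (mx->owner == caller_pid) {      /* recursive lock by the owner */
        mx->lockCount++;
        return 1;
    }
    /* contended: the real kernel bumps the owner's inherited priority up to
       the caller's here, then moves the caller onto the waiting queue */
    return 0;
}

static void unlock_sketch(MTX_SKETCH *mx, unsigned int caller_pid) {
    if (mx->owner != caller_pid) return;           /* only the owner may unlock */
    if (mx->lockCount > 1) { mx->lockCount--; return; }
    /* last unlock: the real kernel restores the owner's original priority and
       hands the mutex to the highest-priority waiter, if any */
    mx->state = FREE;
    mx->owner = 0;
    mx->lockCount = 0;
}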
static EVENT Kernel_Init_Event() {
    /* find an unused (INACTIVE) event slot */
    if (Event[x].state == INACTIVE)
        break;
static unsigned int Kernel_Wait_Event() {
    unsigned int e = Cp->eSend;

    if (Event[i].e == e)
        break;

    /* no task attached to the event yet: the caller becomes the waiter */
    if (Event[i].p == NULL) {
static void Kernel_Signal_Event() {
    unsigned int e = Cp->eSend;

    if (Event[i].e == e)
        break;

    /* find a task blocked on this event */
    if (Process[j].eWait == e)
        break;

    if (j >= MAXTHREAD) {
        /* nobody is waiting: remember the signal */

    Process[j].eWait = 99;   /* 99 is apparently the "not waiting" sentinel */

    /* preempt if the woken task outranks the signaler */
    if ((Process[j].inheritedPy < Cp->inheritedPy) && (Process[j].suspended == 0)) {
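Read together, Kernel_Wait_Event and Kernel_Signal_Event behave like a one-shot rendezvous: a signal with no waiter is remembered, a wait that finds a pending signal returns immediately, and otherwise the waiter blocks until signaled. A compact, host-compilable sketch of that protocol; evt_wait, evt_signal, and the field names are hypothetical, and NO_EVENT mirrors the eWait = 99 sentinel above:

#define NO_EVENT 99                 /* mirrors the eWait = 99 sentinel */

typedef struct {
    unsigned int pending;           /* a signal arrived before any waiter  */
    unsigned int waiter;            /* PID blocked on the event, 0 if none */
} EVT_SKETCH;

/* Returns 1 if the caller must block, 0 if a pending signal was consumed. */
static unsigned int evt_wait(EVT_SKETCH *ev, unsigned int caller_pid) {
    if (ev->pending) {              /* signal already arrived: consume it */
        ev->pending = 0;
        return 0;
    }
    ev->waiter = caller_pid;        /* otherwise register and block */
    return 1;
}

static void evt_signal(EVT_SKETCH *ev) {
    if (ev->waiter == 0) {          /* nobody waiting: remember the signal */
        ev->pending = 1;
        return;
    }
    /* the real kernel clears eWait, re-enqueues the waiter on the ready
       queue, and preempts if the waiter outranks the signaler */
    ev->waiter = 0;
}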
static void Dispatch() {

static void Next_Kernel_Request() {
    unsigned int mutex_is_locked;
    unsigned int resumed;
    unsigned int waiting;

    /* case bodies from the request switch: */
    Kernel_Suspend_Task();

    resumed = Kernel_Resume_Task();

    Kernel_Terminate_Task();

    mutex_is_locked = Kernel_Lock_Mutex();
    if (!mutex_is_locked) {
        /* the caller just blocked on the mutex: dispatch another task */

    Kernel_Unlock_Mutex();

    waiting = Kernel_Wait_Event();

    Kernel_Signal_Event();
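Next_Kernel_Request evidently dispatches on Cp->request after each task traps into the kernel. Below is a schematic, compilable sketch of one pass of that switch; the enumerators, stub functions, and service_request are all hypothetical stand-ins, since the excerpt does not show the real KERNEL_REQUEST_TYPE names:

/* Hypothetical request codes. */
typedef enum {
    REQ_NONE, REQ_SUSPEND, REQ_RESUME, REQ_TERMINATE,
    REQ_MUTEX_LOCK, REQ_MUTEX_UNLOCK, REQ_EVENT_WAIT, REQ_EVENT_SIGNAL
} REQUEST_SKETCH;

/* Stubs standing in for the kernel internals excerpted above. */
static void         suspend_task(void)   {}
static unsigned int resume_task(void)    { return 0; }
static void         terminate_task(void) {}
static unsigned int lock_mutex(void)     { return 1; }
static void         unlock_mutex(void)   {}
static unsigned int wait_event(void)     { return 0; }
static void         signal_event(void)   {}
static void         dispatch(void)       {}

/* One pass of the request loop: service the trapped request, and dispatch
   a different task whenever the caller can no longer run (blocked on a
   mutex, waiting on an event, preempted by a resumed or woken task). */
static void service_request(REQUEST_SKETCH req) {
    switch (req) {
    case REQ_SUSPEND:      suspend_task();                   dispatch(); break;
    case REQ_RESUME:       if (resume_task()) dispatch();                break;
    case REQ_TERMINATE:    terminate_task();                 dispatch(); break;
    case REQ_MUTEX_LOCK:   if (!lock_mutex()) dispatch();                break;
    case REQ_MUTEX_UNLOCK: unlock_mutex();                               break;
    case REQ_EVENT_WAIT:   if (wait_event())  dispatch();                break;
    case REQ_EVENT_SIGNAL: signal_event();                               break;
    default:                                                             break;
    }
}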
/* OS_Init: zero every process, mutex, and event descriptor */
memset(&(Process[x]), 0, sizeof(PD));

memset(&(Mutex[x]), 0, sizeof(MTX));

memset(&(Event[x]), 0, sizeof(EVT));

/* OS_Start: enter the kernel's request loop */
if ((!KernelActive) && (Tasks > 0)) {

    Next_Kernel_Request();
/* Task_Create: public entry that forwards to the kernel */
p = Kernel_Create_Task(f, py, arg);
/* Task_Sleep: convert the Timer3 count into scheduler ticks (625 counts each)
   and record when to wake up; the tick counter wraps modulo 100 */
unsigned int clockTicks = TCNT3 / 625;

Cp->wakeTick = (t + clockTicks) % 100;
/* Timer1: CTC mode, clk/256 prescaler, compare-match A interrupt enabled */
TCCR1B |= (1 << WGM12);
TCCR1B |= (1 << CS12);
TIMSK1 |= (1 << OCIE1A);

/* Timer3: same configuration, used as the sleep/tick time base */
TCCR3B |= (1 << WGM32);
TCCR3B |= (1 << CS32);
TIMSK3 = (1 << OCIE3A);
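The prescaler choice explains the magic 625 in the sleep code: assuming the ATmega2560's usual 16 MHz system clock, clk/256 yields 62 500 timer counts per second, so 625 counts span one 10 ms tick. As compile-time arithmetic (the clock frequency is an assumption, not stated in the excerpt):

#define F_CPU_HZ  16000000UL   /* assumed system clock */
#define PRESCALER 256UL        /* CS12 / CS32 => clk/256 */
#define TICK_MS   10UL

/* (16 000 000 / 256) * 10 / 1000 = 625 timer counts per 10 ms tick,
   exactly the divisor in TCNT3 / 625 above. */
#define COUNTS_PER_TICK ((F_CPU_HZ / PRESCALER) * TICK_MS / 1000UL)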
/* tick ISR: walk the sleep queue and wake every task whose deadline has passed */
for (i = SQCount - 1; i >= 0; i--) {
    if ((SleepQueue[i]->wakeTickOverflow <= tickOverflowCount) &&
        (SleepQueue[i]->wakeTick <= (TCNT3 / 625))) {
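Because wakeTick is stored modulo 100, a sleep deadline is really the pair (wakeTickOverflow, wakeTick): the overflow counter names the 100-tick epoch and wakeTick the position inside it. A host-testable sketch of the comparison, with deadline_passed as a hypothetical name:

typedef struct {
    unsigned int wakeTickOverflow;   /* 100-tick epoch of the deadline */
    unsigned int wakeTick;           /* tick within that epoch         */
} DEADLINE_SKETCH;

static int deadline_passed(const DEADLINE_SKETCH *d,
                           unsigned int overflow_now,
                           unsigned int tick_now) {
    return (d->wakeTickOverflow <= overflow_now) && (d->wakeTick <= tick_now);
}

Note that, mirroring the excerpt, the tick test is still required even once the overflow counter has moved past the deadline's epoch, so a wake-up can be delayed by up to one epoch in that corner case.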
/* Related declarations from the same file: */

#define Disable_Interrupt()
#define Enable_Interrupt()

typedef void (*voidfuncptr)(void);

volatile unsigned char * KernelSp;
volatile unsigned char * CurrentSp;
volatile unsigned int tickOverflowCount;
unsigned char workSpace[WORKSPACE];            /* per-task stack, a PD field */
KERNEL_REQUEST_TYPE request;                   /* pending syscall, a PD field */

volatile PD * ReadyQueue[MAXTHREAD];
volatile PD * SleepQueue[MAXTHREAD];
volatile PD * WaitingQueue[MAXTHREAD];

void enqueueRQ(volatile PD **p, volatile PD **Queue, volatile int *QCount);
volatile PD * dequeueRQ(volatile PD **Queue, volatile int *QCount);
void enqueueSQ(volatile PD **p, volatile PD **Queue, volatile int *QCount);
void enqueueWQ(volatile PD **p, volatile PD **Queue, volatile int *QCount);
volatile PD * dequeueWQ(volatile PD **Queue, volatile int *QCount, MUTEX m);
volatile PD * dequeue(volatile PD **Queue, volatile int *QCount);

PID Kernel_Create_Task_At(volatile PD *p, voidfuncptr f, PRIORITY py, int arg);
MUTEX Kernel_Init_Mutex_At(volatile MTX *m);
EVENT Kernel_Init_Event_At(volatile EVT *e);

PID Task_Create(voidfuncptr f, PRIORITY py, int arg);
void Task_Terminate(void);
void Mutex_Unlock(MUTEX m);
void Event_Signal(EVENT e);
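To close the loop, here is how the public API above is typically driven. This compiles only against the project's own header, and everything not in the declaration list is an assumption: OS_Init, OS_Start, Mutex_Init, Event_Init, Mutex_Lock, and Event_Wait are assumed counterparts to the routines excerpted earlier, and the task bodies are purely illustrative.

EVENT ready;                        /* handle from the assumed Event_Init  */
MUTEX lock;                         /* handle from the assumed Mutex_Init  */

void producer(void) {
    /* ... produce ... */
    Event_Signal(ready);
    Task_Terminate();
}

void consumer(void) {
    Event_Wait(ready);              /* assumed counterpart to Event_Signal */
    Mutex_Lock(lock);               /* assumed counterpart to Mutex_Unlock */
    /* ... consume ... */
    Mutex_Unlock(lock);
    Task_Terminate();
}

int main(void) {
    OS_Init();                      /* zeroes the Process/Mutex/Event tables */
    lock  = Mutex_Init();
    ready = Event_Init();
    Task_Create(producer, 1, 0);
    Task_Create(consumer, 2, 0);
    OS_Start();                     /* hands control to the kernel; never returns */
}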