os.c

00001 /**
00002  * @file os.c
00003  *
00004  * @brief A Real Time Operating System
00005  *
00006  * Our implementation of the operating system described by Mantis Cheng in os.h.
00007  *
00008  * @author Scott Craig
00009  * @author Justin Tanner
00010  */
00011 
00012 #include <avr/io.h>
00013 #include <avr/interrupt.h>
00014 
00015 #include "os.h"
00016 #include "kernel.h"
00017 #include "error_code.h"
00018 
00019 /* Needed for memset */
00020 /* #include <string.h> */
00021 
00022 /** @brief main function provided by user application. The first task to run. */
00023 extern int main();
00024 
00025 /** PPP, the periodic task schedule as (name, ticks) pairs, defined in the user application. */
00026 extern const unsigned char PPP[];
00027 
00028 /** PT, the number of (name, ticks) pairs in PPP, defined in the user application. */
00029 extern const unsigned int PT;
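/*
 * A minimal sketch of the schedule a user application might provide (the
 * names and slot lengths below are assumptions chosen only for illustration):
 *
 *     // PPP holds PT (name, ticks) pairs; names are 1..MAXNAME, or IDLE for an empty slot.
 *     const unsigned char PPP[] = { 1, 10, 2, 10, IDLE, 5 };
 *     const unsigned int  PT    = 3;
 */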
00030 
00031 /** The task descriptor of the currently RUNNING task. */
00032 static task_descriptor_t* cur_task = NULL;
00033 
00034 /** Since this is a "full-served" model, the kernel is executing using its own stack. */
00035 static volatile uint16_t kernel_sp;
00036 
00037 /** This table contains all task descriptors, regardless of state, plus idler. */
00038 static task_descriptor_t task_desc[MAXPROCESS + 1];
00039 
00040 /** The special "idle task" at the end of the descriptors array. */
00041 static task_descriptor_t* idle_task = &task_desc[MAXPROCESS];
00042 
00043 /** The current kernel request. */
00044 static volatile kernel_request_t kernel_request;
00045 
00046 /** Arguments for Task_Create() request. */
00047 static volatile create_args_t kernel_request_create_args;
00048 
00049 /** Return value for Task_Create() request. */
00050 static volatile int kernel_request_retval;
00051 
00052 /** Argument and return value for Event class of requests. */
00053 static volatile EVENT* kernel_request_event_ptr;
00054 
00055 /** Queue of unused task descriptors available to Task_Create(): the "dead pool". */
00056 static queue_t dead_pool_queue;
00057 
00058 /** Number of events created so far */
00059 static uint8_t num_events_created = 0;
00060 
00061 /** The ready queue for RR tasks. Their scheduling is round-robin. */
00062 static queue_t rr_queue;
00063 
00064 /** The ready queue for SYSTEM tasks. Their scheduling is first come, first served. */
00065 static queue_t system_queue;
00066 
00067 /** An array of queues for tasks waiting on events. */
00068 static queue_t event_queue[MAXEVENT];
00069 
00070 /** time remaining in current slot */
00071 static uint8_t ticks_remaining = 0;
00072 
00073 /** Indicates if periodic task in this slot has already run this time */
00074 static uint8_t slot_task_finished = 0;
00075 
00076 /** Index of name of task in current slot in PPP array. An even number from 0 to 2*(PT-1). */
00077 static int slot_name_index = 0;
00078 
00079 /** Maps a PERIODIC task name to its task descriptor. */
00080 static task_descriptor_t* name_to_task_ptr[MAXNAME + 1];
00081 
00082 /** The names that appear in PPP */
00083 static uint8_t name_in_PPP[MAXNAME + 1];
00084 
00085 /** Error message used in OS_Abort() */
00086 static uint8_t volatile error_msg = ERR_RUN_1_USER_CALLED_OS_ABORT;
00087 
00088 
00089 /* Forward declarations */
00090 /* kernel */
00091 static void kernel_main_loop(void);
00092 static void kernel_dispatch(void);
00093 static void kernel_handle_request(void);
00094 /* context switching */
00095 static void exit_kernel(void) __attribute__((naked));
00096 static void enter_kernel(void) __attribute__((naked));
00097 void TIMER1_COMPA_vect(void) __attribute__ ((signal, naked));
00098 
00099 static int kernel_create_task();
00100 static void kernel_terminate_task(void);
00101 /* events */
00102 static void kernel_event_wait(void);
00103 static void kernel_event_signal(uint8_t is_broadcast, uint8_t and_next);
00104 /* queues */
00105 
00106 static void enqueue(queue_t* queue_ptr, task_descriptor_t* task_to_add);
00107 static task_descriptor_t* dequeue(queue_t* queue_ptr);
00108 
00109 static void kernel_update_ticker(void);
00110 static void check_PPP_names(void);
00111 static void idle (void);
00112 static void _delay_25ms(void);
00113 
00114 /*
00115  * FUNCTIONS
00116  */
00117 /**
00118  *  @brief The idle task does nothing but busy loop.
00119  */
00120 static void idle (void)
00121 {
00122     for(;;)
00123     {};
00124 }
00125 
00126 
00127 /**
00128  * @fn kernel_main_loop
00129  *
00130  * @brief The heart of the RTOS, the main loop where the kernel is entered and exited.
00131  *
00132  * The complete function is:
00133  *
00134  *  Loop
00135  *<ol><li>Select and dispatch a process to run</li>
00136  *<li>Exit the kernel (The loop is left and re-entered here.)</li>
00137  *<li>Handle the request from the process that was running.</li>
00138  *<li>End loop, go to 1.</li>
00139  *</ol>
00140  */
00141 static void kernel_main_loop(void)
00142 {
00143     for(;;)
00144     {
00145         kernel_dispatch();
00146 
00147         exit_kernel();
00148 
00149         /* if this task makes a system call, or is interrupted,
00150          * the thread of control will return to here. */
00151 
00152         kernel_handle_request();
00153     }
00154 }
00155 
00156 
00157 /**
00158  * @fn kernel_dispatch
00159  *
00160  *@brief The second part of the scheduler.
00161  *
00162  * Chooses the next task to run.
00163  *
00164  */
00165 static void kernel_dispatch(void)
00166 {
00167     /* If the current state is RUNNING, then select it to run again.
00168      * kernel_handle_request() has already determined it should be selected.
00169      */
00170 
00171     if(cur_task->state != RUNNING || cur_task == idle_task)
00172     {
00173         if(system_queue.head != NULL)
00174         {
00175             cur_task = dequeue(&system_queue);
00176         }
00177         else if(!slot_task_finished && name_to_task_ptr[PPP[slot_name_index]] != NULL)
00178         {
00179             /* Run the PERIODIC task assigned to the current slot. */
00180             cur_task = name_to_task_ptr[PPP[slot_name_index]];
00181         }
00182         else if(rr_queue.head != NULL)
00183         {
00184             cur_task = dequeue(&rr_queue);
00185         }
00186         else
00187         {
00188             /* No task available, so idle. */
00189             cur_task = idle_task;
00190         }   
00191 
00192         cur_task->state = RUNNING;
00193     }
00194 }
00195 
00196 
00197 /**
00198  * @fn kernel_handle_request
00199  *
00200  *@brief The first part of the scheduler.
00201  *
00202  * Perform some action based on the system call or timer tick.
00203  * Perhaps place the current process in a ready or waiting queue.
00204  */
00205 static void kernel_handle_request(void)
00206 {
00207    switch(kernel_request)
00208     {
00209     case NONE:
00210         /* Should not happen. */
00211         break;
00212 
00213     case TIMER_EXPIRED:
00214         kernel_update_ticker();
00215 
00216         /* Round robin tasks get pre-empted on every tick. */
00217         if(cur_task->level == RR && cur_task->state == RUNNING)
00218         {
00219             cur_task->state = READY;
00220             enqueue(&rr_queue, cur_task);
00221         }
00222         break;
00223 
00224     case TASK_CREATE:
00225         kernel_request_retval = kernel_create_task();
00226 
00227         /* Check if new task has higher priority, and that it wasn't an ISR
00228          * making the request.
00229          */
00230         if(kernel_request_retval)
00231         {
00232             /* If new task is SYSTEM and cur is not, then don't run old one */
00233             if(kernel_request_create_args.level == SYSTEM && cur_task->level != SYSTEM)
00234             {
00235                 cur_task->state = READY;
00236             }
00237 
00238             /* If cur is RR, it might be pre-empted by a new PERIODIC. */
00239             if(cur_task->level == RR &&
00240                kernel_request_create_args.level == PERIODIC &&
00241                PPP[slot_name_index] == kernel_request_create_args.name)
00242             {
00243                 cur_task->state = READY;
00244             }
00245 
00246             /* enqueue READY RR tasks. */
00247             if(cur_task->level == RR && cur_task->state == READY)
00248             {
00249                 enqueue(&rr_queue, cur_task);
00250             }
00251         }
00252         break;
00253 
00254     case TASK_TERMINATE:
00255         if(cur_task != idle_task)
00256         {
00257             kernel_terminate_task();
00258         }
00259         break;
00260 
00261     case TASK_NEXT:
00262         switch(cur_task->level)
00263         {  
00264         case SYSTEM:
00265             enqueue(&system_queue, cur_task);
00266             break;
00267 
00268         case PERIODIC:
00269             slot_task_finished = 1;
00270             break;
00271 
00272         case RR:
00273             enqueue(&rr_queue, cur_task);
00274             break;
00275 
00276         default: /* idle_task */
00277             break;
00278         }
00279 
00280         cur_task->state = READY;
00281         break;
00282 
00283     case TASK_GET_ARG:
00284         /* Should not happen. Handled in task itself. */
00285         break;
00286 
00287     case EVENT_INIT:
00288         kernel_request_event_ptr = NULL;
00289         if(num_events_created < MAXEVENT)
00290         {
00291             /* Pass a number back to the task, but pretend it is a pointer.
00292              * It is the index of the event_queue plus 1.
00293              * (0 is return value for failure.)
00294              */
00295             kernel_request_event_ptr = (EVENT *)(uint16_t)(num_events_created + 1);
00296             /*
00297             event_queue[num_events_created].head = NULL;
00298             event_queue[num_events_created].tail = NULL;
00299             */
00300             ++num_events_created;
00301         }
00302         else
00303         {
00304             kernel_request_event_ptr = (EVENT *)(uint16_t)0;
00305         }
00306         break;
00307 
00308     case EVENT_WAIT:
00309         /* idle_task does not wait. */
00310         if(cur_task != idle_task)
00311         {
00312             kernel_event_wait();
00313         }
00314 
00315         break;
00316 
00317     case EVENT_SIGNAL:
00318         kernel_event_signal(0 /* not broadcast */, 0 /* not task_next */);
00319         break;
00320 
00321     case EVENT_BROADCAST:
00322         kernel_event_signal(1 /* is broadcast */, 0 /* not task_next */);
00323         break;
00324 
00325     case EVENT_SIGNAL_AND_NEXT:
00326         if(cur_task->level == PERIODIC)
00327         {
00328             slot_task_finished = 1;
00329         }
00330 
00331         kernel_event_signal(0 /* not broadcast */, 1 /* is task_next */);
00332 
00333         break;
00334 
00335     case EVENT_BROADCAST_AND_NEXT:
00336         if(cur_task->level == PERIODIC)
00337         {
00338             slot_task_finished = 1;
00339         }
00340 
00341         kernel_event_signal(1 /* is broadcast */, 1 /* is task_next */);
00342         break;
00343 
00344     default:
00345         /* Should never happen */
00346         error_msg = ERR_RUN_8_RTOS_INTERNAL_ERROR;
00347         OS_Abort();
00348         break;
00349     }
00350 
00351     kernel_request = NONE;
00352 }
00353 
00354 
00355 /*
00356  * Context switching
00357  */
00358 /**
00359  * It is important to keep the order of context saving and restoring exactly
00360  * in reverse. Also, when a new task is created, it is important to
00361  * initialize its "initial" context in the same order as a saved context.
00362  *
00363  * Save r31 and SREG on stack, disable interrupts, then save
00364  * the rest of the registers on the stack. Wherever this macro is used,
00365  * interrupts either are already disabled or need to be disabled (the cli here does that).
00366  */
00367 #define    SAVE_CTX_TOP()       asm volatile (\
00368     "push   r31             \n\t"\
00369     "in     r31,__SREG__    \n\t"\
00370     "cli                    \n\t"::); /* Disable interrupt */
00371 
00372 #define STACK_SREG_SET_I_BIT()    asm volatile (\
00373     "ori    r31, 0x80        \n\t"::);
00374 
00375 #define    SAVE_CTX_BOTTOM()       asm volatile (\
00376     "push   r31             \n\t"\
00377     "push   r30             \n\t"\
00378     "push   r29             \n\t"\
00379     "push   r28             \n\t"\
00380     "push   r27             \n\t"\
00381     "push   r26             \n\t"\
00382     "push   r25             \n\t"\
00383     "push   r24             \n\t"\
00384     "push   r23             \n\t"\
00385     "push   r22             \n\t"\
00386     "push   r21             \n\t"\
00387     "push   r20             \n\t"\
00388     "push   r19             \n\t"\
00389     "push   r18             \n\t"\
00390     "push   r17             \n\t"\
00391     "push   r16             \n\t"\
00392     "push   r15             \n\t"\
00393     "push   r14             \n\t"\
00394     "push   r13             \n\t"\
00395     "push   r12             \n\t"\
00396     "push   r11             \n\t"\
00397     "push   r10             \n\t"\
00398     "push   r9              \n\t"\
00399     "push   r8              \n\t"\
00400     "push   r7              \n\t"\
00401     "push   r6              \n\t"\
00402     "push   r5              \n\t"\
00403     "push   r4              \n\t"\
00404     "push   r3              \n\t"\
00405     "push   r2              \n\t"\
00406     "push   r1              \n\t"\
00407     "push   r0              \n\t"::);
00408 
00409 /**
00410  * @brief Push all the registers and SREG onto the stack.
00411  */
00412 #define    SAVE_CTX()    SAVE_CTX_TOP();SAVE_CTX_BOTTOM();
00413 
00414 /**
00415  * @brief Pop all registers and the status register.
00416  */
00417 #define    RESTORE_CTX()    asm volatile (\
00418     "pop    r0                \n\t"\
00419     "pop    r1                \n\t"\
00420     "pop    r2                \n\t"\
00421     "pop    r3                \n\t"\
00422     "pop    r4                \n\t"\
00423     "pop    r5                \n\t"\
00424     "pop    r6                \n\t"\
00425     "pop    r7                \n\t"\
00426     "pop    r8                \n\t"\
00427     "pop    r9                \n\t"\
00428     "pop    r10             \n\t"\
00429     "pop    r11             \n\t"\
00430     "pop    r12             \n\t"\
00431     "pop    r13             \n\t"\
00432     "pop    r14             \n\t"\
00433     "pop    r15             \n\t"\
00434     "pop    r16             \n\t"\
00435     "pop    r17             \n\t"\
00436     "pop    r18             \n\t"\
00437     "pop    r19             \n\t"\
00438     "pop    r20             \n\t"\
00439     "pop    r21             \n\t"\
00440     "pop    r22             \n\t"\
00441     "pop    r23             \n\t"\
00442     "pop    r24             \n\t"\
00443     "pop    r25             \n\t"\
00444     "pop    r26             \n\t"\
00445     "pop    r27             \n\t"\
00446     "pop    r28             \n\t"\
00447     "pop    r29             \n\t"\
00448     "pop    r30             \n\t"\
00449     "pop    r31             \n\t"\
00450     "out    __SREG__, r31    \n\t"\
00451     "pop    r31             \n\t"::);
00452 
00453 
00454 /**
00455  * @fn exit_kernel
00456  *
00457  * @brief The actual context switching code begins here.
00458  *
00459  * This function is called by the kernel. Upon entry, we are using
00460  * the kernel stack, on top of which is the address of the instruction
00461  * after the call to exit_kernel().
00462  *
00463  * Assumption: Our kernel is executed with interrupts already disabled.
00464  *
00465  * The "naked" attribute prevents the compiler from adding instructions
00466  * to save and restore register values. It also prevents an
00467  * automatic return instruction.
00468  */
00469 static void exit_kernel(void)
00470 {
00471     /*
00472      * The PC was pushed on the stack with the call to this function.
00473      * Now push on the I/O registers and the SREG as well.
00474      */
00475      SAVE_CTX();
00476 
00477     /*
00478      * The last piece of the context is the SP. Save it to a variable.
00479      */
00480     kernel_sp = SP;
00481 
00482     /*
00483      * Now restore the task's context, SP first.
00484      */
00485     SP = (uint16_t)(cur_task->sp);
00486 
00487     /*
00488      * Now restore I/O and SREG registers.
00489      */
00490     RESTORE_CTX();
00491 
00492     /*
00493      * return explicitly required as we are "naked".
00494      * Interrupts are enabled or disabled according to SREG
00495      * recovered from stack, so we don't want to explicitly
00496      * enable them here.
00497      *
00498      * The last piece of the context, the PC, is popped off the stack
00499      * with the ret instruction.
00500      */
00501     asm volatile ("ret\n"::);
00502 }
00503 
00504 
00505 /**
00506  * @fn enter_kernel
00507  *
00508  * @brief All system calls eventually enter here.
00509  *
00510  * Assumption: We are still executing on cur_task's stack.
00511  * The return address of the caller of enter_kernel() is on the
00512  * top of the stack.
00513  */
00514 static void enter_kernel(void)
00515 {
00516     /*
00517      * The PC was pushed on the stack with the call to this function.
00518      * Now push on the I/O registers and the SREG as well.
00519      */
00520     SAVE_CTX();
00521 
00522     /*
00523      * The last piece of the context is the SP. Save it to a variable.
00524      */
00525     cur_task->sp = (uint8_t*)SP;
00526 
00527     /*
00528      * Now restore the kernel's context, SP first.
00529      */
00530     SP = kernel_sp;
00531 
00532     /*
00533      * Now restore I/O and SREG registers.
00534      */
00535     RESTORE_CTX();
00536 
00537     /*
00538      * return explicitly required as we are "naked".
00539      *
00540      * The last piece of the context, the PC, is popped off the stack
00541      * with the ret instruction.
00542      */
00543     asm volatile ("ret\n"::);
00544 }
00545 
00546 
00547 /**
00548  * @fn TIMER1_COMPA_vect
00549  *
00550  * @brief The interrupt handler for output compare interrupts on Timer 1
00551  *
00552  * Used to enter the kernel when a tick expires.
00553  *
00554  * Assumption: We are still executing on the cur_task stack.
00555  * The return address inside the current task code is on the top of the stack.
00556  *
00557  * The "naked" attribute prevents the compiler from adding instructions
00558  * to save and restore register values. It also prevents an
00559  * automatic return instruction.
00560  */
00561 void TIMER1_COMPA_vect(void)
00562 {
00563     /*
00564      * Save the interrupted task's context on its stack,
00565      * and save the stack pointer.
00566      *
00567      * On the cur_task's stack, the registers and SREG are
00568      * saved in the right order, but we have to modify the stored value
00569      * of SREG. We know it should have interrupts enabled because this
00570      * ISR was able to execute, but it has interrupts disabled because
00571      * it was stored while this ISR was executing. So we set the bit (I = bit 7)
00572      * in the stored value.
00573      */
00574     SAVE_CTX_TOP();
00575    
00576     STACK_SREG_SET_I_BIT();
00577 
00578     SAVE_CTX_BOTTOM();
00579 
00580     cur_task->sp = (uint8_t*)SP;
00581 
00582     /*
00583      * Now that we already saved a copy of the stack pointer
00584      * for every context including the kernel, we can move to
00585      * the kernel stack and use it. We will restore it again later.
00586      */
00587     SP = kernel_sp;
00588 
00589     /*
00590      * Inform the kernel that this task was interrupted.
00591      */
00592     kernel_request = TIMER_EXPIRED;
00593 
00594     /*
00595      * Prepare for next tick interrupt.
00596      */
00597     OCR1A += TICK_CYCLES;
00598 
00599     /*
00600      * Restore the kernel context. (The stack pointer is restored again.)
00601      */
00602     SP = kernel_sp;
00603 
00604     /*
00605      * Now restore I/O and SREG registers.
00606      */
00607     RESTORE_CTX();
00608 
00609     /*
00610      * We use "ret" here, not "reti", because we do not want to
00611      * enable interrupts inside the kernel.
00612      * Explicitly required as we are "naked".
00613      *
00614      * The last piece of the context, the PC, is popped off the stack
00615      * with the ret instruction.
00616      */
00617     asm volatile ("ret\n"::);
00618 }
00619 
00620 
00621 /*
00622  * Tasks Functions
00623  */
00624 /**
00625  *  @brief Kernel function to create a new task.
00626  *
00627  * When creating a new task, it is important to initialize its stack just as if
00628  * it had called "enter_kernel()", so that when we switch to it later, we
00629  * can just restore its execution context on its stack.
00630  * @sa enter_kernel 
00631  */
00632 static int kernel_create_task()
00633 {
00634     /* The new task. */
00635     task_descriptor_t *p;
00636     uint8_t* stack_bottom;
00637     
00638 
00639     if (dead_pool_queue.head == NULL)
00640     {
00641         /* Too many tasks! */
00642         return 0;
00643     }
00644 
00645     if(kernel_request_create_args.level == PERIODIC &&
00646         (kernel_request_create_args.name == IDLE ||
00647          kernel_request_create_args.name > MAXNAME))
00648     {
00649         /* PERIODIC name is out of range [1 .. MAXNAME] */
00650         error_msg = ERR_2_CREATE_NAME_OUT_OF_RANGE;
00651         OS_Abort();
00652     }
00653 
00654     if(kernel_request_create_args.level == PERIODIC &&
00655         name_in_PPP[kernel_request_create_args.name] == 0)
00656     {
00657         error_msg = ERR_5_NAME_NOT_IN_PPP;
00658         OS_Abort();
00659     }
00660 
00661     if(kernel_request_create_args.level == PERIODIC &&
00662     name_to_task_ptr[kernel_request_create_args.name] != NULL)
00663     {
00664         /* PERIODIC name already used */
00665         error_msg = ERR_4_PERIODIC_NAME_IN_USE;
00666         OS_Abort();
00667     }
00668 
00669     /* idling "task" goes in last descriptor. */
00670     if(kernel_request_create_args.level == NULL)
00671     {
00672         p = &task_desc[MAXPROCESS];
00673     }
00674     /* Find an unused descriptor. */
00675     else
00676     {
00677         p = dequeue(&dead_pool_queue);
00678     }
00679 
00680     stack_bottom = &(p->stack[WORKSPACE-1]);
00681 
00682     /* The stack grows down in memory, so the stack pointer is going to end up
00683      * pointing to the location 32 + 1 + 2 + 2 = 37 bytes above the bottom, to make
00684      * room for (from bottom to top):
00685      *   the address of Task_Terminate() to destroy the task if it ever returns,
00686      *   the address of the start of the task to "return" to the first time it runs,
00687      *   register 31,
00688      *   the stored SREG, and
00689      *   registers 30 to 0.
00690      */
00691     uint8_t* stack_top = stack_bottom - (32 + 1 + 2 + 2);
00692 
00693     /* Not necessary to clear the task descriptor. */
00694     /* memset(p,0,sizeof(task_descriptor_t)); */
00695 
00696     /* stack_top[0] is the byte above the stack.
00697      * stack_top[1] is r0. */
00698     stack_top[2] = (uint8_t) 0; /* r1 is the "zero" register. */
00699     /* stack_top[31] is r30. */
00700     stack_top[32] = (uint8_t) _BV(SREG_I); /* set SREG_I bit in stored SREG. */
00701     /* stack_top[33] is r31. */
00702 
00703     /* We are placing the address (16-bit) of the functions
00704      * onto the stack in reverse byte order (least significant first, followed
00705      * by most significant).  This is because the "return" assembly instructions
00706      * (ret and reti) pop addresses off in BIG ENDIAN (most sig. first, least sig.
00707      * second), even though the AT90 is LITTLE ENDIAN machine.
00708      */
00709     stack_top[34] = (uint8_t)((uint16_t)(kernel_request_create_args.f) >> 8);
00710     stack_top[35] = (uint8_t)(uint16_t)(kernel_request_create_args.f);
00711     stack_top[36] = (uint8_t)((uint16_t)Task_Terminate >> 8);
00712     stack_top[37] = (uint8_t)(uint16_t)Task_Terminate;
00713 
00714     /*
00715      * Make stack pointer point to cell above stack (the top).
00716      * Make room for 32 registers, SREG and two return addresses.
00717      */
00718     p->sp = stack_top;
00719 
00720     p->state = READY;
00721     p->arg = kernel_request_create_args.arg;
00722     p->level = kernel_request_create_args.level;
00723     p->name = kernel_request_create_args.name;
00724 
00725     switch(kernel_request_create_args.level)
00726     {
00727     case PERIODIC:
00728         /* Put this newly created PPP task into the PPP lookup array */
00729         name_to_task_ptr[kernel_request_create_args.name] = p;
00730         break;
00731 
00732     case SYSTEM:
00733         /* Put SYSTEM and Round Robin tasks on a queue. */
00734         enqueue(&system_queue, p);
00735         break;
00736 
00737     case RR:
00738         /* Put SYSTEM and Round Robin tasks on a queue. */
00739         enqueue(&rr_queue, p);
00740         break;
00741 
00742     default:
00743         /* idle task does not go in a queue */
00744         break;
00745     }
00746     
00747 
00748     return 1;
00749 }
00750 
00751 
00752 /**
00753  * @brief Kernel function to destroy the current task.
00754  */
00755 static void kernel_terminate_task(void)
00756 {
00757     /* deallocate all resources used by this task */
00758     cur_task->state = DEAD;
00759     if(cur_task->level == PERIODIC)
00760     {
00761         name_to_task_ptr[cur_task->name] = NULL;
00762     }
00763     enqueue(&dead_pool_queue, cur_task);
00764 }
00765 
00766 
00767 /**
00768  * @brief Kernel function to place current task in a waiting queue.
00769  */
00770 static void kernel_event_wait(void)
00771 {
00772     /* Check the handle of the event to ensure that it is initialized. */
00773     uint8_t handle = (uint8_t)((uint16_t)(kernel_request_event_ptr) - 1);
00774 
00775     if(handle >= num_events_created)
00776     {
00777         /* Error code. */
00778         error_msg = ERR_RUN_5_WAIT_ON_BAD_EVENT;
00779         OS_Abort();
00780     }
00781     else if(cur_task->level == PERIODIC)
00782     {
00783         error_msg = ERR_RUN_7_PERIODIC_CALLED_WAIT;
00784         OS_Abort();
00785     }
00786     else
00787     {
00788         /* Place this task in a queue. */
00789         cur_task->state = WAITING;
00790         enqueue(&event_queue[handle], cur_task);
00791     }
00792 }
00793 
00794 
00795 /**
00796  * @brief Kernel function to signal waiting processes.
00797  *
00798  * Handles signals and broadcasts, with or without yielding.
00799  * May cause current task to be suspended.
00800  */
00801 static void kernel_event_signal(uint8_t is_broadcast, uint8_t and_next)
00802 {
00803     /* Check the handle of the event to ensure that it is initialized. */
00804     uint8_t handle = (uint8_t)((uint16_t)(kernel_request_event_ptr) - 1);
00805 
00806     if(handle >= num_events_created)
00807     {
00808         /* Error code. */
00809         error_msg = ERR_RUN_4_SIGNAL_ON_BAD_EVENT;
00810         OS_Abort();
00811     }
00812     else
00813     {
00814         uint8_t make_ready = 0;
00815 
00816         /* Signal appropriately, and perhaps place this task in a queue. */
00817         if(and_next)
00818         {
00819             make_ready = 1;
00820         }
00821 
00822         while(event_queue[handle].head != NULL)
00823         {
00824             /* The signalled task */
00825             task_descriptor_t* task_ptr = dequeue(&event_queue[handle]);
00826             task_ptr->state = READY;
00827 
00828             switch(task_ptr->level)
00829             {
00830             case SYSTEM:
00831                 enqueue(&system_queue, task_ptr);
00832                 break;
00833             case PERIODIC:
00834                 break;
00835             case RR:
00836                 enqueue(&rr_queue, task_ptr);
00837                 break;
00838             default:
00839                 break;
00840             }
00841 
00842             /* Check to see if current task needs to be pre-empted */
00843             if(cur_task != idle_task && !make_ready)
00844             {
00845                 if(cur_task->level != SYSTEM && task_ptr->level == SYSTEM)
00846                 {
00847                     make_ready = 1;
00848                 }
00849                 else if(cur_task->level == RR &&
00850                     slot_task_finished == 0 &&
00851                     task_ptr == name_to_task_ptr[PPP[slot_name_index]])
00852                 {
00853                     make_ready = 1;
00854                 }
00855             }
00856 
00857             if(!is_broadcast)
00858             {
00859                 break;
00860             }
00861         }
00862 
00863         if(make_ready && cur_task != idle_task)
00864         {
00865             cur_task->state = READY;
00866             if(cur_task->level == RR)
00867             {
00868                 enqueue(&rr_queue, cur_task);
00869             }
00870         }
00871     }
00872 }
00873 
00874 
00875 /*
00876  * Queue manipulation.
00877  */
00878 
00879 /**
00880  * @brief Add a task to the tail of the queue
00881  *
00882  * @param queue_ptr the queue to insert in
00883  * @param task_to_add the task descriptor to add
00884  */
00885 static void enqueue(queue_t* queue_ptr, task_descriptor_t* task_to_add)
00886 {
00887     task_to_add->next = NULL;
00888 
00889     if(queue_ptr->head == NULL)
00890     {
00891         /* empty queue */
00892         queue_ptr->head = task_to_add;
00893         queue_ptr->tail = task_to_add;
00894     }
00895     else
00896     {
00897         /* put task at the back of the queue */
00898         queue_ptr->tail->next = task_to_add;
00899         queue_ptr->tail = task_to_add;
00900     }
00901 }
00902 
00903 
00904 /**
00905  * @brief Pops head of queue and returns it.
00906  *
00907  * @param queue_ptr the queue to pop
00908  * @return the popped task descriptor
00909  */
00910 static task_descriptor_t* dequeue(queue_t* queue_ptr)
00911 {
00912     task_descriptor_t* task_ptr = queue_ptr->head;
00913 
00914     if(queue_ptr->head != NULL)
00915     {
00916         queue_ptr->head = queue_ptr->head->next;
00917         task_ptr->next = NULL;
00918     }
00919 
00920     return task_ptr;
00921 }
00922 
00923 
00924 /**
00925  * @brief Update the current time. 
00926  *
00927  * Perhaps move to the next time slot of the PPP.
00928  */
00929 static void kernel_update_ticker(void)
00930 {
00931     /* PORTD ^= LED_D5_RED; */
00932 
00933     if(PT > 0)
00934     {
00935         --ticks_remaining;
00936 
00937         if(ticks_remaining == 0)
00938         {
00939             /* If Periodic task still running then error */
00940             if(cur_task != NULL && cur_task->level == PERIODIC && slot_task_finished == 0)
00941             {
00942                 /* error handling */
00943                 error_msg = ERR_RUN_3_PERIODIC_TOOK_TOO_LONG;
00944                 OS_Abort();
00945             }
00946 
00947             slot_name_index += 2;
00948             if(slot_name_index >= 2 * PT)
00949             {
00950                 slot_name_index = 0;
00951             }
00952 
00953             ticks_remaining = PPP[slot_name_index + 1];
00954 
00955             if(PPP[slot_name_index] == IDLE || name_to_task_ptr[PPP[slot_name_index]] == NULL)
00956             {
00957                 slot_task_finished = 1;
00958             }
00959             else
00960             {
00961                 slot_task_finished = 0;
00962             }
00963         }
00964     }
00965 }
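/*
 * Worked example (hypothetical schedule, for illustration only): with
 * PPP = { 1, 2, IDLE, 3 } and PT = 2, the ticker steps through the slots as
 *
 *     slot_name_index   owner of slot    ticks_remaining reloaded from
 *     0                 task named 1     PPP[1] == 2
 *     2                 (idle slot)      PPP[3] == 3
 *     0                 task named 1     PPP[1] == 2   ... and so on.
 */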
00966 
00967 
00968 /**
00969  * @brief Validate the PPP array.
00970  */
00971 static void check_PPP_names(void)
00972 {
00973     uint8_t i;
00974     uint8_t name;
00975 
00976     for(i = 0; i < 2 * PT; i += 2)
00977     {
00978         name = PPP[i];
00979 
00980         /* name == IDLE or 0 < name <= MAXNAME */
00981         if(name <= MAXNAME)
00982         {
00983             name_in_PPP[name] = 1;
00984         }
00985         else
00986         {
00987             error_msg = ERR_1_PPP_NAME_OUT_OF_RANGE;
00988             OS_Abort();
00989         }
00990     }
00991 
00992 }
00993 
00994 #undef SLOW_CLOCK
00995 
00996 #ifdef SLOW_CLOCK
00997 /**
00998  * @brief For DEBUGGING to make the clock run slower
00999  *
01000  * Divide CLKI/O by 64 on timer 1 to run at 125 kHz: CS1[2:0] = 011;
01001  * divide by 8 to run at 1 MHz: CS1[2:0] = 010.
01002  */
01003 static void kernel_slow_clock(void)
01004 {
01005     TCCR1B &= ~(_BV(CS12) | _BV(CS10));
01006     TCCR1B |= (_BV(CS11));
01007 }
01008 #endif
01009 
01010 /**
01011  * @brief Setup the RTOS and create main() as the first SYSTEM level task.
01012  *
01013  * Point of entry from the C runtime crt0.S.
01014  */
01015 void OS_Init()
01016 {
01017     int i;
01018 
01019     /* Set up the clocks */
01020     CLOCK8MHZ();
01021 
01022     TCCR1B &= ~(_BV(CS12) | _BV(CS11));
01023     TCCR1B |= (_BV(CS10));
01024 
01025 #ifdef SLOW_CLOCK 
01026     kernel_slow_clock();
01027 #endif
01028 
01029     check_PPP_names();
01030 
01031     /* 
01032      * Initialize dead pool to contain all but last task descriptor.
01033      *
01034      * DEAD == 0, already set in .init4
01035      */
01036     for (i = 0; i < MAXPROCESS - 1; i++)
01037     {
01038         task_desc[i].state = DEAD;
01039         name_to_task_ptr[i] = NULL;
01040         task_desc[i].next = &task_desc[i + 1];
01041     }
01042     task_desc[MAXPROCESS - 1].next = NULL;
01043     dead_pool_queue.head = &task_desc[0];
01044     dead_pool_queue.tail = &task_desc[MAXPROCESS - 1];
01045 
01046     /* Create idle "task" */
01047     kernel_request_create_args.f = (voidfuncvoid_ptr)idle;
01048     kernel_request_create_args.level = NULL;
01049     kernel_create_task();
01050 
01051     /* Create "main" task as SYSTEM level. */
01052     kernel_request_create_args.f = (voidfuncvoid_ptr)main;
01053     kernel_request_create_args.level = SYSTEM;
01054     kernel_create_task();
01055 
01056     /* First time through. Select "main" task to run first. */
01057     cur_task = task_desc;
01058     cur_task->state = RUNNING;
01059     dequeue(&system_queue);
01060 
01061     /* Initialize time slot */
01062     if(PT > 0)
01063     {
01064         ticks_remaining = PPP[1];
01065     }
01066 
01067     /* Set up Timer 1 Output Compare interrupt, the TICK clock. */
01068     TIMSK1 |= _BV(OCIE1A);
01069     OCR1A = TCNT1 + 40000;
01070     /* Clear flag. */
01071     TIFR1 = _BV(OCF1A);
01072 
01073     /*
01074      * The main loop of the RTOS kernel.
01075      */
01076     kernel_main_loop();
01077 }
01078 
01079 
01080 
01081 
01082 /**
01083  *  @brief Delay function adapted from <util/delay.h>
01084  */
01085 static void _delay_25ms(void)
01086 {
01087     uint16_t i;
01088 
01089     /* 4 cycles per iteration * 50000 iterations = 200000 cycles = 25 ms at 8 MHz */
01090     asm volatile ("1: sbiw %0,1" "\n\tbrne 1b" : "=w" (i) : "0" (50000));
01091 }
01092 
01093 
01094 /** @brief Abort the execution of this RTOS due to an unrecoverable error.
01095  */
01096 void OS_Abort(void)
01097 {
01098     uint8_t i, j;
01099     uint8_t flashes, mask;
01100 
01101     Disable_Interrupt();
01102 
01103     /* Initialize port for output */
01104     DDRD = LED_RED_MASK | LED_GREEN_MASK;
01105 
01106     if(error_msg < ERR_RUN_1_USER_CALLED_OS_ABORT)
01107     {
01108         flashes = error_msg + 1;
01109         mask = LED_GREEN_MASK;
01110     }
01111     else
01112     {
01113         flashes = error_msg + 1 - ERR_RUN_1_USER_CALLED_OS_ABORT;
01114         mask = LED_RED_MASK;
01115     }
01116 
01117 
01118     for(;;)
01119     {
01120         PORTD = (uint8_t)(LED_RED_MASK | LED_GREEN_MASK);
01121 
01122         for(i = 0; i < 100; ++i)
01123         {
01124                _delay_25ms();
01125         }
01126 
01127         PORTD = (uint8_t) 0;
01128 
01129         for(i = 0; i < 40; ++i)
01130         {
01131                _delay_25ms();
01132         }
01133 
01134  
01135         for(j = 0; j < flashes; ++j)
01136         {
01137             PORTD = mask;
01138 
01139             for(i = 0; i < 10; ++i)
01140             {
01141                 _delay_25ms();
01142             }
01143 
01144             PORTD = (uint8_t) 0;
01145 
01146             for(i = 0; i < 10; ++i)
01147             {
01148                 _delay_25ms();
01149             }
01150         }
01151 
01152         for(i = 0; i < 20; ++i)
01153         {
01154             _delay_25ms();
01155         }
01156     }
01157 }
01158 
01159 
01160 /**
01161  * \param f  a parameterless function to be created as a process instance
01162  * \param arg an integer argument to be assigned to this process instance
01163  * \param level assigned scheduling level: SYSTEM, PERIODIC or RR
01164  * \param name assigned PERIODIC process name
01165  * \return 0 if not successful; otherwise non-zero.
01166  * \sa Task_GetArg(), PPP[].
01167  *
01168  *  A new process  is created to execute the parameterless
01169  *  function \a f with an initial parameter \a arg, which is retrieved
01170  *  by a call to Task_GetArg().  If a new process cannot be
01171  *  created, 0 is returned; otherwise, it returns non-zero.
01172  *  The created process will belong to its scheduling \a level.
01173  *  If the process is PERIODIC, then its \a name is a user-specified name
01174  *  to be used in the PPP[] array. Otherwise, \a name is ignored.
01175  * \sa \ref policy
01176  */
01177 int Task_Create(void (*f)(void), int arg, unsigned int level, unsigned int name)
01178 {
01179     int retval;
01180     uint8_t sreg;
01181 
01182     sreg = SREG;
01183     Disable_Interrupt();
01184 
01185     kernel_request_create_args.f = (voidfuncvoid_ptr)f;
01186     kernel_request_create_args.arg = arg;
01187     kernel_request_create_args.level = (uint8_t)level;
01188     kernel_request_create_args.name = (uint8_t)name;
01189 
01190     kernel_request = TASK_CREATE;
01191     enter_kernel();
01192 
01193     retval = kernel_request_retval;
01194     SREG = sreg;
01195 
01196     return retval;
01197 }
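/*
 * A minimal usage sketch (the task function, argument and level below are
 * hypothetical, chosen only for illustration):
 *
 *     void worker(void)                    // body of an RR task
 *     {
 *         int arg = Task_GetArg();         // the 42 passed at creation
 *         for(;;)
 *         {
 *             // ... do some work with arg ...
 *             Task_Next();                 // yield to other ready tasks
 *         }
 *     }
 *
 *     // inside main(), which runs as the first SYSTEM task:
 *     if(Task_Create(worker, 42, RR, 0) == 0)
 *     {
 *         // creation failed: no free task descriptors
 *     }
 */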
01198 
01199 
01200 /**
01201   * @brief The calling task gives up its share of the processor voluntarily.
01202   */
01203 void Task_Next()
01204 {
01205     uint8_t sreg;
01206 
01207     sreg = SREG;
01208     Disable_Interrupt();
01209 
01210     kernel_request = TASK_NEXT;
01211     enter_kernel();
01212 
01213     SREG = sreg;
01214 }
01215 
01216 
01217 /**
01218   * @brief The calling task terminates itself.
01219   */
01220 void Task_Terminate()
01221 {
01222     uint8_t sreg;
01223 
01224     sreg = SREG;
01225     Disable_Interrupt();
01226 
01227     kernel_request = TASK_TERMINATE;
01228     enter_kernel();
01229 
01230     SREG = sreg;
01231 }
01232 
01233 
01234 /** @brief Retrieve the assigned parameter.
01235  */
01236 int Task_GetArg(void)
01237 {
01238     int arg;
01239     uint8_t sreg;
01240 
01241     sreg = SREG;
01242     Disable_Interrupt();
01243 
01244     arg = cur_task->arg;
01245 
01246     SREG = sreg;
01247 
01248     return arg;
01249 }
01250 
01251 /**
01252  * \return a non-NULL Event descriptor if successful; NULL otherwise.
01253  *
01254  *  @brief Initialize a new, non-NULL Event descriptor.
01255  */
01256 EVENT *Event_Init(void)
01257 {
01258     volatile EVENT* event_ptr;
01259     uint8_t sreg;
01260 
01261     sreg = SREG;
01262     Disable_Interrupt();
01263 
01264     kernel_request = EVENT_INIT;
01265     enter_kernel();
01266 
01267     event_ptr = kernel_request_event_ptr;
01268 
01269     SREG = sreg;
01270 
01271     return event_ptr;
01272 }
01273 
01274 
01275 /**
01276   * \param e  an Event descriptor
01277   *
01278   * @brief Wait for the next occurrence of a signal on \a e. The calling process always blocks.
01279   */
01280 void Event_Wait(EVENT *e)
01281 {
01282     uint8_t sreg;
01283 
01284     sreg = SREG;
01285     Disable_Interrupt();
01286 
01287     kernel_request = EVENT_WAIT;
01288     kernel_request_event_ptr = e;
01289     enter_kernel();
01290 
01291     SREG = sreg;
01292 }
01293 
01294 
01295 /**
01296   * \param e an Event descriptor
01297   *
01298   * @brief Resume a \b single waiting task on \a e. It is a \a no-op if there is no waiting process.
01299   * \sa Event_Wait()
01300   */
01301 void Event_Signal(EVENT *e)
01302 {
01303     uint8_t sreg;
01304 
01305     sreg = SREG;
01306     Disable_Interrupt();
01307 
01308     kernel_request = EVENT_SIGNAL;
01309     kernel_request_event_ptr = e;
01310     enter_kernel();
01311 
01312     SREG = sreg;
01313 }
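/*
 * A minimal wait/signal sketch (tasks and the event name are hypothetical,
 * for illustration only). SYSTEM and RR tasks may block on an event;
 * PERIODIC tasks must not call Event_Wait().
 *
 *     EVENT* data_ready;                   // created once, e.g. in main(): data_ready = Event_Init();
 *
 *     void consumer(void)                  // an RR task
 *     {
 *         for(;;)
 *         {
 *             Event_Wait(data_ready);      // block until the next signal
 *             // ... consume the data ...
 *         }
 *     }
 *
 *     void producer(void)                  // another RR or SYSTEM task
 *     {
 *         // ... produce the data ...
 *         Event_Signal(data_ready);        // wake at most one waiter
 *     }
 */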
01314 
01315 
01316 /**
01317   * \param e  an Event descriptor
01318   *
01319   * @brief Resume \b ALL waiting tasks on \a e. It is a \a no-op if there is no waiting process.
01320   * \sa Event_Wait()
01321   */
01322 void Event_Broadcast(EVENT *e)
01323 {
01324     uint8_t sreg;
01325 
01326     sreg = SREG;
01327     Disable_Interrupt();
01328 
01329     kernel_request = EVENT_BROADCAST;
01330     kernel_request_event_ptr = e;
01331     enter_kernel();
01332 
01333     SREG = sreg;
01334 }
01335 
01336 
01337 /**
01338   * \param e  an Event descriptor
01339   *
01340   * @brief Resume a waiting task on \a e and at the same time relinquish the processor.
01341   *
01342   * This is equivalent to "Event_Signal( e ); Task_Next()" in concept. The
01343   * fundamental difference is that these two operations are performed as
01344   * an indivisible unit. So conceptually, the calling task resumes another
01345   * waiting task and gives up its share of the processor simultaneously.
01346   * \sa Event_Signal(), Task_Next()
01347   */
01348 void  Signal_And_Next(EVENT *e)
01349 {
01350     uint8_t sreg;
01351 
01352     sreg = SREG;
01353     Disable_Interrupt();
01354 
01355     kernel_request = EVENT_SIGNAL_AND_NEXT;
01356     kernel_request_event_ptr = e;
01357     enter_kernel();
01358 
01359     SREG = sreg;
01360 }
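/*
 * A typical use (hypothetical task and event, for illustration only): a
 * PERIODIC task publishes its result and ends its time slot in one
 * indivisible step, instead of calling Event_Signal() and Task_Next()
 * separately.
 *
 *     void sampler(void)                      // a PERIODIC task
 *     {
 *         for(;;)
 *         {
 *             // ... take a sample ...
 *             Signal_And_Next(sample_ready);  // wake a waiter and finish this slot
 *         }
 *     }
 */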
01361 
01362 
01363 /**
01364   * \param e  an Event descriptor
01365   *
01366   * @brief Resume \b ALL waiting tasks on \a e and at the same time relinquish the processor.
01367   *
01368   * This is equivalent to "Event_Broadcast( e ); Task_Next()" in concept.
01369   * \sa Event_Broadcast(), Task_Next()
01370   */
01371 void  Broadcast_And_Next(EVENT *e)
01372 {
01373     uint8_t sreg;
01374 
01375     sreg = SREG;
01376     Disable_Interrupt();
01377 
01378     kernel_request = EVENT_BROADCAST_AND_NEXT;
01379     kernel_request_event_ptr = e;
01380     enter_kernel();
01381 
01382     SREG = sreg;
01383 }
01384 
