path: root/nuttx/arch/avr/src/avr32/up_unblocktask.c
Diffstat (limited to 'nuttx/arch/avr/src/avr32/up_unblocktask.c')
-rw-r--r--  nuttx/arch/avr/src/avr32/up_unblocktask.c  95
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/nuttx/arch/avr/src/avr32/up_unblocktask.c b/nuttx/arch/avr/src/avr32/up_unblocktask.c
index 5ce48e366..1fcfa1dd7 100644
--- a/nuttx/arch/avr/src/avr32/up_unblocktask.c
+++ b/nuttx/arch/avr/src/avr32/up_unblocktask.c
@@ -1,7 +1,7 @@
/****************************************************************************
* arch/avr/src/avr32/up_unblocktask.c
*
- * Copyright (C) 2010 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2010, 2013 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -81,77 +81,72 @@
void up_unblock_task(struct tcb_s *tcb)
{
+ struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+
/* Verify that the context switch can be performed */
- if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
- (tcb->task_state > LAST_BLOCKED_STATE))
- {
- PANIC(OSERR_BADUNBLOCKSTATE);
- }
- else
- {
- struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
+ ASSERT((tcb->task_state >= FIRST_BLOCKED_STATE) &&
+ (tcb->task_state <= LAST_BLOCKED_STATE));
- /* Remove the task from the blocked task list */
+ /* Remove the task from the blocked task list */
- sched_removeblocked(tcb);
+ sched_removeblocked(tcb);
- /* Reset its timeslice. This is only meaningful for round
- * robin tasks but it doesn't hurt to do it for everything
- */
+ /* Reset its timeslice. This is only meaningful for round
+ * robin tasks but it doesn't hurt to do it for everything
+ */
#if CONFIG_RR_INTERVAL > 0
- tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
+ tcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
#endif
- /* Add the task in the correct location in the prioritized
- * g_readytorun task list
+ /* Add the task in the correct location in the prioritized
+ * g_readytorun task list
+ */
+
+ if (sched_addreadytorun(tcb))
+ {
+ /* The currently active task has changed! We need to do
+ * a context switch to the new task.
+ *
+ * Are we in an interrupt handler?
*/
- if (sched_addreadytorun(tcb))
+ if (current_regs)
{
- /* The currently active task has changed! We need to do
- * a context switch to the new task.
- *
- * Are we in an interrupt handler?
+ /* Yes, then we have to do things differently.
+ * Just copy the current_regs into the OLD rtcb.
*/
- if (current_regs)
- {
- /* Yes, then we have to do things differently.
- * Just copy the current_regs into the OLD rtcb.
- */
+ up_savestate(rtcb->xcp.regs);
- up_savestate(rtcb->xcp.regs);
-
- /* Restore the exception context of the rtcb at the (new) head
- * of the g_readytorun task list.
- */
+ /* Restore the exception context of the rtcb at the (new) head
+ * of the g_readytorun task list.
+ */
- rtcb = (struct tcb_s*)g_readytorun.head;
+ rtcb = (struct tcb_s*)g_readytorun.head;
- /* Then switch contexts */
+ /* Then switch contexts */
- up_restorestate(rtcb->xcp.regs);
- }
+ up_restorestate(rtcb->xcp.regs);
+ }
- /* No, then we will need to perform the user context switch */
+ /* No, then we will need to perform the user context switch */
- else
- {
- /* Switch context to the context of the task at the head of the
- * ready to run list.
- */
+ else
+ {
+ /* Switch context to the context of the task at the head of the
+ * ready to run list.
+ */
- struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
- up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
+ struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
+ up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
- /* up_switchcontext forces a context switch to the task at the
- * head of the ready-to-run list. It does not 'return' in the
- * normal sense. When it does return, it is because the blocked
- * task is again ready to run and has execution priority.
- */
- }
+ /* up_switchcontext forces a context switch to the task at the
+ * head of the ready-to-run list. It does not 'return' in the
+ * normal sense. When it does return, it is because the blocked
+ * task is again ready to run and has execution priority.
+ */
}
}
}
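
The change above is structural rather than functional: the old code wrapped the whole body of up_unblock_task() in an else branch behind a PANIC(OSERR_BADUNBLOCKSTATE) check, while the new code asserts the same precondition up front and keeps the body at the top level, which is why nearly every line is re-indented. The standalone sketch below (plain C, not NuttX code; unblock_old(), unblock_new(), and the task_state enum are invented for illustration) shows the same refactor pattern in isolation.

#include <assert.h>
#include <stdio.h>

enum task_state
{
  TSTATE_READY,
  TSTATE_BLOCKED_FIRST,              /* Stand-in for FIRST_BLOCKED_STATE */
  TSTATE_BLOCKED_LAST                /* Stand-in for LAST_BLOCKED_STATE */
};

/* Old shape: report the bad state, then nest all of the real work in else */

static void unblock_old(enum task_state state)
{
  if ((state < TSTATE_BLOCKED_FIRST) || (state > TSTATE_BLOCKED_LAST))
    {
      fprintf(stderr, "Bad unblock state\n");   /* PANIC(...) in the original */
    }
  else
    {
      /* Remove from the blocked list, reset the timeslice, re-add to the
       * ready-to-run list... everything sits one indentation level deeper.
       */

      printf("unblocked (old style)\n");
    }
}

/* New shape: assert the precondition first, then do the work at the top
 * level of the function.  Behavior on valid input is unchanged; one level
 * of nesting disappears.
 */

static void unblock_new(enum task_state state)
{
  assert((state >= TSTATE_BLOCKED_FIRST) && (state <= TSTATE_BLOCKED_LAST));

  printf("unblocked (new style)\n");
}

int main(void)
{
  unblock_old(TSTATE_BLOCKED_FIRST);
  unblock_new(TSTATE_BLOCKED_FIRST);
  return 0;
}

The diffstat's 45 insertions against 50 deletions reflects this: the same statements and comments come back at a shallower indentation, and the lines that held the old PANIC() branch go away.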