-rw-r--r--  nuttx/drivers/rwbuffer.c     |  2
-rw-r--r--  nuttx/sched/os_start.c       | 21
-rwxr-xr-x  nuttx/sched/sched_garbage.c  | 44
-rwxr-xr-x  nuttx/sched/work_cancel.c    |  4
-rwxr-xr-x  nuttx/sched/work_thread.c    | 20
5 files changed, 49 insertions, 42 deletions
diff --git a/nuttx/drivers/rwbuffer.c b/nuttx/drivers/rwbuffer.c
index eb696ae6a..e495660f3 100644
--- a/nuttx/drivers/rwbuffer.c
+++ b/nuttx/drivers/rwbuffer.c
@@ -387,7 +387,7 @@ int rwb_initialize(FAR struct rwbuffer_s *rwb)
rwb->wrbuffer = NULL;
#endif
#ifdef CONFIG_FS_READAHEAD
- DEBUGASSERT(rwb->rhnblocks > 0);
+ DEBUGASSERT(rwb->rhblocks > 0);
DEBUGASSERT(rwb->rhreload != NULL);
rwb->rhbuffer = NULL;
#endif
diff --git a/nuttx/sched/os_start.c b/nuttx/sched/os_start.c
index 41fc735e3..440fdb6c3 100644
--- a/nuttx/sched/os_start.c
+++ b/nuttx/sched/os_start.c
@@ -461,13 +461,24 @@ void os_start(void)
sdbg("Beginning Idle Loop\n");
for (;;)
{
- /* Peform garbage collection (if it is not being done by the worker
- * thread. This cleans-up memory de-allocations that was queued
- * because it could not be freed in that execution context (for
+ /* Perform garbage collection (if it is not being done by the worker
+ * thread). This cleans up memory de-allocations that were queued
+ * because they could not be freed in that execution context (for
* example, if the memory was freed from an interrupt handler).
*/
-#ifndef CONFIG_SCHED_WORKQEUE
- sched_garbagecollection();
+
+#ifndef CONFIG_SCHED_WORKQUEUE
+ /* We must have exclusive access to the memory manager to do this
+ * BUT the idle task cannot wait on a semaphore. So we only do
+ * the cleanup now if we can get the semaphore -- this should be
+ * possible because if the IDLE thread is running, no other task is!
+ */
+
+ if (mm_trysemaphore() == 0)
+ {
+ sched_garbagecollection();
+ mm_givesemaphore();
+ }
#endif
/* Perform any processor-specific idle state operations */
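The os_start.c change reduces to one pattern: the IDLE thread must never block, so garbage collection now runs only when the memory-manager semaphore can be taken without waiting. The condensed sketch below reuses only the calls visible in this hunk (mm_trysemaphore(), mm_givesemaphore(), sched_garbagecollection()); it is illustrative rather than a standalone compilable module, and the rest of the idle loop is elided.

for (;;)
  {
#ifndef CONFIG_SCHED_WORKQUEUE
    /* Non-blocking attempt only: the IDLE thread cannot wait on the
     * memory manager semaphore.
     */

    if (mm_trysemaphore() == 0)
      {
        sched_garbagecollection();  /* Drain the delayed deallocation queue */
        mm_givesemaphore();         /* Hand the allocator back to other tasks */
      }
#endif

    /* Processor-specific idle processing follows here in os_start(). */
  }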
diff --git a/nuttx/sched/sched_garbage.c b/nuttx/sched/sched_garbage.c
index 6dc1a5c88..66c7bafae 100755
--- a/nuttx/sched/sched_garbage.c
+++ b/nuttx/sched/sched_garbage.c
@@ -42,8 +42,6 @@
#include <stdlib.h>
-#include <nuttx/mm.h>
-
#include "os_internal.h"
/****************************************************************************
@@ -93,35 +91,29 @@
void sched_garbagecollection(void)
{
- /* Check if there is anything in the delayed deallocation list. If there
- * is deallocate it now. We must have exclusive access to the memory manager
- * to do this BUT the idle task cannot wait on a semaphore. So we only do
- * the cleanup now if we can get the semaphore -- and this should be possible
- * because if the IDLE thread is running, no other task is!
- */
-
-#ifdef CONFIG_SCHED_WORKQUEUE
- mm_takesemaphore();
-#else
- if (mm_trysemaphore() == 0)
-#endif
+ irqstate_t flags;
+ void *address;
+
+ /* Test if the delayed deallocation queue is empty. No special protection
+ * is needed because this is an atomic test.
+ */
+
+ while (g_delayeddeallocations.head)
{
- while (g_delayeddeallocations.head)
- {
- /* Remove the first delayed deallocation. */
+ /* Remove the first delayed deallocation. This is not atomic and so
+ * we must disable interrupts around the queue operation.
+ */
- irqstate_t saved_state = irqsave();
- void *address = (void*)sq_remfirst((FAR sq_queue_t*)&g_delayeddeallocations);
- irqrestore(saved_state);
+ flags = irqsave();
+ address = (void*)sq_remfirst((FAR sq_queue_t*)&g_delayeddeallocations);
+ irqrestore(flags);
- /* Then deallocate it */
+ /* Then deallocate it. */
- if (address)
- {
- free(address);
- }
+ if (address)
+ {
+ free(address);
}
- mm_givesemaphore();
}
}
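The rewritten sched_garbagecollection() is now purely the consumer of g_delayeddeallocations: it no longer takes the memory-manager semaphore itself, so it can be called from either the worker thread or the IDLE loop shown above. The producer side is not part of this patch; a hypothetical enqueue from a context that cannot call free() directly (for example, an interrupt handler) might look like the sketch below. The helper name deferred_free() is invented for illustration, and the sq_addlast()-style call is an assumption inferred from the sq_remfirst()/sq_queue_t usage in the hunk above.

extern sq_queue_t g_delayeddeallocations;     /* The queue drained above */

static void deferred_free(FAR void *address)  /* Hypothetical helper name */
{
  /* The dequeue above runs with interrupts disabled, so the enqueue must be
   * protected the same way to keep the queue consistent.
   */

  irqstate_t flags = irqsave();
  sq_addlast((FAR sq_entry_t *)address, (FAR sq_queue_t *)&g_delayeddeallocations);
  irqrestore(flags);
}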
diff --git a/nuttx/sched/work_cancel.c b/nuttx/sched/work_cancel.c
index 1a5cce312..d3a79f0b6 100755
--- a/nuttx/sched/work_cancel.c
+++ b/nuttx/sched/work_cancel.c
@@ -101,8 +101,8 @@ int work_cancel(struct work_s *work)
*/
flags = irqsave();
- DEBUGASSERT(work->flink || (FAR dq_entry_t *)work == g_work.head);
- DEBUGASSERT(work->blink || (FAR dq_entry_t *)work == g_work.tail);
+ DEBUGASSERT(work->dq.flink || (FAR dq_entry_t *)work == g_work.head);
+ DEBUGASSERT(work->dq.blink || (FAR dq_entry_t *)work == g_work.tail);
dq_rem((FAR dq_entry_t *)work, &g_work);
irqrestore(flags);
return OK;
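The corrected asserts go through work->dq, which implies that struct work_s embeds its doubly-linked queue entry in a member named dq, presumably placed first so that the (FAR dq_entry_t *)work casts used here and in work_thread.c stay valid. A layout consistent with the fields referenced across this commit is sketched below; the exact field types are assumptions, not something the patch shows.

struct work_s
{
  struct dq_entry_s dq;               /* Presumably first: (FAR dq_entry_t *)work is &work->dq */
  FAR void (*worker)(FAR void *arg);  /* Invoked as work->worker(work->arg) in work_thread.c */
  FAR void *arg;                      /* Argument handed to the worker */
  uint32_t  qtime;                    /* Time the work was queued (work->qtime) */
  uint32_t  delay;                    /* Minimum ticks before the work runs (work->delay) */
};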
diff --git a/nuttx/sched/work_thread.c b/nuttx/sched/work_thread.c
index 4df29dd01..35a7f55ca 100755
--- a/nuttx/sched/work_thread.c
+++ b/nuttx/sched/work_thread.c
@@ -113,7 +113,12 @@ int work_thread(int argc, char *argv[])
usleep(CONFIG_SCHED_WORKPERIOD);
- /* First, clean-up any delayed memory deallocations */
+ /* First, perform garbage collection. This cleans up memory de-allocations
+ * that were queued because they could not be freed in that execution
+ * context (for example, if the memory was freed from an interrupt handler).
+ * NOTE: If the work thread is disabled, this clean-up is performed by
+ * the IDLE thread (at a very, very low priority).
+ */
sched_garbagecollection();
@@ -131,21 +136,20 @@ int work_thread(int argc, char *argv[])
if (work->delay == 0 || g_system_timer - work->qtime > work->delay)
{
- /* Remove the work at the head of the list. And re-enable
- * interrupts while the work is performed.
- */
+ /* Remove the ready-to-execute work from the list */
- (void)dq_remfirst(&g_work);
+ (void)dq_rem((struct dq_entry_s *)work, &g_work);
/* Do the work. Re-enable interrupts while the work is being
- * performed... we don't have any idea how long that will take
+ * performed... we don't have any idea how long that will take!
*/
irqrestore(flags);
work->worker(work->arg);
- /* Now, unfortunately, since we re-enabled interrupts we have
- * to start back at the head of the list.
+ /* Now, unfortunately, since we re-enabled interrupts we don't know
+ * the state of the work list and we will have to start back at
+ * the head of the list.
*/
flags = irqsave();
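The last two hunks only make sense against the shape of the processing loop they sit in: the worker callback runs with interrupts enabled and may queue, cancel, or re-queue work, so afterwards the thread cannot trust its old position in g_work and must restart from the head, and it must remove the specific entry it examined rather than whatever happens to be at the head. The loop below is a reconstruction from the fragments visible in this commit (dq_rem(), g_work.head, work->dq.flink, work->qtime, work->delay, work->worker, work->arg); it is illustrative only and elides the declarations that work_thread.c itself provides.

flags = irqsave();
work  = (FAR struct work_s *)g_work.head;
while (work)
  {
    if (work->delay == 0 || g_system_timer - work->qtime > work->delay)
      {
        /* Remove this specific entry; after earlier callbacks the head of
         * g_work may no longer be the entry we are looking at.
         */

        (void)dq_rem((FAR dq_entry_t *)work, &g_work);

        /* Run the callback with interrupts enabled; it can take arbitrarily
         * long and may modify the work queue.
         */

        irqrestore(flags);
        work->worker(work->arg);

        /* Re-disable interrupts and start over from the (possibly new) head. */

        flags = irqsave();
        work  = (FAR struct work_s *)g_work.head;
      }
    else
      {
        /* Not yet time for this entry: just advance to the next one. */

        work = (FAR struct work_s *)work->dq.flink;
      }
  }

irqrestore(flags);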