author     patacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3>   2013-03-10 15:32:33 +0000
committer  patacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3>   2013-03-10 15:32:33 +0000
commit     8e0c015c2120bf7d76a0c41d1160a745c3565a50 (patch)
tree       60de505ed836f8805b07ead45c2fe36cfc15be6a /nuttx/sched
parent     dda5be5c1fc672b5d9cb3a910b5e0cb0a41046c5 (diff)
Create sched_ufree and sched_kfree from sched_free; use the user-accessible heap to allocate stacks
git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@5725 42af7a65-404d-4744-a932-0658087f49c3
Diffstat (limited to 'nuttx/sched')
-rw-r--r--  nuttx/sched/env_dup.c                        2
-rw-r--r--  nuttx/sched/env_release.c                    2
-rw-r--r--  nuttx/sched/env_setenv.c                     4
-rw-r--r--  nuttx/sched/env_unsetenv.c                   2
-rw-r--r--  nuttx/sched/group_leave.c                    4
-rw-r--r--  nuttx/sched/mq_msgfree.c                     2
-rw-r--r--  nuttx/sched/mq_msgqfree.c                    2
-rw-r--r--  nuttx/sched/mq_open.c                        2
-rw-r--r--  nuttx/sched/os_internal.h                   14
-rw-r--r--  nuttx/sched/os_start.c                      19
-rw-r--r--  nuttx/sched/pthread_completejoin.c           2
-rw-r--r--  nuttx/sched/pthread_create.c                 2
-rw-r--r--  nuttx/sched/pthread_release.c                2
-rw-r--r--  nuttx/sched/sched_free.c                    69
-rw-r--r--  nuttx/sched/sched_garbage.c                116
-rw-r--r--  nuttx/sched/sched_releasetcb.c               6
-rw-r--r--  nuttx/sched/sem_close.c                      2
-rw-r--r--  nuttx/sched/sem_unlink.c                     2
-rw-r--r--  nuttx/sched/sig_releasependingsigaction.c    2
-rw-r--r--  nuttx/sched/sig_releasependingsignal.c       2
-rw-r--r--  nuttx/sched/timer_release.c                  2
21 files changed, 189 insertions, 71 deletions
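
Before the per-file hunks, a note on the convention they establish: allocations destined for user space (the environment copy in env_dup.c and, per the commit message, stacks) now come from the user heap via kumalloc()/kurealloc() and are released with sched_ufree(), while kernel-private objects (TCBs, message queues, pthread join structures, named semaphores, POSIX timers) keep using kmalloc() and are released with sched_kfree(). The sketch below is illustrative only: example_pairing(), the sizes, and the header assumptions are not part of the patch.

/* Illustrative sketch only; not code from this commit.  It assumes that the
 * allocator macros come from <nuttx/kmalloc.h> and that in-tree callers see
 * the sched_ufree()/sched_kfree() prototypes via the sched-internal
 * os_internal.h.  example_pairing() and the sizes are invented.
 */

#include <nuttx/config.h>
#include <nuttx/kmalloc.h>      /* kumalloc(), kurealloc(), kmalloc() (assumed) */

#include "os_internal.h"        /* sched_ufree(), sched_kfree() (assumed) */

static void example_pairing(void)
{
  /* User-accessible data (e.g. the environment copy made in env_dup.c) is
   * allocated from the user heap and released with sched_ufree().
   */

  FAR char *envp = (FAR char *)kumalloc(64);
  if (envp)
    {
      sched_ufree(envp);        /* Deferred if the heap cannot be entered now */
    }

  /* Kernel-private data (e.g. the TCB released in sched_releasetcb.c) is
   * allocated from the kernel heap and released with sched_kfree().
   */

  FAR void *tcb = kmalloc(128);
  if (tcb)
    {
      sched_kfree(tcb);         /* Defined by this patch when CONFIG_NUTTX_KERNEL
                                 * and CONFIG_MM_KERNEL_HEAP are enabled */
    }
}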
diff --git a/nuttx/sched/env_dup.c b/nuttx/sched/env_dup.c
index c7a10af48..1b061be8f 100644
--- a/nuttx/sched/env_dup.c
+++ b/nuttx/sched/env_dup.c
@@ -101,7 +101,7 @@ int env_dup(FAR struct task_group_s *group)
/* Yes..The parent task has an environment, duplicate it */
envlen = ptcb->group->tg_envsize;
- envp = (FAR char *)kmalloc(envlen);
+ envp = (FAR char *)kumalloc(envlen);
if (!envp)
{
ret = -ENOMEM;
diff --git a/nuttx/sched/env_release.c b/nuttx/sched/env_release.c
index aebb1f7e8..c6ff7ed75 100644
--- a/nuttx/sched/env_release.c
+++ b/nuttx/sched/env_release.c
@@ -85,7 +85,7 @@ void env_release(FAR struct task_group_s *group)
{
/* Free the environment */
- sched_free(group->tg_envp);
+ sched_ufree(group->tg_envp);
}
/* In any event, make sure that all environment-related varialbles in the
diff --git a/nuttx/sched/env_setenv.c b/nuttx/sched/env_setenv.c
index 7ce3e1a1f..1b0b54c1c 100644
--- a/nuttx/sched/env_setenv.c
+++ b/nuttx/sched/env_setenv.c
@@ -161,7 +161,7 @@ int setenv(FAR const char *name, FAR const char *value, int overwrite)
if (group->tg_envp)
{
newsize = group->tg_envsize + varlen;
- newenvp = (FAR char *)krealloc(group->tg_envp, newsize);
+ newenvp = (FAR char *)kurealloc(group->tg_envp, newsize);
if (!newenvp)
{
ret = ENOMEM;
@@ -173,7 +173,7 @@ int setenv(FAR const char *name, FAR const char *value, int overwrite)
else
{
newsize = varlen;
- newenvp = (FAR char *)kmalloc(varlen);
+ newenvp = (FAR char *)kumalloc(varlen);
if (!newenvp)
{
ret = ENOMEM;
diff --git a/nuttx/sched/env_unsetenv.c b/nuttx/sched/env_unsetenv.c
index b41f58c38..b61c73943 100644
--- a/nuttx/sched/env_unsetenv.c
+++ b/nuttx/sched/env_unsetenv.c
@@ -98,7 +98,7 @@ int unsetenv(FAR const char *name)
/* Reallocate the new environment buffer */
newsize = group->tg_envsize;
- newenvp = (FAR char *)krealloc(group->tg_envp, newsize);
+ newenvp = (FAR char *)kurealloc(group->tg_envp, newsize);
if (!newenvp)
{
set_errno(ENOMEM);
diff --git a/nuttx/sched/group_leave.c b/nuttx/sched/group_leave.c
index 44c52a56d..05104b34c 100644
--- a/nuttx/sched/group_leave.c
+++ b/nuttx/sched/group_leave.c
@@ -213,14 +213,14 @@ static inline void group_release(FAR struct task_group_s *group)
if (group->tg_members)
{
- sched_free(group->tg_members);
+ sched_kfree(group->tg_members);
group->tg_members = NULL;
}
#endif
/* Release the group container itself */
- sched_free(group);
+ sched_kfree(group);
}
/*****************************************************************************
diff --git a/nuttx/sched/mq_msgfree.c b/nuttx/sched/mq_msgfree.c
index 91322fbf3..61b0e1055 100644
--- a/nuttx/sched/mq_msgfree.c
+++ b/nuttx/sched/mq_msgfree.c
@@ -125,7 +125,7 @@ void mq_msgfree(FAR mqmsg_t *mqmsg)
else if (mqmsg->type == MQ_ALLOC_DYN)
{
- sched_free(mqmsg);
+ sched_kfree(mqmsg);
}
else
{
diff --git a/nuttx/sched/mq_msgqfree.c b/nuttx/sched/mq_msgqfree.c
index d7d87db3d..bea598b56 100644
--- a/nuttx/sched/mq_msgqfree.c
+++ b/nuttx/sched/mq_msgqfree.c
@@ -104,5 +104,5 @@ void mq_msgqfree(FAR msgq_t *msgq)
/* Then deallocate the message queue itself */
- sched_free(msgq);
+ sched_kfree(msgq);
}
diff --git a/nuttx/sched/mq_open.c b/nuttx/sched/mq_open.c
index 64c8389b1..7a4e39010 100644
--- a/nuttx/sched/mq_open.c
+++ b/nuttx/sched/mq_open.c
@@ -215,7 +215,7 @@ mqd_t mq_open(const char *mq_name, int oflags, ...)
* uninitialized, mq_deallocate() is not used.
*/
- sched_free(msgq);
+ sched_kfree(msgq);
}
}
}
diff --git a/nuttx/sched/os_internal.h b/nuttx/sched/os_internal.h
index 71003e2da..2aa17fa33 100644
--- a/nuttx/sched/os_internal.h
+++ b/nuttx/sched/os_internal.h
@@ -216,13 +216,17 @@ extern volatile dq_queue_t g_waitingforfill;
extern volatile dq_queue_t g_inactivetasks;
-/* This is the list of dayed memory deallocations that need to be handled
- * within the IDLE loop. These deallocations get queued by sched_free()
- * if the OS attempts to deallocate memory while it is within an interrupt
- * handler.
+/* These are lists of delayed memory deallocations that need to be handled
+ * within the IDLE loop or worker thread. These deallocations get queued
+ * by sched_ufree() and sched_kfree() if the OS needs to deallocate memory
+ * while it is within an interrupt handler.
*/
-extern volatile sq_queue_t g_delayeddeallocations;
+extern volatile sq_queue_t g_delayed_kufree;
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+extern volatile sq_queue_t g_delayed_kfree;
+#endif
/* This is the value of the last process ID assigned to a task */
diff --git a/nuttx/sched/os_start.c b/nuttx/sched/os_start.c
index 07c5394b4..e76f3c14b 100644
--- a/nuttx/sched/os_start.c
+++ b/nuttx/sched/os_start.c
@@ -141,13 +141,17 @@ volatile dq_queue_t g_waitingforfill;
volatile dq_queue_t g_inactivetasks;
-/* This is the list of dayed memory deallocations that need to be handled
- * within the IDLE loop. These deallocations get queued by sched_free()
- * if the OS attempts to deallocate memory while it is within an interrupt
- * handler.
+/* These are lists of delayed memory deallocations that need to be handled
+ * within the IDLE loop or worker thread. These deallocations get queued
+ * by sched_ufree() and sched_kfree() if the OS needs to deallocate memory
+ * while it is within an interrupt handler.
*/
-volatile sq_queue_t g_delayeddeallocations;
+volatile sq_queue_t g_delayed_kufree;
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+volatile sq_queue_t g_delayed_kfree;
+#endif
/* This is the value of the last process ID assigned to a task */
@@ -249,7 +253,10 @@ void os_start(void)
dq_init(&g_waitingforfill);
#endif
dq_init(&g_inactivetasks);
- sq_init(&g_delayeddeallocations);
+ sq_init(&g_delayed_kufree);
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+ sq_init(&g_delayed_kfree);
+#endif
/* Initialize the logic that determine unique process IDs. */
diff --git a/nuttx/sched/pthread_completejoin.c b/nuttx/sched/pthread_completejoin.c
index 86f4eac3d..f3a5928e6 100644
--- a/nuttx/sched/pthread_completejoin.c
+++ b/nuttx/sched/pthread_completejoin.c
@@ -300,6 +300,6 @@ void pthread_destroyjoin(FAR struct task_group_s *group,
/* And deallocate the pjoin structure */
- sched_free(pjoin);
+ sched_kfree(pjoin);
}
diff --git a/nuttx/sched/pthread_create.c b/nuttx/sched/pthread_create.c
index ce19941fc..346f51411 100644
--- a/nuttx/sched/pthread_create.c
+++ b/nuttx/sched/pthread_create.c
@@ -441,7 +441,7 @@ int pthread_create(FAR pthread_t *thread, FAR pthread_attr_t *attr,
return ret;
errout_with_join:
- sched_free(pjoin);
+ sched_kfree(pjoin);
ptcb->joininfo = NULL;
errout_with_tcb:
diff --git a/nuttx/sched/pthread_release.c b/nuttx/sched/pthread_release.c
index 6b99b455e..c66ff5187 100644
--- a/nuttx/sched/pthread_release.c
+++ b/nuttx/sched/pthread_release.c
@@ -114,7 +114,7 @@ void pthread_release(FAR struct task_group_s *group)
/* And deallocate the join structure */
- sched_free(join);
+ sched_kfree(join);
}
/* Destroy the join list semaphore */
diff --git a/nuttx/sched/sched_free.c b/nuttx/sched/sched_free.c
index e5e0bdacf..7442a4d09 100644
--- a/nuttx/sched/sched_free.c
+++ b/nuttx/sched/sched_free.c
@@ -1,7 +1,7 @@
/************************************************************************
* sched/sched_free.c
*
- * Copyright (C) 2007, 2009, 2012 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007, 2009, 2012-2013 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -72,18 +72,61 @@
************************************************************************/
/************************************************************************
- * Name: sched_free
+ * Name: sched_ufree and sched_kfree
*
* Description:
- * This function performs deallocations that the operating system may
- * need to make. This special interface to free is used to handling
+ * These functions perform deallocations that the operating system may
+ * need to make. These special interfaces to free are used in handling
* corner cases where the operating system may have to perform
* deallocations from within an interrupt handler.
*
************************************************************************/
-void sched_free(FAR void *address)
+void sched_ufree(FAR void *address)
{
+ irqstate_t flags;
+
+ /* Check if this is an attempt to deallocate memory from an exception
+ * handler. If this function is called from the IDLE task, then we
+ * must have exclusive access to the memory manager to do this.
+ */
+
+ if (up_interrupt_context() || kumm_trysemaphore() != 0)
+ {
+ /* Yes.. Make sure that this is not an attempt to free kernel memory
+ * using the user deallocator.
+ */
+
+ flags = irqsave();
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+ DEBUGASSERT(!kmm_heapmember(address));
+#endif
+
+ /* Delay the deallocation until a more appropriate time. */
+
+ sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayed_kufree);
+
+ /* Signal the worker thread that it has some clean up to do */
+
+#ifdef CONFIG_SCHED_WORKQUEUE
+ work_signal(LPWORK);
+#endif
+ irqrestore(flags);
+ }
+ else
+ {
+ /* No.. just deallocate the memory now. */
+
+ kufree(address);
+ kumm_givesemaphore();
+ }
+}
+
+#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
+void sched_kfree(FAR void *address)
+{
+ irqstate_t flags;
+
/* Check if this is an attempt to deallocate memory from an exception
* handler. If this function is called from the IDLE task, then we
* must have exclusive access to the memory manager to do this.
@@ -91,17 +134,23 @@ void sched_free(FAR void *address)
if (up_interrupt_context() || kmm_trysemaphore() != 0)
{
- /* Yes.. Delay the deallocation until a more appropriate time. */
+ /* Yes.. Make sure that this is not an attempt to free user memory
+ * using the kernel deallocator.
+ */
+
+ flags = irqsave();
+ DEBUGASSERT(kmm_heapmember(address));
- irqstate_t saved_state = irqsave();
- sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayeddeallocations);
+ /* Delay the deallocation until a more appropriate time. */
+
+ sq_addlast((FAR sq_entry_t*)address, (sq_queue_t*)&g_delayed_kfree);
/* Signal the worker thread that is has some clean up to do */
#ifdef CONFIG_SCHED_WORKQUEUE
work_signal(LPWORK);
#endif
- irqrestore(saved_state);
+ irqrestore(flags);
}
else
{
@@ -111,4 +160,4 @@ void sched_free(FAR void *address)
kmm_givesemaphore();
}
}
-
+#endif
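
The rewritten sched_free.c above reduces to a single decision: free the block immediately if the heap can be entered, otherwise park it on a delayed-deallocation queue (and, when a work queue is configured, nudge the low-priority worker with work_signal(LPWORK)) so the garbage collector can return it later. The standalone model below is not NuttX code; every name in it is invented, it uses a LIFO list in place of the FIFO sq_queue_t, and it omits interrupt masking, but it shows the decision itself and the trick of reusing the freed block's first bytes as the queue link, which is what the (FAR sq_entry_t *) casts do in the real code.

/* Standalone model (plain C, compilable on its own) of the defer-or-free
 * decision introduced above in sched_ufree()/sched_kfree().  All names are
 * invented: cannot_free_now() stands in for up_interrupt_context() ||
 * kumm_trysemaphore() != 0, and g_delayed_head models g_delayed_kufree.
 */

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct delayed_link                        /* Plays the role of sq_entry_t */
{
  struct delayed_link *flink;
};

static struct delayed_link *g_delayed_head;  /* Models g_delayed_kufree */

static bool cannot_free_now(void)
{
  return true;                             /* Force the deferred path for the demo */
}

static void model_sched_ufree(void *address)
{
  if (cannot_free_now())
    {
      /* Reuse the freed block's own first bytes as the queue link, which is
       * what the cast to (FAR sq_entry_t *) does in the real sq_addlast()
       * call.
       */

      struct delayed_link *link = (struct delayed_link *)address;
      link->flink    = g_delayed_head;
      g_delayed_head = link;
    }
  else
    {
      free(address);                       /* The immediate kufree() path */
    }
}

/* Models the drain that sched_garbagecollection() performs later */

static void model_drain(void)
{
  while (g_delayed_head)
    {
      struct delayed_link *link = g_delayed_head;
      g_delayed_head = link->flink;
      free(link);
    }
}

int main(void)
{
  model_sched_ufree(malloc(64));
  model_sched_ufree(malloc(128));
  model_drain();
  puts("deferred frees drained");
  return 0;
}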
diff --git a/nuttx/sched/sched_garbage.c b/nuttx/sched/sched_garbage.c
index 0eaa19247..d11a0df96 100644
--- a/nuttx/sched/sched_garbage.c
+++ b/nuttx/sched/sched_garbage.c
@@ -63,21 +63,10 @@
****************************************************************************/
/****************************************************************************
- * Public Functions
- ****************************************************************************/
-/****************************************************************************
- * Name: sched_garbagecollection
+ * Name: sched_kucleanup
*
* Description:
- * Clean-up memory de-allocations that we queued because they could not
- * be freed in that execution context (for example, if the memory was freed
- * from an interrupt handler).
- *
- * This logic may be called from the worker thread (see work_thread.c).
- * If, however, CONFIG_SCHED_WORKQUEUE is not defined, then this logic will
- * be called from the IDLE thread. It is less optimal for the garbage
- * collection to be called from the IDLE thread because it runs at a very
- * low priority and could cause false memory out conditions.
+ * Clean-up deferred de-allocations of user memory
*
* Input parameters:
* None
@@ -87,7 +76,7 @@
*
****************************************************************************/
-void sched_garbagecollection(void)
+static inline void sched_kucleanup(void)
{
irqstate_t flags;
FAR void *address;
@@ -96,14 +85,14 @@ void sched_garbagecollection(void)
* is needed because this is an atomic test.
*/
- while (g_delayeddeallocations.head)
+ while (g_delayed_kufree.head)
{
/* Remove the first delayed deallocation. This is not atomic and so
* we must disable interrupts around the queue operation.
*/
flags = irqsave();
- address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayeddeallocations);
+ address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayed_kufree);
irqrestore(flags);
/* The address should always be non-NULL since that was checked in the
@@ -112,26 +101,95 @@ void sched_garbagecollection(void)
if (address)
{
+ /* Return the memory to the user heap */
+
+ kufree(address);
+ }
+ }
+}
+
+/****************************************************************************
+ * Name: sched_kcleanup
+ *
+ * Description:
+ * Clean-up deferred de-allocations of kernel memory
+ *
+ * Input parameters:
+ * None
+ *
+ * Returned Value:
+ * None
+ *
+ ****************************************************************************/
+
#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
- /* Does the address to be freed lie in the kernel heap? */
+static inline void sched_kcleanup(void)
+{
+ irqstate_t flags;
+ FAR void *address;
- if (kmm_heapmember(address))
- {
- /* Yes.. return the memory to the kernel heap */
+ /* Test if the delayed deallocation queue is empty. No special protection
+ * is needed because this is an atomic test.
+ */
+
+ while (g_delayed_kfree.head)
+ {
+ /* Remove the first delayed deallocation. This is not atomic and so
+ * we must disable interrupts around the queue operation.
+ */
- kfree(address);
- }
+ flags = irqsave();
+ address = (FAR void*)sq_remfirst((FAR sq_queue_t*)&g_delayed_kfree);
+ irqrestore(flags);
- /* No.. then the address must lie in the user heap (unchecked) */
+ /* The address should always be non-NULL since that was checked in the
+ * 'while' condition above.
+ */
- else
-#endif
- {
- /* Return the memory to the user heap */
+ if (address)
+ {
+ /* Return the memory to the kernel heap */
- kufree(address);
- }
+ kfree(address);
}
}
}
+#else
+# define sched_kcleanup()
+#endif
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+/****************************************************************************
+ * Name: sched_garbagecollection
+ *
+ * Description:
+ * Clean-up memory de-allocations that we queued because they could not
+ * be freed in that execution context (for example, if the memory was freed
+ * from an interrupt handler).
+ *
+ * This logic may be called from the worker thread (see work_thread.c).
+ * If, however, CONFIG_SCHED_WORKQUEUE is not defined, then this logic will
+ * be called from the IDLE thread. It is less optimal for the garbage
+ * collection to be called from the IDLE thread because it runs at a very
+ * low priority and could cause false memory out conditions.
+ *
+ * Input parameters:
+ * None
+ *
+ * Returned Value:
+ * None
+ *
+ ****************************************************************************/
+
+void sched_garbagecollection(void)
+{
+ /* Handle deferred deallocations for the kernel heap */
+
+ sched_kcleanup();
+
+ /* Handle deferred deallocations for the user heap */
+
+ sched_kucleanup();
+}
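
As the comment block restored above notes, sched_garbagecollection() normally runs on the low-priority worker thread after work_signal(LPWORK); only when CONFIG_SCHED_WORKQUEUE is disabled does the IDLE thread have to poll it. A minimal sketch of that fallback call site, assuming the usual IDLE-loop shape: idle_loop_sketch() is an invented name, the loop body is illustrative, and the prototype is repeated only so the sketch stands alone (in-tree it is expected to come from the sched-internal headers).

/* Not code from this commit: a sketch of where the drain runs when no work
 * queue is available.  The prototype below is assumed; in-tree callers get
 * it from the sched-internal headers.
 */

void sched_garbagecollection(void);

#ifndef CONFIG_SCHED_WORKQUEUE           /* Normally provided by <nuttx/config.h> */
static void idle_loop_sketch(void)
{
  for (; ; )
    {
      /* Drain deallocations deferred by sched_ufree() and sched_kfree():
       * the kernel-heap queue first, then the user-heap queue, in the same
       * order sched_garbagecollection() uses above.
       */

      sched_garbagecollection();

      /* ... architecture-specific idling (e.g. wait for interrupt) ... */
    }
}
#endif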
diff --git a/nuttx/sched/sched_releasetcb.c b/nuttx/sched/sched_releasetcb.c
index d4d59605d..071b0b37e 100644
--- a/nuttx/sched/sched_releasetcb.c
+++ b/nuttx/sched/sched_releasetcb.c
@@ -142,7 +142,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
{
if (tcb->dspace->crefs <= 1)
{
- sched_free(tcb->dspace);
+ sched_kfree(tcb->dspace);
}
else
{
@@ -162,7 +162,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
FAR struct task_tcb_s *ttcb = (FAR struct task_tcb_s *)tcb;
for (i = 1; i < CONFIG_MAX_TASK_ARGS+1 && ttcb->argv[i]; i++)
{
- sched_free((FAR void*)ttcb->argv[i]);
+ sched_kfree((FAR void*)ttcb->argv[i]);
}
}
@@ -179,7 +179,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb)
#endif
/* And, finally, release the TCB itself */
- sched_free(tcb);
+ sched_kfree(tcb);
}
return ret;
diff --git a/nuttx/sched/sem_close.c b/nuttx/sched/sem_close.c
index 169fc0448..22f1e0f1d 100644
--- a/nuttx/sched/sem_close.c
+++ b/nuttx/sched/sem_close.c
@@ -128,7 +128,7 @@ int sem_close(FAR sem_t *sem)
if (!psem->nconnect && psem->unlinked)
{
dq_rem((FAR dq_entry_t*)psem, &g_nsems);
- sched_free(psem);
+ sched_kfree(psem);
}
ret = OK;
}
diff --git a/nuttx/sched/sem_unlink.c b/nuttx/sched/sem_unlink.c
index 6fe011540..0ee262c79 100644
--- a/nuttx/sched/sem_unlink.c
+++ b/nuttx/sched/sem_unlink.c
@@ -118,7 +118,7 @@ int sem_unlink(FAR const char *name)
if (!psem->nconnect)
{
dq_rem((FAR dq_entry_t*)psem, &g_nsems);
- sched_free(psem);
+ sched_kfree(psem);
}
/* If one or more process still has the semaphore open,
diff --git a/nuttx/sched/sig_releasependingsigaction.c b/nuttx/sched/sig_releasependingsigaction.c
index 69252f284..b0dab2ea9 100644
--- a/nuttx/sched/sig_releasependingsigaction.c
+++ b/nuttx/sched/sig_releasependingsigaction.c
@@ -115,6 +115,6 @@ void sig_releasependingsigaction(FAR sigq_t *sigq)
else if (sigq->type == SIG_ALLOC_DYN)
{
- sched_free(sigq);
+ sched_kfree(sigq);
}
}
diff --git a/nuttx/sched/sig_releasependingsignal.c b/nuttx/sched/sig_releasependingsignal.c
index 5b847bc64..41c2286eb 100644
--- a/nuttx/sched/sig_releasependingsignal.c
+++ b/nuttx/sched/sig_releasependingsignal.c
@@ -126,6 +126,6 @@ void sig_releasependingsignal(FAR sigpendq_t *sigpend)
else if (sigpend->type == SIG_ALLOC_DYN)
{
- sched_free(sigpend);
+ sched_kfree(sigpend);
}
}
diff --git a/nuttx/sched/timer_release.c b/nuttx/sched/timer_release.c
index ed83b5f53..50548b3d9 100644
--- a/nuttx/sched/timer_release.c
+++ b/nuttx/sched/timer_release.c
@@ -97,7 +97,7 @@ static inline void timer_free(struct posix_timer_s *timer)
/* Otherwise, return it to the heap */
irqrestore(flags);
- sched_free(timer);
+ sched_kfree(timer);
}
}