author     Gregory Nutt <gnutt@nuttx.org>   2014-10-06 15:54:00 -0600
committer  Gregory Nutt <gnutt@nuttx.org>   2014-10-06 15:54:00 -0600
commit     bf716b0de41711c320cacd3d38c1911657809926 (patch)
tree       8f332e0368cf9c6460e0a7d40e81121f7b5cb683
parent     f4555180b5847b4ee4b93c8d06f6741980a26128 (diff)
Fix some corner-case locking issues; make AIO lock re-entrant
-rw-r--r--  nuttx/fs/aio/aio.h            |  4
-rw-r--r--  nuttx/fs/aio/aio_cancel.c     |  6
-rw-r--r--  nuttx/fs/aio/aio_initialize.c | 65
-rw-r--r--  nuttx/fs/aio/aioc_contain.c   |  3
4 files changed, 61 insertions, 17 deletions
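
In outline, the patch turns the binary-semaphore lock on the pending-AIO list into a re-entrant lock: a holder PID and a nesting count are kept beside the semaphore, a repeat aio_lock() by the holder just bumps the count, and only the final aio_unlock() posts the semaphore. (In NuttX, getpid() names the running task or pthread, so a PID is enough to identify the holder.) The fragment below is a minimal, generic sketch of that pattern, included only as a reading aid for the hunks that follow; the names my_lock, my_unlock, g_holder and g_count are illustrative and are not part of the patch.

#include <assert.h>
#include <errno.h>
#include <semaphore.h>
#include <sys/types.h>
#include <unistd.h>

static sem_t g_excl;              /* Binary semaphore providing mutual exclusion */
static pid_t g_holder = -1;       /* PID of the current holder, -1 when unheld */
static unsigned int g_count;      /* Lock nesting depth for the current holder */

static void my_lock_init(void)
{
  sem_init(&g_excl, 0, 1);        /* One count available: the lock starts free */
}

static void my_lock(void)
{
  pid_t me = getpid();

  if (g_holder == me)
    {
      g_count++;                  /* Already ours: just record the extra nesting */
    }
  else
    {
      while (sem_wait(&g_excl) < 0)
        {
          assert(errno == EINTR); /* Only an interrupted wait is expected */
        }

      g_holder = me;              /* First acquisition by this thread */
      g_count  = 1;
    }
}

static void my_unlock(void)
{
  assert(g_holder == getpid() && g_count > 0);

  if (--g_count == 0)
    {
      g_holder = -1;              /* Give up ownership before waking waiters */
      sem_post(&g_excl);
    }
}
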
diff --git a/nuttx/fs/aio/aio.h b/nuttx/fs/aio/aio.h
index ea6be2c86..54bfe03e1 100644
--- a/nuttx/fs/aio/aio.h
+++ b/nuttx/fs/aio/aio.h
@@ -122,7 +122,9 @@ void aio_initialize(void);
  * Name: aio_lock/aio_unlock
  *
  * Description:
- *   Take/give the lock on the pending asynchronous I/O list
+ *   Take/give the lock on the pending asynchronous I/O list. These locks
+ *   are implemented with re-entrant semaphores -- deadlocks will not occur
+ *   if the lock is taken multiple times on the same thread.
  *
  * Input Parameters:
  *   None
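
For callers, the new description changes the contract: a function that already holds the lock may call helpers that take it again, provided every aio_lock() is balanced by an aio_unlock() on the same thread. A purely hypothetical caller, shown only to illustrate that contract:

void example_nested_use(void)
{
  aio_lock();        /* Outer acquisition: nesting count becomes 1 */

  aio_lock();        /* Same thread again: count becomes 2, no deadlock */

  /* ... operate on the pending asynchronous I/O list here ... */

  aio_unlock();      /* Count drops back to 1; the lock is still held */
  aio_unlock();      /* Count reaches 0; the semaphore is finally posted */
}
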
diff --git a/nuttx/fs/aio/aio_cancel.c b/nuttx/fs/aio/aio_cancel.c
index 21efe913e..497a2b93c 100644
--- a/nuttx/fs/aio/aio_cancel.c
+++ b/nuttx/fs/aio/aio_cancel.c
@@ -130,6 +130,7 @@ int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
   ret = AIO_ALLDONE;
   sched_lock();
+  aio_lock();
   if (aiocbp)
     {
@@ -139,11 +140,9 @@ int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
         {
           /* No.. Find the container for this AIO control block */
-          aio_lock();
           for (aioc = (FAR struct aio_container_s *)g_aio_pending.head;
                aioc && aioc->aioc_aiocbp != aiocbp;
                aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink);
-          aio_unlock();
           /* Did we find a container for this fildes? We should; the aio_result says
            * that the transfer is pending. If not we return AIO_ALLDONE.
@@ -185,11 +184,9 @@ int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
         {
           /* Find the next container with this AIO control block */
-          aio_lock();
           for (aioc = next;
                aioc && aioc->aioc_aiocbp->aio_fildes != fildes;
                aioc = (FAR struct aio_container_s *)aioc->aioc_link.flink);
-          aio_unlock();
           /* Did we find the container? We should; the aio_result says
            * that the transfer is pending. If not we return AIO_ALLDONE.
@@ -230,6 +227,7 @@ int aio_cancel(int fildes, FAR struct aiocb *aiocbp)
       while (aioc);
     }
+  aio_unlock();
   sched_unlock();
   return ret;
 }
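
Taken together, these hunks widen the critical section in aio_cancel(): rather than locking only around each list traversal, the function now takes the lock once before examining the request and releases it only after the cancellation work is finished, so a container found by the search can no longer be completed and recycled by another thread before it is cancelled. The sketch below shows only the shape of that pattern; find_pending_container() and cancel_container() are hypothetical helpers standing in for the search loop and the cancel logic, and the surrounding types come from the aio module's own headers.

static int cancel_one(FAR struct aiocb *aiocbp)
{
  FAR struct aio_container_s *aioc;
  int ret = AIO_ALLDONE;

  aio_lock();                              /* Held across the search AND the use */

  aioc = find_pending_container(aiocbp);   /* hypothetical helper */
  if (aioc != NULL)
    {
      ret = cancel_container(aioc);        /* hypothetical helper */
    }

  aio_unlock();                            /* Single release at the end */
  return ret;
}
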
diff --git a/nuttx/fs/aio/aio_initialize.c b/nuttx/fs/aio/aio_initialize.c
index 1360eaee7..f502ae302 100644
--- a/nuttx/fs/aio/aio_initialize.c
+++ b/nuttx/fs/aio/aio_initialize.c
@@ -44,6 +44,8 @@
 #include <errno.h>
 #include <queue.h>
+#include <nuttx/sched.h>
+
 #include "aio/aio.h"
 #ifdef CONFIG_FS_AIO
@@ -73,10 +75,13 @@ static dq_queue_t g_aioc_free;
 static sem_t g_aioc_freesem;
 /* This binary semaphore supports exclusive access to the list of pending
- * asynchronous I/O.
+ * asynchronous I/O. g_aio_holder and g_aio_count support the re-entrant
+ * lock.
  */
 static sem_t g_aio_exclsem;
+static pid_t g_aio_holder;
+static uint16_t g_aio_count;
 /****************************************************************************
  * Public Data
@@ -114,9 +119,11 @@ void aio_initialize(void)
   /* Initialize counting semaphores */
-  (void)sem_init(&g_aioc_freesem, 0, 0);
+  (void)sem_init(&g_aioc_freesem, 0, CONFIG_FS_NAIOC);
   (void)sem_init(&g_aio_exclsem, 0, 1);
+  g_aio_holder = INVALID_PROCESS_ID;
+
   /* Initialize the container queues */
   dq_init(&g_aioc_free);
@@ -126,11 +133,9 @@ void aio_initialize(void)
   for (i = 0; i < CONFIG_FS_NAIOC; i++)
     {
-      /* "Free" the entry, adding it to the free list and bumping up the
-       * semaphore count.
-       */
+      /* Add the container to the free list */
-      aioc_free(&g_aioc_alloc[i]);
+      dq_addlast(&g_aioc_alloc[i], &g_aioc_free);
     }
 }
@@ -150,15 +155,53 @@ void aio_initialize(void)
 void aio_lock(void)
 {
-  while (sem_wait(&g_aio_exclsem) < 0)
+  pid_t me = getpid();
+
+  /* Does this thread already hold the semaphore? */
+
+  if (g_aio_holder == me)
     {
-      DEBUGASSERT(get_errno() == EINTR);
+      /* Yes, just increment the count of locks held */
+
+      DEBUGASSERT(g_aio_count > 0 && g_aio_count < UINT16_MAX);
+      g_aio_count++;
+    }
+  else
+    {
+      /* No.. take the semaphore */
+
+      while (sem_wait(&g_aio_exclsem) < 0)
+        {
+          DEBUGASSERT(get_errno() == EINTR);
+        }
+
+      /* And mark it as ours */
+
+      g_aio_holder = me;
+      g_aio_count = 1;
     }
 }
 void aio_unlock(void)
 {
-  sem_post(&g_aio_exclsem);
+  DEBUGASSERT(g_aio_holder == getpid() && g_aio_count > 0);
+
+  /* Would decrementing the count release the lock? */
+
+  if (g_aio_count <= 1)
+    {
+      /* Yes.. then we will no longer be the holder */
+
+      g_aio_holder = INVALID_PROCESS_ID;
+      g_aio_count = 0;
+      sem_post(&g_aio_exclsem);
+    }
+  else
+    {
+      /* Otherwise, just decrement the count. We still hold the lock. */
+
+      g_aio_count--;
+    }
 }
 /****************************************************************************
@@ -184,8 +227,8 @@ FAR struct aio_container_s *aioc_alloc(void)
 {
   FAR struct aio_container_s *aioc;
-  /* Take a semaphore, thus guaranteeing that we have an AIO container
-   * set aside for us.
+  /* Take a count from the semaphore, thus guaranteeing that we have an AIO
+   * container set aside for us.
    */
   while (sem_wait(&g_aioc_freesem) < 0)
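
The other change in this file is to the free-container pool: g_aioc_freesem is now initialized to CONFIG_FS_NAIOC and the containers are pushed onto g_aioc_free directly, rather than starting the semaphore at zero and letting aioc_free() post it once per container as the removed comment described. Either way the ending invariant is the same: the semaphore count matches the number of containers on the free list, so a successful sem_wait() in aioc_alloc() guarantees that a container is available. A small stand-alone sketch of that pool discipline, with illustrative names rather than the patch's own:

#include <assert.h>
#include <errno.h>
#include <semaphore.h>

#define POOL_SIZE 8                 /* Stand-in for CONFIG_FS_NAIOC */

struct item_s
{
  struct item_s *flink;             /* Singly linked free list */
};

static struct item_s g_items[POOL_SIZE];
static struct item_s *g_free_head;
static sem_t g_free_sem;            /* Counts the items on the free list */

static void pool_init(void)
{
  int i;

  /* Start the counting semaphore at the pool size and push every item onto
   * the free list exactly once; count and list length stay equal from here.
   */

  sem_init(&g_free_sem, 0, POOL_SIZE);
  for (i = 0; i < POOL_SIZE; i++)
    {
      g_items[i].flink = g_free_head;
      g_free_head = &g_items[i];
    }
}

static struct item_s *pool_take(void)
{
  struct item_s *item;

  /* A successful wait guarantees at least one item is on the list.  A real
   * implementation also serializes the list update itself with a lock, as
   * the aio module does.
   */

  while (sem_wait(&g_free_sem) < 0)
    {
      assert(errno == EINTR);
    }

  item = g_free_head;
  g_free_head = item->flink;
  return item;
}
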
diff --git a/nuttx/fs/aio/aioc_contain.c b/nuttx/fs/aio/aioc_contain.c
index 26e67338b..bf13b66c4 100644
--- a/nuttx/fs/aio/aioc_contain.c
+++ b/nuttx/fs/aio/aioc_contain.c
@@ -153,12 +153,13 @@ FAR struct aiocb *aioc_decant(FAR struct aio_container_s *aioc)
   aio_lock();
   dq_rem(&aioc->aioc_link, &g_aio_pending);
-  aio_unlock();
   /* De-cant the AIO control block and return the container to the free list */
   aiocbp = aioc->aioc_aiocbp;
   aioc_free(aioc);
+
+  aio_unlock();
   return aiocbp;
 }
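
Finally, aioc_decant() now holds the list lock across aioc_free() instead of dropping it right after dq_rem(), so removing the container from the pending list and returning it to the free pool happen as one step. This is where the re-entrant lock matters: assuming aioc_free() itself takes aio_lock() internally to protect the free list (that function is not shown in this diff), the nested acquisition now simply bumps the count instead of deadlocking. An annotated sketch of the resulting call chain, under that assumption:

/* Illustrative call chain only -- not verbatim source.  The nesting counts
 * assume aioc_free() guards the free list with the same aio_lock().
 *
 * aioc_decant(aioc)
 *   aio_lock();                      nesting count -> 1
 *   dq_rem(&aioc->aioc_link, &g_aio_pending);
 *   aiocbp = aioc->aioc_aiocbp;
 *   aioc_free(aioc);
 *     aio_lock();                    same thread: count -> 2, no deadlock
 *     ...container goes back on g_aioc_free...
 *     aio_unlock();                  count -> 1, lock still held
 *     sem_post(&g_aioc_freesem);
 *   aio_unlock();                    count -> 0, g_aio_exclsem posted
 *   return aiocbp;
 */
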