| author | patacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3> | 2007-02-17 23:21:28 +0000 |
|---|---|---|
| committer | patacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3> | 2007-02-17 23:21:28 +0000 |
| commit | e3940eb2080711edac189cca3f642ee89dc215f2 (patch) | |
| tree | 1c390958fae49e34dce698b175487e6d4681e540 | /nuttx/mm/mm_memalign.c |
| parent | 2223612deb2cc6322992f8595b6d6f86fcb53ae1 (diff) | |
| download | px4-nuttx-e3940eb2080711edac189cca3f642ee89dc215f2.tar.gz px4-nuttx-e3940eb2080711edac189cca3f642ee89dc215f2.tar.bz2 px4-nuttx-e3940eb2080711edac189cca3f642ee89dc215f2.zip | |
NuttX RTOS
git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@3 42af7a65-404d-4744-a932-0658087f49c3
Diffstat (limited to 'nuttx/mm/mm_memalign.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | nuttx/mm/mm_memalign.c | 212 |

1 file changed, 212 insertions, 0 deletions
```diff
diff --git a/nuttx/mm/mm_memalign.c b/nuttx/mm/mm_memalign.c
new file mode 100644
index 000000000..b91625d0a
--- /dev/null
+++ b/nuttx/mm/mm_memalign.c
@@ -0,0 +1,212 @@
+/************************************************************
+ * mm_memalign.c
+ *
+ * Copyright (C) 2007 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <spudmonkey@racsa.co.cr>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name Gregory Nutt nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************/
+
+/************************************************************
+ * Included Files
+ ************************************************************/
+
+#include <assert.h>
+#include "mm_environment.h"
+#include "mm_internal.h"
+
+/************************************************************
+ * Definitions
+ ************************************************************/
+
+/************************************************************
+ * Global Functions
+ ************************************************************/
+
+/************************************************************
+ * memalign
+ *
+ * Description:
+ *   memalign requests more than enough space from malloc,
+ *   finds a region within that chunk that meets the alignment
+ *   request, and then frees any leading or trailing space.
+ *
+ *   The alignment argument must be a power of two (not
+ *   checked).  8-byte alignment is guaranteed by normal
+ *   malloc calls.
+ *
+ ************************************************************/
+
+void *memalign(size_t alignment, size_t size)
+{
+  struct mm_allocnode_s *node;
+  uint32 rawchunk;
+  uint32 alignedchunk;
+  uint32 mask = (uint32)(alignment - 1);
+  uint32 allocsize;
+
+  /* If the requested alignment is less than or equal to the
+   * natural alignment of malloc, then just let malloc do the
+   * work.
+   */
+
+  if (alignment <= MM_MIN_CHUNK)
+    {
+      return malloc(size);
+    }
+
+  /* Adjust the size to account for (1) the size of the allocated
+   * node, (2) to make sure that it is an even multiple of
+   * our granule size, and (3) to include the alignment amount.
+   *
+   * Notice that we increase the allocation size by twice the
+   * requested alignment.  We do this so that there will
+   * be at least two valid alignment points within the allocated
+   * memory.
+   *
+   * NOTE: These are sizes given to malloc and not chunk sizes.
+   * They do not include SIZEOF_MM_ALLOCNODE.
+   */
+
+  size      = MM_ALIGN_UP(size);   /* Make a multiple of our granule size */
+  allocsize = size + 2*alignment;  /* Add double the full alignment size */
+
+  /* Then malloc that size */
+
+  rawchunk = (uint32)malloc(allocsize);
+  if (!rawchunk)
+    {
+      return NULL;
+    }
+
+  /* We need to hold the MM semaphore while we muck with the
+   * chunks and nodelist.
+   */
+
+  mm_takesemaphore();
+
+  /* Get the node associated with the allocation. */
+
+  node = (struct mm_allocnode_s*)(rawchunk - SIZEOF_MM_ALLOCNODE);
+
+  /* Find the aligned subregion */
+
+  alignedchunk = (rawchunk + mask) & ~mask;
+
+  /* Check if there is free space at the beginning of the aligned chunk */
+
+  if (alignedchunk != rawchunk)
+    {
+      struct mm_allocnode_s *newnode;
+      struct mm_allocnode_s *next;
+      uint32 precedingsize;
+
+      /* Get the next node after the allocation. */
+
+      next = (struct mm_allocnode_s*)((char*)node + node->size);
+
+      /* Make sure that there is space to convert the preceding
+       * mm_allocnode_s into an mm_freenode_s.  I think that this
+       * should always be true.
+       */
+
+      DEBUGASSERT(alignedchunk >= rawchunk + 8);
+
+      newnode = (struct mm_allocnode_s*)(alignedchunk - SIZEOF_MM_ALLOCNODE);
+
+      /* Preceding size is the full size of the new 'node,' including
+       * SIZEOF_MM_ALLOCNODE.
+       */
+
+      precedingsize = (uint32)newnode - (uint32)node;
+
+      /* If we were unlucky, then the alignedchunk can lie in such
+       * a position that precedingsize < SIZEOF_MM_FREENODE.  We
+       * can't let that happen because we are going to cast 'node' to
+       * struct mm_freenode_s below.  This is why we allocated memory
+       * large enough to support two alignment points.  In this case,
+       * we will simply use the second alignment point.
+       */
+
+      if (precedingsize < SIZEOF_MM_FREENODE)
+        {
+          alignedchunk += alignment;
+          newnode       = (struct mm_allocnode_s*)(alignedchunk - SIZEOF_MM_ALLOCNODE);
+          precedingsize = (uint32)newnode - (uint32)node;
+        }
+
+      /* Set up the size of the new node */
+
+      newnode->size      = (uint32)next - (uint32)newnode;
+      newnode->preceding = precedingsize | MM_ALLOC_BIT;
+
+      /* Reduce the size of the original chunk and mark it not allocated. */
+
+      node->size       = precedingsize;
+      node->preceding &= ~MM_ALLOC_BIT;
+
+      /* Fix the preceding size of the next node */
+
+      next->preceding = newnode->size | (next->preceding & MM_ALLOC_BIT);
+
+      /* Convert the newnode chunk size back into a malloc-compatible
+       * size by subtracting the header size SIZEOF_MM_ALLOCNODE.
+       */
+
+      allocsize = newnode->size - SIZEOF_MM_ALLOCNODE;
+
+      /* Add the original, newly freed node to the free nodelist */
+
+      mm_addfreechunk((struct mm_freenode_s *)node);
+
+      /* Replace the original node with the newly allocated,
+       * aligned node.
+       */
+
+      node = newnode;
+    }
+
+  /* Check if there is free space at the end of the aligned chunk */
+
+  if (allocsize > size)
+    {
+      /* Shrink the chunk by that much -- remember, mm_shrinkchunk
+       * wants internal chunk sizes that include SIZEOF_MM_ALLOCNODE,
+       * and not the malloc-compatible sizes that we have.
+       */
+
+      mm_shrinkchunk(node, size + SIZEOF_MM_ALLOCNODE);
+    }
+
+  mm_givesemaphore();
+  return (void*)alignedchunk;
+}
```
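The core of the technique above is the over-allocate-and-round-up arithmetic: malloc a region at least `size + 2*alignment` bytes long, then round the raw address up to the next alignment boundary with `(rawchunk + mask) & ~mask`. The following is a minimal, self-contained sketch of just that arithmetic in standard C; it does not use any NuttX heap internals (no node headers, no returning of slack to the heap), and all names in it are illustrative rather than part of this commit.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustration only: over-allocate with plain malloc(), then round the
 * raw address up to the next 'alignment' boundary using the same mask
 * arithmetic as memalign() above.  'alignment' must be a power of two.
 * Unlike the NuttX implementation, the leading/trailing slack is not
 * given back to the heap here; the whole raw chunk stays allocated.
 */
int main(void)
{
  size_t    alignment = 64;
  size_t    size      = 100;
  uintptr_t mask      = (uintptr_t)(alignment - 1);

  /* Over-allocate so that an aligned region of 'size' bytes is
   * guaranteed to fit somewhere inside the raw chunk.
   */

  void *raw = malloc(size + 2 * alignment);
  if (!raw)
    {
      return 1;
    }

  uintptr_t rawchunk     = (uintptr_t)raw;
  uintptr_t alignedchunk = (rawchunk + mask) & ~mask;

  printf("raw=%p aligned=%p leading slack=%lu bytes\n",
         (void *)rawchunk, (void *)alignedchunk,
         (unsigned long)(alignedchunk - rawchunk));

  /* Free the original pointer, not the aligned one. */

  free(raw);
  return 0;
}
```

In the NuttX version, doubling the alignment in the over-allocation matters because the first aligned address may sit so close to the start of the raw chunk that the leading fragment is too small to become a free node; in that case the code simply advances to the second alignment point, which the extra `alignment` bytes guarantee is still inside the chunk.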