summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregory Nutt <gnutt@nuttx.org>2014-09-10 08:41:01 -0600
committerGregory Nutt <gnutt@nuttx.org>2014-09-10 08:41:01 -0600
commit964637c55aa7e3205590c3e509eb7058f4d0869c (patch)
treeb55791e804f4c0115c3ac945da6a141ef55b6db3
parent56bb85dc4173fec7d91379bd475c0310749d243c (diff)
downloadpx4-nuttx-964637c55aa7e3205590c3e509eb7058f4d0869c.tar.gz
px4-nuttx-964637c55aa7e3205590c3e509eb7058f4d0869c.tar.bz2
px4-nuttx-964637c55aa7e3205590c3e509eb7058f4d0869c.zip
Add configuration to use the fixed DRAM mapping for the page pool (if available) instead of remapping dynamically to access L2 page tables and page data. Also, add logic in address environment creation to initialize the shared data at the beginning of the .bss/.data process memory region.
-rw-r--r--nuttx/arch/Kconfig38
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_addrenv.c118
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_pgalloc.c29
-rw-r--r--nuttx/binfmt/binfmt_execmodule.c9
-rw-r--r--nuttx/include/nuttx/addrenv.h25
5 files changed, 213 insertions, 6 deletions
diff --git a/nuttx/arch/Kconfig b/nuttx/arch/Kconfig
index 11954a5a0..e57d59c35 100644
--- a/nuttx/arch/Kconfig
+++ b/nuttx/arch/Kconfig
@@ -236,6 +236,44 @@ config ARCH_STACK_NPAGES
This, along with knowledge of the page size, determines the size of
the stack virtual address space. Default is 1.
+config ARCH_PGPOOL_MAPPING
+ bool "Have page pool mapping"
+ default n
+ ---help---
+ If there is a MMU mapping in place for the page pool memory, then
+ this mapping can be utilized to simplify some page table operations.
+ Otherwise, a temporary mapping will have to be established each time
+ it is necessary to modify the contents of a page.
+
+if ARCH_PGPOOL_MAPPING
+
+config ARCH_PGPOOL_PBASE
+ hex "Page pool physical address"
+ default 0x0
+ ---help---
+ The physical address of the start of the page pool memory. This
+ setting is probably equivalent to other platform specific definitions
+ but is required again in order to modularize the common address
+ environment logic.
+
+config ARCH_PGPOOL_VBASE
+ hex "Page pool virtual address"
+ default 0x0
+ ---help---
+ The virtual address of the start of the page pool memory. This
+ setting is probably equivalent to other platform specific definitions
+ but is required again in order to modularize the common address
+ environment logic.
+
+config ARCH_PGPOOL_SIZE
+ int "Page pool size (byes)"
+ default 0
+ ---help---
+ The size of the page pool memory in bytes. This setting is probably
+ equivalent to other platform specific definitions but is required again
+ in order to modularize the common address environment logic.
+
+endif # ARCH_PGPOOL_MAPPING
endif # ARCH_ADDRENV && ARCH_NEED_ADDRENV_MAPPING
menuconfig PAGING
diff --git a/nuttx/arch/arm/src/armv7-a/arm_addrenv.c b/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
index 8154b94df..4d68fd655 100644
--- a/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
+++ b/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
@@ -82,6 +82,7 @@
#include "cache.h"
#include "mmu.h"
+#include "pginline.h"
#ifdef CONFIG_ARCH_ADDRENV
@@ -161,7 +162,9 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
irqstate_t flags;
uintptr_t paddr;
FAR uint32_t *l2table;
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
+#endif
size_t nmapped;
unsigned int npages;
unsigned int i;
@@ -204,12 +207,19 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
DEBUGASSERT(MM_ISALIGNED(paddr));
list[i] = (FAR uintptr_t *)paddr;
+ flags = irqsave();
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page address */
+
+ l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
+#else
/* Temporarily map the page into the virtual address space */
- flags = irqsave();
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+#endif
/* Initialize the page table */
@@ -224,7 +234,9 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
paddr = mm_pgalloc(1);
if (!paddr)
{
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
return -ENOMEM;
}
@@ -244,9 +256,11 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
(uintptr_t)l2table +
ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
}
@@ -254,6 +268,82 @@ static int up_addrenv_create_region(FAR uintptr_t **list,
}
/****************************************************************************
+ * Name: up_addrenv_initdata
+ *
+ * Description:
+ *   Initialize the region of memory at the beginning of the .bss/.data
+ * region that is shared between the user process and the kernel.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_BUILD_KERNEL
+static int up_addrenv_initdata(uintptr_t l2table)
+{
+ irqstate_t flags;
+ FAR uint32_t *virtptr;
+ uintptr_t paddr;
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
+ uint32_t l1save;
+#endif
+
+ DEBUGASSERT(l2table);
+ flags = irqsave();
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page table address */
+
+ virtptr = (FAR uint32_t *)arm_pgvaddr(l2table);
+#else
+ /* Temporarily map the page into the virtual address space */
+
+ l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
+ mmu_l1_setentry(l2table & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
+ virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (l2table & SECTION_MASK));
+#endif
+
+ /* Invalidate D-Cache so that we read from the physical memory */
+
+ arch_invalidate_dcache((uintptr_t)virtptr,
+ (uintptr_t)virtptr + sizeof(uint32_t));
+
+  /* Get the physical address of the first page of .bss/.data */
+
+ paddr = (uintptr_t)(*virtptr) & PTE_SMALL_PADDR_MASK;
+ DEBUGASSERT(paddr);
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page address */
+
+ virtptr = (FAR uint32_t *)arm_pgvaddr(paddr);
+#else
+ /* Temporarily map the page into the virtual address space */
+
+ mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
+ virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+#endif
+
+  /* Finally, after all of that, we can initialize the tiny region at
+ * the beginning of .bss/.data by setting it to zero.
+ */
+
+ memset(virtptr, 0, ARCH_DATA_RESERVE_SIZE);
+
+ /* Make sure that the initialized data is flushed to physical memory. */
+
+ arch_flush_dcache((uintptr_t)virtptr,
+ (uintptr_t)virtptr + ARCH_DATA_RESERVE_SIZE);
+
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Restore the scratch section L1 page table entry */
+
+ mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
+ irqrestore(flags);
+ return OK;
+}
+#endif /* CONFIG_BUILD_KERNEL */
+
+/****************************************************************************
* Name: up_addrenv_destroy_region
*
* Description:
@@ -267,7 +357,9 @@ static void up_addrenv_destroy_region(FAR uintptr_t **list,
irqstate_t flags;
uintptr_t paddr;
FAR uint32_t *l2table;
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
+#endif
int i;
int j;
@@ -284,12 +376,19 @@ static void up_addrenv_destroy_region(FAR uintptr_t **list,
paddr = (uintptr_t)list[i];
if (paddr != 0)
{
+ flags = irqsave();
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page address */
+
+ l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
+#else
/* Temporarily map the page into the virtual address space */
- flags = irqsave();
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+#endif
/* Return the allocated pages to the page allocator */
@@ -303,9 +402,11 @@ static void up_addrenv_destroy_region(FAR uintptr_t **list,
}
}
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
/* And free the L2 page table itself */
@@ -389,6 +490,19 @@ int up_addrenv_create(size_t textsize, size_t datasize,
goto errout;
}
+#ifdef CONFIG_BUILD_KERNEL
+  /* Initialize the shared data area at the beginning of the .bss/.data
+ * region.
+ */
+
+ ret = up_addrenv_initdata((uintptr_t)addrenv->data[0] & PMD_PTE_PADDR_MASK);
+ if (ret < 0)
+ {
+ bdbg("ERROR: Failed to initialize .bss/.data region: %d\n", ret);
+ goto errout;
+ }
+#endif
+
/* Notice that no pages are yet allocated for the heap */
return OK;
diff --git a/nuttx/arch/arm/src/armv7-a/arm_pgalloc.c b/nuttx/arch/arm/src/armv7-a/arm_pgalloc.c
index 4dc2020f3..3235cda25 100644
--- a/nuttx/arch/arm/src/armv7-a/arm_pgalloc.c
+++ b/nuttx/arch/arm/src/armv7-a/arm_pgalloc.c
@@ -49,6 +49,7 @@
#include "cache.h"
#include "mmu.h"
+#include "pginline.h"
#if defined(CONFIG_MM_PGALLOC) && defined(CONFIG_ARCH_USE_MMU)
@@ -77,7 +78,9 @@ static uintptr_t alloc_pgtable(void)
irqstate_t flags;
uintptr_t paddr;
FAR uint32_t *l2table;
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
+#endif
/* Allocate one physical page for the L2 page table */
@@ -86,12 +89,19 @@ static uintptr_t alloc_pgtable(void)
{
DEBUGASSERT(MM_ISALIGNED(paddr));
+ flags = irqsave();
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page address */
+
+ l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
+#else
/* Temporarily map the page into the virtual address space */
- flags = irqsave();
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+#endif
/* Initialize the page table */
@@ -104,9 +114,11 @@ static uintptr_t alloc_pgtable(void)
arch_flush_dcache((uintptr_t)l2table,
(uintptr_t)l2table + MM_PGSIZE);
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch section page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
}
@@ -210,7 +222,9 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
FAR uint32_t *l2table;
irqstate_t flags;
uintptr_t paddr;
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
+#endif
unsigned int index;
DEBUGASSERT(tcb && tcb->group);
@@ -244,21 +258,30 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
return 0;
}
+ flags = irqsave();
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+ /* Get the virtual address corresponding to the physical page address */
+
+ l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
+#else
/* Temporarily map the level 2 page table into the "scratch" virtual
* address space
*/
- flags = irqsave();
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
+#endif
/* Back up L2 entry with physical memory */
paddr = mm_pgalloc(1);
if (paddr == 0)
{
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
return 0;
}
@@ -283,9 +306,11 @@ uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
arch_flush_dcache((uintptr_t)&l2table[index],
(uintptr_t)&l2table[index] + sizeof(uint32_t));
+#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
+#endif
irqrestore(flags);
}
diff --git a/nuttx/binfmt/binfmt_execmodule.c b/nuttx/binfmt/binfmt_execmodule.c
index 9b8024dde..1afb4e8b3 100644
--- a/nuttx/binfmt/binfmt_execmodule.c
+++ b/nuttx/binfmt/binfmt_execmodule.c
@@ -179,10 +179,10 @@ int exec_module(FAR const struct binary_s *binp)
/* Allocate the stack for the new task (always from the user heap) */
stack = (FAR uint32_t*)kumm_malloc(binp->stacksize);
- if (!tcb)
+ if (!stack)
{
err = ENOMEM;
- goto errout_with_tcb;
+ goto errout_with_addrenv;
}
/* Restore the address environment */
@@ -196,6 +196,7 @@ int exec_module(FAR const struct binary_s *binp)
goto errout_with_stack;
}
#endif
+
/* Initialize the task */
ret = task_init((FAR struct tcb_s *)tcb, binp->filename, binp->priority,
@@ -269,6 +270,10 @@ errout_with_stack:
kumm_free(stack);
goto errout;
+errout_with_addrenv:
+#ifdef CONFIG_ARCH_ADDRENV
+ (void)up_addrenv_restore(&oldenv);
+#endif
errout_with_tcb:
kmm_free(tcb);
errout:
diff --git a/nuttx/include/nuttx/addrenv.h b/nuttx/include/nuttx/addrenv.h
index 9782e9918..6b1fc2809 100644
--- a/nuttx/include/nuttx/addrenv.h
+++ b/nuttx/include/nuttx/addrenv.h
@@ -161,6 +161,31 @@
#define ARCH_SCRATCH_VBASE (CONFIG_ARCH_STACK_VBASE + ARCH_STACK_SIZE)
+/* There is no need to use the scratch memory region if the page pool memory
+ * is statically mapped.
+ */
+
+#ifdef CONFIG_ARCH_PGPOOL_MAPPING
+
+# ifndef CONFIG_ARCH_PGPOOL_PBASE
+# error CONFIG_ARCH_PGPOOL_PBASE not defined
+# endif
+
+# ifndef CONFIG_ARCH_PGPOOL_VBASE
+# error CONFIG_ARCH_PGPOOL_VBASE not defined
+# endif
+
+# ifndef CONFIG_ARCH_PGPOOL_SIZE
+# error CONFIG_ARCH_PGPOOL_SIZE not defined
+# endif
+
+# define CONFIG_ARCH_PGPOOL_PEND \
+ (CONFIG_ARCH_PGPOOL_PBASE + CONFIG_ARCH_PGPOOL_SIZE)
+# define CONFIG_ARCH_PGPOOL_VEND \
+ (CONFIG_ARCH_PGPOOL_VBASE + CONFIG_ARCH_PGPOOL_SIZE)
+
+#endif
+
/****************************************************************************
* Public Types
****************************************************************************/