summaryrefslogtreecommitdiff
path: root/nuttx
diff options
context:
space:
mode:
authorGregory Nutt <gnutt@nuttx.org>2014-08-25 11:18:32 -0600
committerGregory Nutt <gnutt@nuttx.org>2014-08-25 11:18:32 -0600
commit28928fed5e3f5ef6b3ff882cda1fb448ce76196b (patch)
tree6fab8a2f346a4e8e2af6771e2f7c0bbdb897771a /nuttx
parent59d9019f45c3e0229b12eda8a57118e6e69f99ea (diff)
downloadnuttx-28928fed5e3f5ef6b3ff882cda1fb448ce76196b.tar.gz
nuttx-28928fed5e3f5ef6b3ff882cda1fb448ce76196b.tar.bz2
nuttx-28928fed5e3f5ef6b3ff882cda1fb448ce76196b.zip
Cortex-A/SAMA5 address environment support is code complete (untested)
Diffstat (limited to 'nuttx')
-rw-r--r--nuttx/arch/arm/include/arch.h15
-rwxr-xr-xnuttx/arch/arm/include/armv7-a/irq.h2
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_addrenv.c347
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_mmu.c39
-rw-r--r--nuttx/arch/arm/src/armv7-a/mmu.h35
-rw-r--r--nuttx/arch/arm/src/sama5/Kconfig77
-rw-r--r--nuttx/arch/arm/src/sama5/Make.defs4
-rw-r--r--nuttx/arch/arm/src/sama5/sam_pgalloc.c110
-rw-r--r--nuttx/include/nuttx/addrenv.h4
-rw-r--r--nuttx/include/nuttx/arch.h15
-rw-r--r--nuttx/include/nuttx/pgalloc.h48
-rw-r--r--nuttx/mm/Kconfig12
-rw-r--r--nuttx/mm/mm_pgalloc.c22
-rw-r--r--nuttx/sched/init/os_start.c10
14 files changed, 667 insertions, 73 deletions
diff --git a/nuttx/arch/arm/include/arch.h b/nuttx/arch/arm/include/arch.h
index 6495c5230..81130eb6d 100644
--- a/nuttx/arch/arm/include/arch.h
+++ b/nuttx/arch/arm/include/arch.h
@@ -114,7 +114,9 @@ struct group_addrenv_s
{
FAR uint32_t *text[CONFIG_ARCH_TEXT_NPAGES];
FAR uint32_t *data[CONFIG_ARCH_DATA_NPAGES];
+#if 0 /* Not yet implemented */
FAR uint32_t *heap[CONFIG_ARCH_HEAP_NPAGES];
+#endif
};
typedef struct group_addrenv_s group_addrenv_t;
@@ -124,9 +126,20 @@ typedef struct group_addrenv_s group_addrenv_t;
*
* int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv);
* int up_addrenv_restore(save_addrenv_t oldenv);
+ *
+ * In this case, the saved values in the L1 page table are returned
*/
-typedef group_addrenv_t *save_addrenv_t;
+struct save_addrenv_s
+{
+ FAR uint32_t text[CONFIG_ARCH_TEXT_NPAGES];
+ FAR uint32_t data[CONFIG_ARCH_DATA_NPAGES];
+#if 0 /* Not yet implemented */
+ FAR uint32_t heap[CONFIG_ARCH_HEAP_NPAGES];
+#endif
+};
+
+typedef struct save_addrenv_s save_addrenv_t;
#endif
/****************************************************************************
diff --git a/nuttx/arch/arm/include/armv7-a/irq.h b/nuttx/arch/arm/include/armv7-a/irq.h
index 98fd8b154..ddfd9df41 100755
--- a/nuttx/arch/arm/include/armv7-a/irq.h
+++ b/nuttx/arch/arm/include/armv7-a/irq.h
@@ -248,8 +248,10 @@ struct xcptcontext
* handling to support dynamically sized stacks for each thread.
*/
+#if 0 /* Not yet implemented */
FAR uint32_t *stack[CONFIG_ARCH_STACK_NPAGES];
#endif
+#endif
};
#endif
diff --git a/nuttx/arch/arm/src/armv7-a/arm_addrenv.c b/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
index d575a68c8..978326736 100644
--- a/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
+++ b/nuttx/arch/arm/src/armv7-a/arm_addrenv.c
@@ -81,7 +81,11 @@
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
-/* Configuration ************************************************************/
+/* Using a 4KiB page size, each 1MiB section maps to an L2 page table
+ * containing 256 entries, one for each 4KiB page (256 * 4KiB = 1MiB)
+ */
+
+#define ENTRIES_PER_L2TABLE 256
/****************************************************************************
* Private Data
@@ -92,6 +96,47 @@
****************************************************************************/
/****************************************************************************
+ * Name: set_l2_entry
+ *
+ * Description:
+ * Set the L2 table entry as part of the initialization of the L2 Page
+ * table.
+ *
+ ****************************************************************************/
+
+static void set_l2_entry(FAR uint32_t *l2table, uintptr_t paddr,
+ uintptr_t vaddr, uint32_t mmuflags)
+{
+ uint32_t index;
+
+ /* The table divides a 1Mb address space up into 256 entries, each
+ * corresponding to 4Kb of address space. The page table index is
+ * related to the offset from the beginning of 1Mb region.
+ */
+
+ index = (vaddr & 0x000ff000) >> 12;
+
+ /* Save the table entry */
+
+ l2table[index] = (paddr | mmuflags);
+}
+
+/****************************************************************************
+ * Name: set_l1_entry
+ *
+ * Description:
+ * Set an L1 page table entry to refer to a specific L2 page table.
+ *
+ ****************************************************************************/
+
+static inline void set_l1_entry(uintptr_t l2vaddr, uintptr_t l2paddr)
+{
+ mmu_l1_setentry(l2paddr & PMD_PTE_PADDR_MASK,
+ l2vaddr & PMD_PTE_PADDR_MASK,
+ MMU_L1_PGTABFLAGS);
+}
+
+/****************************************************************************
* Public Functions
****************************************************************************/
@@ -120,8 +165,135 @@
int up_addrenv_create(size_t textsize, size_t datasize,
FAR group_addrenv_t *addrenv)
{
-#warning Missing logic
- return -ENOSYS;
+ irqstate_t flags;
+ uintptr_t vaddr;
+ uintptr_t paddr;
+ FAR uint32_t *l2table;
+ uint32_t l1save;
+ size_t nmapped;
+ unsigned int ntextpages;
+ unsigned int ndatapages;
+ unsigned int i;
+ unsigned int j;
+ int ret;
+
+ DEBUGASSERT(addrenv);
+
+ /* Initialize the address environment structure to all zeroes */
+
+ memset(addrenv, 0, sizeof(group_addrenv_t));
+
+ /* Verify that we are configured with enough virtual address space to
+ * support this address environment.
+ */
+
+ ntextpages = MM_NPAGES(textsize);
+ ndatapages = MM_NPAGES(datasize);
+
+ if (ntextpages > CONFIG_ARCH_TEXT_NPAGES ||
+ ndatapages > CONFIG_ARCH_DATA_NPAGES)
+ {
+ return -E2BIG;
+ }
+
+ /* Back the allocation up with physical pages and set up the level mapping
+ * (which of course does nothing until the L2 page table is hooked into
+ * the L1 page table).
+ */
+
+ /* Allocate .text space pages */
+
+ vaddr = CONFIG_ARCH_TEXT_VADDR;
+ mapped = 0;
+
+ for (i = 0; i < ntextpages; i++)
+ {
+ /* Allocate one physical page */
+
+ paddr = mm_pgalloc(1);
+ if (!paddr)
+ {
+ ret = -ENOMEM;
+ goto errout;
+ }
+
+ DEBUGASSERT(MM_ISALIGNED(paddr));
+ addrenv->text[i] = (FAR uint32_t *)paddr;
+
+ /* Temporarily map the page into the virtual address space */
+
+ flags = irqsave();
+ l1save = mmu_l1_getentry(vaddr);
+ set_l1_entry(ARCH_SCRATCH_VADDR, paddr);
+ l2table = (FAR uint32_t *)ARCH_SCRATCH_VADDR;
+
+ /* Initialize the page table */
+
+ memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+ for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < ntextsize; j++)
+ {
+ set_l2_entry(l2table, paddr, vaddr, MMU_ROMFLAGS);
+ nmapped += MM_PGSIZE;
+ paddr += MM_PGSIZE;
+ vaddr += MM_PGSIZE;
+ }
+
+ /* Restore the original L1 page table entry */
+
+ mmu_l1_restore(ARCH_SCRATCH_VADDR, l1save);
+ irqrestore();
+ }
+
+ /* Allocate .bss/.data space pages */
+
+ vaddr = CONFIG_ARCH_DATA_VADDR;
+ mapped = 0;
+
+ for (i = 0; i < ndatapages; i++)
+ {
+ /* Allocate one physical page */
+
+ paddr = mm_pgalloc(1);
+ if (!paddr)
+ {
+ ret = -ENOMEM;
+ goto errout;
+ }
+
+ DEBUGASSERT(MM_ISALIGNED(paddr));
+ addrenv->data[i] = (FAR uint32_t *)paddr;
+
+ /* Temporarily map the page into the virtual address space */
+
+ flags = irqsave();
+ l1save = mmu_l1_getentry(vaddr);
+ set_l1_entry(ARCH_SCRATCH_VADDR, paddr);
+ l2table = (FAR uint32_t *)ARCH_SCRATCH_VADDR;
+
+ /* Initialize the page table */
+
+ memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));
+ for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < ndatasize; j++)
+ {
+ set_l2_entry(l2table, paddr, vaddr, MMU_MEMFLAGS);
+ nmapped += MM_PGSIZE;
+ paddr += MM_PGSIZE;
+ vaddr += MM_PGSIZE;
+ }
+
+ /* Restore the original L1 page table entry */
+
+ mmu_l1_restore(ARCH_SCRATCH_VADDR, l1save);
+ irqrestore();
+ }
+
+ /* Notice that no pages are yet allocated for the heap */
+
+ return OK;
+
+errout:
+ up_addrenv_destroy(addrenv);
+ return ret;
}
/****************************************************************************
@@ -142,8 +314,48 @@ int up_addrenv_create(size_t textsize, size_t datasize,
int up_addrenv_destroy(group_addrenv_t addrenv)
{
-#warning Missing logic
- return -ENOSYS;
+ uintptr_t vaddr;
+ int i;
+
+ DEBUGASSERT(addrenv);
+
+ for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
+ i < CONFIG_ARCH_TEXT_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ mmu_l1_clrentry(vaddr);
+ if (addrenv->text[i])
+ {
+ mm_pgfree((uintptr_t)addrenv->text[i], 1);
+ }
+ }
+
+ for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
+ i < CONFIG_ARCH_DATA_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ mmu_l1_clrentry(vaddr);
+ if (addrenv->data[i])
+ {
+ mm_pgfree((uintptr_t)addrenv->data[i], 1);
+ }
+ }
+
+#if 0 /* Not yet implemented */
+ for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
+ i < CONFIG_ARCH_HEAP_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ mmu_l1_clrentry(vaddr);
+ if (addrenv->heap[i])
+ {
+ mm_pgfree((uintptr_t)addrenv->heap[i], 1);
+ }
+ }
+#endif
+
+ memset(addrenv, 0, sizeof(group_addrenv_t));
+ return OK;
}
/****************************************************************************
@@ -232,8 +444,88 @@ int up_addrenv_vdata(FAR group_addrenv_t addrenv, uintptr_t textsize,
int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv)
{
-#warning Missing logic
- return -ENOSYS;
+ uintptr_t vaddr;
+ uintptr_t paddr;
+ int i;
+
+ DEBUGASSERT(addrenv);
+
+ for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
+ i < CONFIG_ARCH_TEXT_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Save the old L1 page table entry */
+
+ if (oldenv)
+ {
+ oldenv->text[i] = mmu_l1_getentry(vaddr);
+ }
+
+ /* Set (or clear) the new page table entry */
+
+ paddr = (uintptr_t)addrenv->text[i]
+ if (paddr)
+ {
+ set_l1_entry(vaddr, paddr);
+ }
+ else
+ {
+ mmu_l1_clrentry(vaddr);
+ }
+ }
+
+ for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
+ i < CONFIG_ARCH_DATA_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Save the old L1 page table entry */
+
+ if (oldenv)
+ {
+ oldenv->data[i] = mmu_l1_getentry(vaddr);
+ }
+
+ /* Set (or clear) the new page table entry */
+
+ paddr = (uintptr_t)addrenv->data[i]
+ if (paddr)
+ {
+ set_l1_entry(vaddr, paddr);
+ }
+ else
+ {
+ mmu_l1_clrentry(vaddr);
+ }
+ }
+
+#if 0 /* Not yet implemented */
+ for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
+ i < CONFIG_ARCH_HEAP_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Save the old L1 page table entry */
+
+ if (oldenv)
+ {
+ oldenv->heap[i] = mmu_l1_getentry(vaddr);
+ }
+
+ /* Set (or clear) the new page table entry */
+
+ paddr = (uintptr_t)addrenv->heap[i]
+ if (paddr)
+ {
+ set_l1_entry(vaddr, paddr);
+ }
+ else
+ {
+ mmu_l1_clrentry(vaddr);
+ }
+ }
+#endif
+
+ memset(addrenv, 0, sizeof(group_addrenv_t));
+ return OK;
}
/****************************************************************************
@@ -255,8 +547,43 @@ int up_addrenv_select(group_addrenv_t addrenv, save_addrenv_t *oldenv)
int up_addrenv_restore(save_addrenv_t oldenv)
{
-#warning Missing logic
- return -ENOSYS;
+ uintptr_t vaddr;
+ uintptr_t paddr;
+ int i;
+
+ DEBUGASSERT(addrenv);
+
+ for (vaddr = CONFIG_ARCH_TEXT_VADDR, i = 0;
+ i < CONFIG_ARCH_TEXT_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Restore the L1 page table entry */
+
+ mmu_l1_restore(vaddr, oldenv->text[i]);
+ }
+
+ for (vaddr = CONFIG_ARCH_DATA_VADDR, i = 0;
+ i < CONFIG_ARCH_DATA_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Restore the L1 page table entry */
+
+ mmu_l1_restore(vaddr, oldenv->data[i]);
+ }
+
+#if 0 /* Not yet implemented */
+ for (vaddr = CONFIG_ARCH_HEAP_VADDR, i = 0;
+ i < CONFIG_ARCH_HEAP_NPAGES;
+ vaddr += MM_PGSIZE, i++)
+ {
+ /* Restore the L1 page table entry */
+
+ mmu_l1_restore(vaddr, oldenv->heap[i]);
+ }
+#endif
+
+ memset(addrenv, 0, sizeof(group_addrenv_t));
+ return OK;
}
/****************************************************************************
@@ -280,7 +607,7 @@ int up_addrenv_assign(FAR const group_addrenv_t *addrenv,
{
DEBUGASSERT(addrenv && group);
- /* Just copy the addess environment into the group */
+ /* Just copy the address environment into the group */
memcpy(&group->addrenv, addrenv, sizeof(group_addrenv_t));
return OK;
diff --git a/nuttx/arch/arm/src/armv7-a/arm_mmu.c b/nuttx/arch/arm/src/armv7-a/arm_mmu.c
index 0e417af64..e16885f23 100644
--- a/nuttx/arch/arm/src/armv7-a/arm_mmu.c
+++ b/nuttx/arch/arm/src/armv7-a/arm_mmu.c
@@ -67,7 +67,7 @@
* Set a one level 1 translation table entry. Only a single L1 page table
* is supported.
*
- * Input Paramters:
+ * Input Parameters:
* paddr - The physical address to be mapped. Must be aligned to a 1MB
* address boundary
* vaddr - The virtual address to be mapped. Must be aligned to a 1MB
@@ -84,7 +84,7 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
/* Save the page table entry */
- l1table[index] = (paddr | mmuflags);
+ l1table[index] = (paddr | mmuflags);
/* Flush the data cache entry. Make sure that the modified contents
* of the page table are flushed into physical memory.
@@ -99,6 +99,41 @@ void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags)
#endif
/****************************************************************************
+ * Name: mmu_l1_restore
+ *
+ * Description:
+ * Restore one L1 table entry previously returned by mmu_l1_getentry() (or
+ * any other encoded L1 page table value).
+ *
+ * Input Parameters:
+ * vaddr - A virtual address to be mapped
+ * l1entry - The value to write into the page table entry
+ *
+ ****************************************************************************/
+
+#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
+void mmu_l1_restore(uint32ptr_t vaddr, uint32_t l1entry)
+{
+ uint32_t *l1table = (uint32_t*)PGTABLE_BASE_VADDR;
+ uint32_t index = vaddr >> 20;
+
+ /* Set the encoded page table entry */
+
+ l1table[index] = l1entry;
+
+ /* Flush the data cache entry. Make sure that the modified contents
+ * of the page table are flushed into physical memory.
+ */
+
+ cp15_clean_dcache_bymva((uint32_t)&l1table[index]);
+
+ /* Invalidate the TLB cache associated with virtual address range */
+
+ mmu_invalidate_region(vaddr & PMD_PTE_PADDR_MASK, 1024*1024);
+}
+#endif
+
+/****************************************************************************
* Name: mmu_l2_setentry
*
* Description:
diff --git a/nuttx/arch/arm/src/armv7-a/mmu.h b/nuttx/arch/arm/src/armv7-a/mmu.h
index a2802b18b..922038ab3 100644
--- a/nuttx/arch/arm/src/armv7-a/mmu.h
+++ b/nuttx/arch/arm/src/armv7-a/mmu.h
@@ -405,6 +405,7 @@
#define PTE_LARGE_TEX_SHIFT (12) /* Bits 12-14: Memory region attribute bits */
#define PTE_LARGE_TEX_MASK (7 << PTE_LARGE_TEX_SHIFT)
#define PTE_LARGE_XN (1 << 15) /* Bit 15: Execute-never bit */
+#define PTE_LARGE_FLAG_MASK (0x0000f03f) /* Bits 0-15: MMU flags (mostly) */
#define PTE_LARGE_PADDR_MASK (0xffff0000) /* Bits 16-31: Large page base address, PA[31:16] */
/* Small page -- 4Kb */
@@ -413,6 +414,7 @@
/* Bit 2: Bufferable bit */
/* Bit 3: Cacheable bit */
/* Bits 4-5: Access Permissions bits AP[0:1] */
+#define PTE_SMALL_FLAG_MASK (0x0000003f) /* Bits 0-11: MMU flags (mostly) */
#define PTE_SMALL_PADDR_MASK (0xfffff000) /* Bits 12-31: Small page base address, PA[31:12] */
/* Level 2 Translation Table Access Permissions:
@@ -1336,6 +1338,39 @@ extern "C" {
void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags);
#endif
+/****************************************************************************
+ * Name: mmu_l1_restore
+ *
+ * Description:
+ * Restore one L1 table entry previously returned by mmu_l1_getentry() (or
+ * any other encoded L1 page table value).
+ *
+ * Input Parameters:
+ * vaddr - A virtual address to be mapped
+ * l1entry - The value to write into the page table entry
+ *
+ ****************************************************************************/
+
+#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
+void mmu_l1_restore(uint32ptr_t vaddr, uint32_t l1entry);
+#endif
+
+/************************************************************************************
+ * Name: mmu_l1_clrentry(uint32ptr_t vaddr);
+ *
+ * Description:
+ * Unmap one L1 region by writing zero into the L1 page table entry and by
+ * flushing caches and TLBs appropriately.
+ *
+ * Input Parameters:
+ * vaddr - A virtual address within the L1 address region to be unmapped.
+ *
+ ************************************************************************************/
+
+#if !defined (CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
+# define mmu_l1_clrentry(v) mmu_l1_restore(v,0)
+#endif
+
/************************************************************************************
* Name: mmu_l1_map_region
*
diff --git a/nuttx/arch/arm/src/sama5/Kconfig b/nuttx/arch/arm/src/sama5/Kconfig
index 8cb320be4..220ec7402 100644
--- a/nuttx/arch/arm/src/sama5/Kconfig
+++ b/nuttx/arch/arm/src/sama5/Kconfig
@@ -4475,7 +4475,7 @@ config SAMA5_DDRCS_HEAP
default y
depends on SAMA5_DDRCS && !SAMA5_BOOT_SDRAM
---help---
- Include the DDR-SDRAM memory in the heap.
+ Include a portion of DDR-SDRAM memory in the heap.
NOTE: MM_REGIONS must also be set to indicate the total number of
memory regions to be added to the heap.
@@ -4487,20 +4487,30 @@ config SAMA5_DDRCS_HEAP
if SAMA5_DDRCS_HEAP
config SAMA5_DDRCS_HEAP_OFFSET
- int "DDR-SDRAM offset"
+ int "DDR-SDRAM heap offset"
default 0
---help---
Preserve this number of bytes at the beginning of SDRAM. The
portion of DRAM beginning at this offset from the DDRCS base will
be added to the heap.
+ NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
+ then the memory regions defined by SAMA5_DDRCS_HEAP_OFFSET and
+ SAMA5_DDRCS_HEAP_SIZE must not overlap the memory region defined by
+ SAMA5_DDRCS_PGHEAP_OFFSET and SAMA5_DDRCS_PGHEAP_SIZE.
+
config SAMA5_DDRCS_HEAP_SIZE
- int "DDR-SDRAM size"
+ int "DDR-SDRAM heap size"
default 0
---help---
Add the region of DDR-SDRAM beginning at SAMA5_DDRCS_HEAP_OFFSET
and of size SAMA5_DDRCS_HEAP_SIZE to the heap.
+ NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
+ then the memory regions defined by SAMA5_DDRCS_HEAP_OFFSET and
+ SAMA5_DDRCS_HEAP_SIZE must not overlap the memory region defined by
+ SAMA5_DDRCS_PGHEAP_OFFSET and SAMA5_DDRCS_PGHEAP_SIZE.
+
endif # SAMA5_DDRCS_HEAP
config SAMA5_DDRCS_RESERVE
@@ -4513,10 +4523,10 @@ config SAMA5_DDRCS_RESERVE
program through the end of DRAM (RAM_START + RAM_END) is
automatically added to the heap. However, there are certain cases
where you may want to reserve a block of DRAM for other purposes
- such a large DMA buffer or an LCD framebuffer. In those cases, you
- can select this option to specify the end of the DRAM memory to add
- to the heap; DRAM after this address will not be part of the heap
- and so will be available for other purposes.
+ such as a large DMA buffer or an LCD framebuffer or a page cache. In
+ those cases, you can select this option to specify the end of the
+ DRAM memory to add to the heap; DRAM after this address will not
+ be part of the heap and so will be available for other purposes.
NOTE: There is no way to reserve memory before the start of the
program in DRAM using this mechanism. That configuration is
@@ -4533,8 +4543,61 @@ config SAMA5_DDRCS_HEAP_END
address (minus one). This will reserve the memory starting at
this address through RAM_SIZE + RAM_END for other purposes.
+ NOTE: If you are using a page cache in DRAM (via SAMA5_DDRCS_PGHEAP),
+ then the memory region below SAMA5_DDRCS_HEAP_END must not
+ overlap the memory region defined by SAMA5_DDRCS_PGHEAP_OFFSET and
+ SAMA5_DDRCS_PGHEAP_SIZE.
+
endif # SAMA5_DDRCS_RESERVE
+config SAMA5_DDRCS_PGHEAP
+ bool "Include DDR-SDRAM in page cache"
+ default y
+ depends on SAMA5_DDRCS && ARCH_ADDRENV
+ ---help---
+ Include a portion of DDR-SDRAM memory in the page cache.
+
+if SAMA5_DDRCS_PGHEAP
+
+config SAMA5_DDRCS_PGHEAP_OFFSET
+ int "DDR-SDRAM heap offset"
+ default 0
+ ---help---
+ Preserve this number of bytes at the beginning of SDRAM. The
+ portion of DRAM beginning at this offset from the DDRCS base will
+ be added to the heap.
+
+ If you are executing from DRAM, then you must have already reserved
+ this region with SAMA5_DDRCS_RESERVE, setting SAMA5_DDRCS_HEAP_END
+ so that this page cache region defined by SAMA5_DDRCS_PGHEAP_OFFSET
+ and SAMA5_DDRCS_PGHEAP_SIZE does not overlap the region of DRAM
+ that is added to the heap. If you are not executing from DRAM, then
+ you must have excluded this page cache region from the heap either
+ by (1) not selecting SAMA5_DDRCS_HEAP, or (2) selecting
+ SAMA5_DDRCS_HEAP_OFFSET and SAMA5_DDRCS_HEAP_SIZE so that the page
+ cache region does not overlap the region of DRAM that is added to
+ the heap.
+
+config SAMA5_DDRCS_PGHEAP_SIZE
+ int "DDR-SDRAM heap size"
+ default 0
+ ---help---
+ Add the region of DDR-SDRAM beginning at SAMA5_DDRCS_PGHEAP_OFFSET
+ and of size SAMA5_DDRCS_PGHEAP_SIZE to the heap.
+
+ If you are executing from DRAM, then you must have already reserved
+ this region with SAMA5_DDRCS_RESERVE, setting SAMA5_DDRCS_HEAP_END
+ so that this page cache region defined by SAMA5_DDRCS_PGHEAP_OFFSET
+ and SAMA5_DDRCS_PGHEAP_SIZE does not overlap the region of DRAM
+ that is added to the heap. If you are not executing from DRAM, then
+ you must have excluded this page cache region from the heap either
+ by (1) not selecting SAMA5_DDRCS_HEAP, or (2) selecting
+ SAMA5_DDRCS_HEAP_OFFSET and SAMA5_DDRCS_HEAP_SIZE so that the page
+ cache region does not overlap the region of DRAM that is added to
+ the heap.
+
+endif # SAMA5_DDRCS_PGHEAP
+
config SAMA5_EBICS0_HEAP
bool "Include EBICS0 SRAM/PSRAM in heap"
default y
diff --git a/nuttx/arch/arm/src/sama5/Make.defs b/nuttx/arch/arm/src/sama5/Make.defs
index a3cfe80e0..5b63c91e0 100644
--- a/nuttx/arch/arm/src/sama5/Make.defs
+++ b/nuttx/arch/arm/src/sama5/Make.defs
@@ -113,6 +113,10 @@ CHIP_CSRCS += sam_sckc.c sam_serial.c
# Configuration dependent C and assembly language files
+ifneq ($(CONFIG_MM_PGALLOC),y)
+CHIP_CSRCS += sam_pgalloc.c
+endif
+
ifneq ($(CONFIG_SCHED_TICKLESS),y)
CHIP_CSRCS += sam_timerisr.c
endif
diff --git a/nuttx/arch/arm/src/sama5/sam_pgalloc.c b/nuttx/arch/arm/src/sama5/sam_pgalloc.c
new file mode 100644
index 000000000..b5dc00c46
--- /dev/null
+++ b/nuttx/arch/arm/src/sama5/sam_pgalloc.c
@@ -0,0 +1,110 @@
+/****************************************************************************
+ * arch/arm/src/sama5/sam_pgalloc.c
+ *
+ * Copyright (C) 2014 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sys/types.h>
+#include <assert.h>
+
+#include <nuttx/arch.h>
+#include <nuttx/pgalloc.h>
+
+#ifdef CONFIG_MM_PGALLOC
+
+/****************************************************************************
+ * Private Definitions
+ ****************************************************************************/
+/* Currently, page cache memory must be allocated in DRAM. There are other
+ * possibilities, but the logic in this file will have to be extended in
+ * order to handle any other possibility.
+ */
+
+#ifdef CONFIG_SAMA5_DDRCS_PGHEAP
+# error CONFIG_SAMA5_DDRCS_PGHEAP must be selected
+#endif
+
+#ifdef CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET
+# error CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET must be specified
+#endif
+
+#if (CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET & MM_PGMASK) != 0
+# warning CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET is not aligned to a page boundary
+#endif
+
+#ifdef CONFIG_SAMA5_DDRCS_PGHEAP_SIZE
+# error CONFIG_SAMA5_DDRCS_PGHEAP_SIZE must be specified
+#endif
+
+#if (CONFIG_SAMA5_DDRCS_PGHEAP_SIZE & MM_PGMASK) != 0
+# warning CONFIG_SAMA5_DDRCS_PGHEAP_SIZE is not aligned to a page boundary
+#endif
+
+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+ /****************************************************************************
+ * Name: up_allocate_pgheap
+ *
+ * Description:
+ * If there is a page allocator in the configuration, then this function
+ * must be provided by the platform-specific code. The OS initialization
+ * logic will call this function early in the initialization sequence to
+ * get the page heap information needed to configure the page allocator.
+ *
+ ****************************************************************************/
+
+void up_allocate_pgheap(FAR void **heap_start, size_t *heap_size)
+{
+ DEBUGASSERT(heap_start && heap_size);
+
+ *heap_start = (FAR void *)((uintptr_t)SAM_DDRCS_VSECTION +
+ CONFIG_SAMA5_DDRCS_PGHEAP_OFFSET);
+ *heap_size = CONFIG_SAMA5_DDRCS_PGHEAP_SIZE;
+}
+
+#endif /* CONFIG_MM_PGALLOC */
diff --git a/nuttx/include/nuttx/addrenv.h b/nuttx/include/nuttx/addrenv.h
index b5373880d..eedbf6d43 100644
--- a/nuttx/include/nuttx/addrenv.h
+++ b/nuttx/include/nuttx/addrenv.h
@@ -130,6 +130,10 @@
#define CONFIG_ARCH_STACK_SIZE (CONFIG_ARCH_STACK_NPAGES * CONFIG_MM_PGSIZE)
+/* A single page scratch region used for temporary mappings */
+
+#define ARCH_SCRATCH_VADDR (CONFIG_ARCH_STACK_VBASE + CONFIG_ARCH_STACK_SIZE)
+
/****************************************************************************
* Private Data
****************************************************************************/
diff --git a/nuttx/include/nuttx/arch.h b/nuttx/include/nuttx/arch.h
index 81847acd2..eca358442 100644
--- a/nuttx/include/nuttx/arch.h
+++ b/nuttx/include/nuttx/arch.h
@@ -650,6 +650,21 @@ void up_allocate_kheap(FAR void **heap_start, size_t *heap_size);
#endif
/****************************************************************************
+ * Name: up_allocate_pgheap
+ *
+ * Description:
+ * If there is a page allocator in the configuration, then this function
+ * must be provided by the platform-specific code. The OS initialization
+ * logic will call this function early in the initialization sequence to
+ * get the page heap information needed to configure the page allocator.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_MM_PGALLOC
+void up_allocate_pgheap(FAR void **heap_start, size_t *heap_size);
+#endif
+
+/****************************************************************************
* Name: up_setpicbase, up_getpicbase
*
* Description:
diff --git a/nuttx/include/nuttx/pgalloc.h b/nuttx/include/nuttx/pgalloc.h
index 1db59dee5..ff9e01f8a 100644
--- a/nuttx/include/nuttx/pgalloc.h
+++ b/nuttx/include/nuttx/pgalloc.h
@@ -56,11 +56,6 @@
* CONFIG_MM_PGSIZE - The page size. Must be one of {1024, 2048,
* 4096, 8192, or 16384}. This is easily extensible, but only those
* values are currently support.
- * CONFIG_MM_PGPOOL_PADDR - Physical address of the start of the page
- * memory pool. This will be aligned to the page size if it is not
- * already aligned.
- * CONFIG_MM_PGPOOL_SIZE - The size of the page memory pool in bytes. This
- * will be aligned if it is not already aligned.
* CONFIG_DEBUG_PGALLOC - Just like CONFIG_DEBUG_MM, but only generates
* output from the page allocation logic.
*
@@ -71,34 +66,30 @@
# define CONFIG_MM_PGALLOC_PGSIZE 4096
#endif
-#ifndef CONFIG_MM_PGPOOL_PADDR
-# error CONFIG_MM_PGPOOL_PADDR must be defined
-#endif
-
-#ifndef CONFIG_MM_PGPOOL_SIZE
-# error CONFIG_MM_PGPOOL_SIZE must be defined
-#endif
-
#if CONFIG_MM_PGSIZE == 1024
-# define MM_PGSIZE 1024
-# define MM_PGSHIFT 10
+# define MM_PGSIZE 1024
+# define MM_PGSHIFT 10
#elif CONFIG_MM_PGSIZE == 2048
-# define MM_PGSIZE 2048
-# define MM_PGSHIFT 11
+# define MM_PGSIZE 2048
+# define MM_PGSHIFT 11
#elif CONFIG_MM_PGSIZE == 4096
-# define MM_PGSIZE 4096
-# define MM_PGSHIFT 12
+# define MM_PGSIZE 4096
+# define MM_PGSHIFT 12
#elif CONFIG_MM_PGSIZE == 8192
-# define MM_PGSIZE 8192
-# define MM_PGSHIFT 13
+# define MM_PGSIZE 8192
+# define MM_PGSHIFT 13
#elif CONFIG_MM_PGSIZE == 16384
-# define MM_PGSIZE 16384
-# define MM_PGSHIFT 14
+# define MM_PGSIZE 16384
+# define MM_PGSHIFT 14
#else
# error CONFIG_MM_PGSIZE not supported
#endif
-#define MM_PGMASK (MM_PGSIZE - 1)
+#define MM_PGMASK (MM_PGSIZE - 1)
+#define MM_PGALIGNDOWN(a) ((uintptr_t)(a) & ~MM_PGMASK)
+#define MM_PGALIGNUP(a) (((uintptr_t)(a) + MM_PGMASK) & ~MM_PGMASK)
+#define MM_NPAGES(s) (((uintptr_t)(a) + MM_PGMASK) >> MM_PGSHIFT)
+#define MM_ISALIGNED(a) (((uintptr_t)(a) & MM_PGMASK) == 0)
/****************************************************************************
* Public Types
@@ -123,14 +114,17 @@ extern "C"
* Initialize the page allocator.
*
* Input Parameters:
- * None
+ * heap_start - The physical address of the start of memory region that
+ * will be used for the page allocator heap
+ * heap_size - The size (in bytes) of the memory region that will be used
+ * for the page allocator heap.
*
* Returned Value:
- * Mpme
+ * None
*
****************************************************************************/
-void mm_pginitialize(void);
+void mm_pginitialize(FAR void *heap_start, size_t heap_size);
/****************************************************************************
* Name: mm_pgreserve
diff --git a/nuttx/mm/Kconfig b/nuttx/mm/Kconfig
index 0f7d56102..977baeb86 100644
--- a/nuttx/mm/Kconfig
+++ b/nuttx/mm/Kconfig
@@ -158,18 +158,6 @@ config MM_PGSIZE
16384}. This is easily extensible, but only those values are
currently support.
-config MM_PGPOOL_PADDR
- hex "Page Memory Pool Start"
- ---help---
- Physical address of the start of the page memory pool. This
- will be aligned to the page size if it is not already aligned.
-
-config MM_PGPOOL_SIZE
- int "Page Memory Pool Size"
- ---help---
- The size of the page memory pool in bytes. This will be aligned
- if it is not already aligned.
-
config DEBUG_PGALLOC
bool "Page Allocator Debug"
default n
diff --git a/nuttx/mm/mm_pgalloc.c b/nuttx/mm/mm_pgalloc.c
index f270ce67a..bd61a4bf6 100644
--- a/nuttx/mm/mm_pgalloc.c
+++ b/nuttx/mm/mm_pgalloc.c
@@ -56,11 +56,6 @@
* CONFIG_MM_PGSIZE - The page size. Must be one of {1024, 2048,
* 4096, 8192, or 16384}. This is easily extensible, but only those
* values are currently support.
- * CONFIG_MM_PGPOOL_PADDR - Physical address of the start of the page
- * memory pool. This will be aligned to the page size if it is not
- * already aligned.
- * CONFIG_MM_PGPOOL_SIZE - The size of the page memory pool in bytes. This
- * will be aligned if it is not already aligned.
* CONFIG_DEBUG_PGALLOC - Just like CONFIG_DEBUG_MM, but only generates
* output from the page allocation logic.
*
@@ -112,28 +107,27 @@ static GRAN_HANDLE g_pgalloc;
* Initialize the page allocator.
*
* Input Parameters:
- * None
+ * heap_start - The physical address of the start of memory region that
+ * will be used for the page allocator heap
+ * heap_size - The size (in bytes) of the memory region that will be used
+ * for the page allocator heap.
*
* Returned Value:
- * Mpme
+ * None
*
****************************************************************************/
-void mm_pginitialize(void)
+void mm_pginitialize(FAR void *heap_start, size_t heap_size)
{
#ifdef CONFIG_GRAN_SINGLE
int ret;
- ret = gran_initialize((FAR void *)CONFIG_MM_PGPOOL_PADDR,
- CONFIG_MM_PGPOOL_SIZE,
- MM_PGSHIFT, MM_PGSHIFT);
+ ret = gran_initialize(heap_start, heap_size, MM_PGSHIFT, MM_PGSHIFT);
DEBUGASSERT(ret == OK);
UNUSED(ret);
#else
- g_pgalloc = gran_initialize((FAR void *)CONFIG_MM_PGPOOL_PADDR,
- CONFIG_MM_PGPOOL_SIZE,
- MM_PGSHIFT, MM_PGSHIFT);
+ g_pgalloc = gran_initialize(heap_start, heap_size, MM_PGSHIFT, MM_PGSHIFT);
DEBUGASSERT(pg_alloc != NULL);
#endif
diff --git a/nuttx/sched/init/os_start.c b/nuttx/sched/init/os_start.c
index be319ca02..fdd308dfb 100644
--- a/nuttx/sched/init/os_start.c
+++ b/nuttx/sched/init/os_start.c
@@ -358,6 +358,16 @@ void os_start(void)
up_allocate_kheap(&heap_start, &heap_size);
kmm_initialize(heap_start, heap_size);
#endif
+
+#ifdef CONFIG_MM_PGALLOC
+ /* If there is a page allocator in the configuration, then get the page
+ * heap information from the platform-specific code and configure the
+ * page allocator.
+ */
+
+ up_allocate_pgheap(&heap_start, &heap_size);
+ mm_pginitialize(heap_start, heap_size);
+#endif
}
/* Initialize tasking data structures */