From 6c3a7556f34d9e1a0610c063b3a919af23785acb Mon Sep 17 00:00:00 2001
From: patacongo
Date: Thu, 19 Aug 2010 12:57:16 +0000
Subject: More paging changes

git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@2868 42af7a65-404d-4744-a932-0658087f49c3
---
 nuttx/arch/arm/src/arm/pg_macros.h        | 197 ++++++++++++++++++++++++++++++
 nuttx/arch/arm/src/arm/up_dataabort.c     |   2 +-
 nuttx/arch/arm/src/arm/up_head.S          |  37 ++++--
 nuttx/arch/arm/src/arm/up_prefetchabort.c |   3 +-
 nuttx/include/nuttx/page.h                |  56 +++++++--
 5 files changed, 270 insertions(+), 25 deletions(-)
 create mode 100644 nuttx/arch/arm/src/arm/pg_macros.h

diff --git a/nuttx/arch/arm/src/arm/pg_macros.h b/nuttx/arch/arm/src/arm/pg_macros.h
new file mode 100644
index 000000000..bf79ba32a
--- /dev/null
+++ b/nuttx/arch/arm/src/arm/pg_macros.h
@@ -0,0 +1,197 @@
+/****************************************************************************
+ * arch/arm/src/arm/pg_macros.h
+ *
+ * Copyright (C) 2010 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARM_PG_MACROS_H
+#define __ARCH_ARM_SRC_ARM_PG_MACROS_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include "arm.h"
+
+/****************************************************************************
+ * Definitions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Assembly Macros
+ ****************************************************************************/
+
+/* Write one L2 entry for a coarse page table entry.
+ *
+ * Inputs (unmodified):
+ *   ctab - Register containing the address of the coarse page table
+ *   paddr - Physical address of the page to be mapped
+ *   vaddr - Virtual address of the page to be mapped
+ *   mmuflags - The MMU flags to use in the mapping
+ *
+ * Scratch registers (modified): tmp1, tmp2
+ */
+
+	.macro	wrl2coarse, ctab, paddr, vaddr, mmuflags, tmp1, tmp2
+
+	/* Get tmp1 = (paddr | mmuflags), the value to write into the table */
+
+	orr	\tmp1, \mmuflags, \paddr
+
+	/* index  = (vaddr & 0x000ff000) >> 12
+	 * offset = (vaddr & 0x000ff000) >> 10
+	 */
+
+	and	\tmp2, \vaddr, #0x000ff000
+
+	/* Write the value into the table at that offset */
+
+	str	\tmp1, [\ctab, \tmp2, lsr #10]
+	.endm
+
+/* Write one L1 entry for a coarse page table.
+ *
+ * Inputs (unmodified unless noted):
+ *   paddr - Physical address of the coarse page table (modified)
+ *   vaddr - Virtual address of the section
+ *   mmuflags - MMU flags to use in the section mapping
+ *
+ * Scratch registers (modified): tmp1, tmp2
+ */
+
+	.macro	wrl1coarse, paddr, vaddr, mmuflags, tmp1, tmp2
+
+	/* tmp1 = the base of the L1 page table */
+
+	ldr	\tmp1, =PGTABLE_BASE_VADDR
+
+	/* paddr = (paddr | mmuflags), the value to write into the page table */
+
+	orr	\paddr, \paddr, \mmuflags
+
+	/* Write the value into the table at the correct offset.
+	 * table index = vaddr >> 20, offset = index << 2
+	 */
+
+	lsr	\tmp2, \vaddr, #20
+	str	\paddr, [\tmp1, \tmp2, lsl #2]
+	.endm
+
+/* Write one coarse L1 entry and all associated L2 entries for a
+ * coarse page table.
+ *
+ * Inputs:
+ *   offset - coarse page table offset (unmodified)
+ *   paddr - Physical address of the section (modified)
+ *   vaddr - Virtual address of the section (modified)
+ *   npages - Number of pages to write in the section (modified)
+ *
+ * Scratch registers (modified): tmp1, tmp2, tmp3, tmp4
+ *
+ * On return, paddr and vaddr refer to the beginning of the
+ * next section.
+ */
+
+	.macro	wrcoarse, offset, paddr, vaddr, npages, tmp1, tmp2, tmp3, tmp4
+
+	/* Write the L1 section entry that refers to this coarse page
+	 * table.  This must be done now, while vaddr still refers to
+	 * the beginning of the section.
+	 */
+
+	ldr	\tmp1, =PGTABLE_COARSE_BASE_PADDR
+	add	\tmp1, \offset, \tmp1
+	ldr	\tmp2, =MMU_L1_VECTORFLAGS
+	wrl1coarse \tmp1, \vaddr, \tmp2, \tmp3, \tmp4
+
+	/* tmp1 = address of the coarse page table; tmp2 = MMU flags */
+
+	ldr	\tmp1, =PGTABLE_COARSE_BASE_VADDR
+	add	\tmp1, \offset, \tmp1
+	ldr	\tmp2, =MMU_L2_VECTORFLAGS
+	b	2f
+1:
+	/* Write the next L2 entry into the coarse page table */
+
+	wrl2coarse \tmp1, \paddr, \vaddr, \tmp2, \tmp3, \tmp4
+
+	/* Update the physical and virtual addresses that will
+	 * correspond to the next table entry.
+	 */
+
+	add	\paddr, \paddr, #4096
+	add	\vaddr, \vaddr, #4096
+2:
+	/* Check if all of the pages have been written.  If not, then
+	 * loop and write the next entry.
+	 */
+
+	sub	\npages, \npages, #1
+	cmn	\npages, #1
+	bne	1b
+	.endm
+
+/* Write several, contiguous coarse L1 page table entries (and all
+ * associated L2 page table entries).  As many entries will be
+ * written as are needed to span npages.
+ *
+ * Inputs:
+ *   offset - coarse page table offset (modified)
+ *   paddr - Physical address of the section (modified)
+ *   vaddr - Virtual address of the section (modified)
+ *   npages - Number of pages to write (modified)
+ *
+ * Scratch registers (modified): tmp1, tmp2, tmp3, tmp4, tmp5
+ */
+
+	.macro	wrsections, offset, paddr, vaddr, npages, tmp1, tmp2, tmp3, tmp4, tmp5
+	b	2f
+1:
+	/* Select the number of coarse, 4Kb pages to write in this section.
+	 * This number will be 256 unless there are fewer than 256 pages
+	 * remaining to be mapped.
+ */ + + cmp \npages, #255 /* Check if <= 255 */ + movls \tmp1, \npages /* YES.. tmp1 = npages */ + movls \npages, #0 /* npages = 0 */ + movhi \tmp1, #256 /* NO.. tmp1 = 256 */ + subhi \npages, \npages, #256 /* npages -= 256 */ + + /* Write the L2 entries for this section */ + + wrcoarse \offset, \paddr, \vaddr, \tmp1, \tmp1, \tmp2, \tmp3, \tmp4 + add \offset, \offset, #1024 +2: + cmp \npages, #0 + bne 1b + .endm +#endif /* __ARCH_ARM_SRC_ARM_PG_MACROS_H */ diff --git a/nuttx/arch/arm/src/arm/up_dataabort.c b/nuttx/arch/arm/src/arm/up_dataabort.c index 52a595d6d..ecad23b30 100644 --- a/nuttx/arch/arm/src/arm/up_dataabort.c +++ b/nuttx/arch/arm/src/arm/up_dataabort.c @@ -133,7 +133,7 @@ void up_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr) * (It has not yet been saved in the register context save area). */ - if (far < CONFIG_PAGING_PAGEDBASE || far >= CONFIG_PAGING_PAGEDEND) + if (far < PG_PAGEDBASE || far >= PG_PAGEDEND) { goto segfault; } diff --git a/nuttx/arch/arm/src/arm/up_head.S b/nuttx/arch/arm/src/arm/up_head.S index 57bce742c..cdb6eff7a 100644 --- a/nuttx/arch/arm/src/arm/up_head.S +++ b/nuttx/arch/arm/src/arm/up_head.S @@ -1,7 +1,7 @@ /**************************************************************************** * arch/arm/src/arm/up_head.S * - * Copyright (C) 2007, 2009 Gregory Nutt. All rights reserved. + * Copyright (C) 2007, 2009-2010 Gregory Nutt. All rights reserved. * Author: Gregory Nutt * * Redistribution and use in source and binary forms, with or without @@ -45,7 +45,7 @@ #include "up_arch.h" /********************************************************************************** - * Conditional Compilation + * Configuration **********************************************************************************/ #undef ALIGNMENT_TRAP @@ -94,22 +94,33 @@ * Definitions ****************************************************************************/ -/* EXE_NSECTIONS determines the number of 1Mb pages to map for the executable - * address region. This is based on CONFIG_DRAM_SIZE. For most ARM9 - * architectures, CONFIG_DRAM_SIZE describes the size of installed SDRAM. +/* RX_NSECTIONS determines the number of 1Mb sections to map for the + * Read/eXecute address region. This is based on CONFIG_DRAM_SIZE. For most + * ARM9 architectures, CONFIG_DRAM_SIZE describes the size of installed SDRAM. * But for other architectures, this might refer to the size of FLASH or * SRAM regions. (bad choice of naming). */ -#define EXE_NSECTIONS ((CONFIG_DRAM_SIZE+0x000fffff) >> 20) +#define RX_NSECTIONS ((CONFIG_DRAM_SIZE+0x000fffff) >> 20) + +/* If CONFIG_PAGING is defined, then + * - RX_NPAGES determines the number of pages of size PAGESIZE that + * are required to span the locked, Read/eXecute .text region. + * - RW_NPAGES determines the number of pages of size PAGESIZE this + * are required to span the Read/Write data region. + */ + +#ifdef CONFIG_PAGING +# define RX_NPAGES CONFIG_PAGING_LOCKEDPAGES +#endif /**************************************************************************** * Assembly Macros ****************************************************************************/ -/* Since the page table is closely related to the NuttX base - * address, we can convert the page table base address to the - * base address of the section containing both. +/* Since the page table is closely related to the NuttX base address, we can + * convert the page table base address to the base address of the section + * containing both. 
  */
 
 	.macro	mksection, section, pgtable
@@ -168,7 +179,7 @@ __start:
 	 * this startup logic executing out of the physical address space.  This
 	 * identity mapping will be removed by .Lvstart (see below).
 	 */
-#warning "We need to do things differently here if the .text region is smaller or if CONFIG_PAGING"
+
 	mksection r0, r4			/* r0=phys. base section */
 	ldr	r1, .LCmmuflags			/* FLGS=MMU_MEMFLAGS */
 	add	r3, r1, r0			/* r3=flags + base */
@@ -335,7 +346,7 @@ __start:
 	/* Now setup the page tables for our normal mapped SDRAM region.
 	 * We round NUTTX_START_VADDR down to the nearest megabyte boundary.
 	 */
-#warning "We need to do things differently here if the .text region is smaller or if CONFIG_PAGING"
+
 	ldr	r1, .LCmmuflags			/* FLGS=MMU_MEMFLAGS */
 	add	r3, r3, r1			/* r3=flags + base */
@@ -346,11 +357,11 @@ __start:
 	add	r0, r0, #(NUTTX_START_VADDR & 0x00f00000) >> 18
 	str	r3, [r0], #4
 
-	/* Now map the remaining EXE_NSECTIONS-1 sections of the executable
+	/* Now map the remaining RX_NSECTIONS-1 sections of the executable
 	 * memory region.
 	 */
 
-	.rept EXE_NSECTIONS-1
+	.rept RX_NSECTIONS-1
 	add	r3, r3, #SECTION_SIZE
 	str	r3, [r0], #4
 	.endr
diff --git a/nuttx/arch/arm/src/arm/up_prefetchabort.c b/nuttx/arch/arm/src/arm/up_prefetchabort.c
index 3572165b8..b3ea3cba3 100644
--- a/nuttx/arch/arm/src/arm/up_prefetchabort.c
+++ b/nuttx/arch/arm/src/arm/up_prefetchabort.c
@@ -117,8 +117,7 @@ void up_prefetchabort(uint32_t *regs)
    * virtual addresses.
    */
 
-  if (regs[REG_R15] >= CONFIG_PAGING_PAGEDBASE &&
-      regs[REG_R15] < CONFIG_PAGING_PAGEDEND)
+  if (regs[REG_R15] >= PG_PAGEDBASE && regs[REG_R15] < PG_PAGEDEND)
     {
       /* Save the offending PC as the fault address in the TCB of the currently
        * executing task. This value is, of course, already known in regs[REG_R15],
diff --git a/nuttx/include/nuttx/page.h b/nuttx/include/nuttx/page.h
index a279dcc47..ea1490c7e 100755
--- a/nuttx/include/nuttx/page.h
+++ b/nuttx/include/nuttx/page.h
@@ -54,22 +54,60 @@
 /* Configuration ************************************************************/
 /* CONFIG_PAGING_PAGESIZE - The size of one managed page.  This must be a
- * value supported by the processor's memory management unit.
- * CONFIG_PAGING_NLOCKED - This is the number of locked pages in the memory
- * map.  The locked address region will then be from:
+ * value supported by the processor's memory management unit.  The
+ * following may need to be extended to support additional page sizes at
+ * some point.
  */
 
-#define CONFIG_PAGING_LOCKEDBASE CONFIG_DRAM_VSTART
-#define CONFIG_PAGING_LOCKEDSIZE (CONFIG_PAGING_PAGESIZE*CONFIG_PAGING_NLOCKED)
-#define CONFIG_PAGING_LOCKEDEND  (CONFIG_PAGING_LOCKEDBASE + CONFIG_PAGING_LOCKEDSIZE)
+#if CONFIG_PAGING_PAGESIZE == 1024
+# define PAGESIZE  1024
+# define PAGESHIFT 10
+# define PAGEMASK  0x000003ff
+#elif CONFIG_PAGING_PAGESIZE == 4096
+# define PAGESIZE  4096
+# define PAGESHIFT 12
+# define PAGEMASK  0x00000fff
+#else
+# error "Need extended definitions for CONFIG_PAGING_PAGESIZE"
+#endif
+
+/* Alignment macros */
+
+#define PG_ALIGNDOWN(addr) ((addr) & ~PAGEMASK)
+#define PG_ALIGNUP(addr)   (((addr) + PAGEMASK) & ~PAGEMASK)
+
+/* CONFIG_PAGING_NLOCKED - This is the number of locked pages in the memory
+ * map.  The size of the locked address region will then be:
+ */
+
+#define PG_LOCKEDSIZE (CONFIG_PAGING_NLOCKED << PAGESHIFT)
+
+/* CONFIG_PAGING_LOCKEDBASE - May be defined to specify the base address
+ * of the locked page region (lowest in memory).
+ * If CONFIG_PAGING_LOCKEDBASE is not defined, then PG_LOCKEDBASE will be
+ * set to CONFIG_DRAM_VSTART (i.e., assuming that the base address of the
+ * locked region is at the virtual address of the beginning of RAM).
+ */
+
+#ifdef CONFIG_PAGING_LOCKEDBASE
+# define PG_LOCKEDBASE CONFIG_PAGING_LOCKEDBASE
+#else
+# define PG_LOCKEDBASE CONFIG_DRAM_VSTART
+#endif
+
+#define PG_LOCKEDEND (PG_LOCKEDBASE + PG_LOCKEDSIZE)
+
+#if (PG_LOCKEDBASE & PAGEMASK) != 0
+# error "Base address of the locked region is not page aligned"
+#endif
 
 /* CONFIG_PAGING_NPAGES - The number of pages in the paged region of the
  * memory map.  This paged region then begins and ends at:
  */
 
-#define CONFIG_PAGING_PAGEDBASE CONFIG_PAGING_LOCKEDEND
-#define CONFIG_PAGING_PAGEDSIZE (CONFIG_PAGING_PAGESIZE*CONFIG_PAGING_NPAGES)
-#define CONFIG_PAGING_PAGEDEND  (CONFIG_PAGING_PAGEDBASE + CONFIG_PAGING_PAGEDSIZE)
+#define PG_PAGEDSIZE (CONFIG_PAGING_NPAGES << PAGESHIFT)
+#define PG_PAGEDBASE PG_LOCKEDEND
+#define PG_PAGEDEND  (PG_PAGEDBASE + PG_PAGEDSIZE)
 
 /* CONFIG_PAGING_DEFPRIO - The default, minimum priority of the page fill
  * worker thread.  The priority of the page fill worker thread will be boosted
-- 
cgit v1.2.3
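
Annotation: the memory-map arithmetic introduced in page.h above can be traced with a
standalone C program.  The sketch below is not part of the commit; every CONFIG_* value
in it is an invented example chosen only to make the numbers concrete (a 4Kb page size,
48 locked pages, 256 paged pages, RAM at virtual address 0).

#include <stdio.h>

/* Example configuration values -- illustrative only, not NuttX defaults */

#define CONFIG_PAGING_PAGESIZE 4096
#define CONFIG_PAGING_NLOCKED  48
#define CONFIG_PAGING_NPAGES   256
#define CONFIG_DRAM_VSTART     0x00000000

/* Derived values, exactly as page.h derives them for a 4096-byte page */

#define PAGESIZE  4096
#define PAGESHIFT 12
#define PAGEMASK  0x00000fff

#define PG_ALIGNDOWN(addr) ((addr) & ~PAGEMASK)
#define PG_ALIGNUP(addr)   (((addr) + PAGEMASK) & ~PAGEMASK)

#define PG_LOCKEDSIZE (CONFIG_PAGING_NLOCKED << PAGESHIFT)
#define PG_LOCKEDBASE CONFIG_DRAM_VSTART
#define PG_LOCKEDEND  (PG_LOCKEDBASE + PG_LOCKEDSIZE)

#define PG_PAGEDSIZE  (CONFIG_PAGING_NPAGES << PAGESHIFT)
#define PG_PAGEDBASE  PG_LOCKEDEND
#define PG_PAGEDEND   (PG_PAGEDBASE + PG_PAGEDSIZE)

int main(void)
{
  /* With the example values: locked region [0x00000000, 0x00030000),
   * paged region [0x00030000, 0x00130000).
   */

  printf("Locked: %08x-%08x (%d pages)\n",
         PG_LOCKEDBASE, PG_LOCKEDEND, CONFIG_PAGING_NLOCKED);
  printf("Paged:  %08x-%08x (%d pages)\n",
         PG_PAGEDBASE, PG_PAGEDEND, CONFIG_PAGING_NPAGES);

  /* PG_ALIGNUP rounds up to the next page boundary (0x1234 -> 0x2000);
   * PG_ALIGNDOWN rounds down (0x1234 -> 0x1000).
   */

  printf("PG_ALIGNUP(0x1234)   = %08x\n", PG_ALIGNUP(0x1234));
  printf("PG_ALIGNDOWN(0x1234) = %08x\n", PG_ALIGNDOWN(0x1234));
  return 0;
}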
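
A second sketch mirrors, in C, the table-index arithmetic that the pg_macros.h assembly
macros encode.  It only illustrates the arithmetic: the function and parameter names are
made up for this note, ctabvbase/ctabpbase stand in for the PGTABLE_COARSE_BASE_VADDR and
PGTABLE_COARSE_BASE_PADDR symbols, and the flag arguments stand in for MMU_L1_VECTORFLAGS
and MMU_L2_VECTORFLAGS.  It is not code from the commit.

#include <stdint.h>

/* Mirror of wrl2coarse: one coarse page table covers 1Mb as 256 4Kb
 * pages, so the table index is bits 12-19 of the virtual address,
 * (vaddr & 0x000ff000) >> 12.  With 4-byte entries this is the byte
 * offset (vaddr & 0x000ff000) >> 10 used by the assembly macro.
 */

static void wrl2coarse(uint32_t *ctab, uint32_t paddr, uint32_t vaddr,
                       uint32_t mmuflags)
{
  ctab[(vaddr & 0x000ff000) >> 12] = paddr | mmuflags;
}

/* Mirror of wrl1coarse: the L1 table has one 4-byte descriptor per 1Mb
 * section, so the table index is vaddr >> 20 (byte offset index << 2).
 */

static void wrl1coarse(uint32_t *l1table, uint32_t ctabpaddr,
                       uint32_t vaddr, uint32_t mmuflags)
{
  l1table[vaddr >> 20] = ctabpaddr | mmuflags;
}

/* Mirror of wrsections/wrcoarse: npages is consumed in runs of at most
 * 256 pages, one run per 1Mb section, advancing the coarse table offset
 * by 1024 bytes (256 entries x 4 bytes) per section.
 */

static void wrsections(uint32_t *l1table, uint32_t *ctabvbase,
                       uint32_t ctabpbase, uint32_t paddr, uint32_t vaddr,
                       uint32_t npages, uint32_t l1flags, uint32_t l2flags)
{
  uint32_t offset = 0;

  while (npages > 0)
    {
      uint32_t n = (npages > 256) ? 256 : npages;
      uint32_t i;

      /* L1 entry first, while vaddr still refers to the section start */

      wrl1coarse(l1table, ctabpbase + offset, vaddr, l1flags);

      /* Then one L2 entry per 4Kb page in this section */

      for (i = 0; i < n; i++)
        {
          wrl2coarse(ctabvbase + offset / 4, paddr, vaddr, l2flags);
          paddr += 4096;
          vaddr += 4096;
        }

      npages -= n;
      offset += 1024;
    }
}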