summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorpatacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3>2010-08-21 21:53:41 +0000
committerpatacongo <patacongo@42af7a65-404d-4744-a932-0658087f49c3>2010-08-21 21:53:41 +0000
commit1f6504b7de207c555ce248423442b0bdf4ae4ce4 (patch)
treef269e549a1753dea4f5da53fc3f885eb02dfd6e5
parent265a1f0038dbdd5affa3f01df8c61ccb1fe3fb29 (diff)
downloadnuttx-1f6504b7de207c555ce248423442b0bdf4ae4ce4.tar.gz
nuttx-1f6504b7de207c555ce248423442b0bdf4ae4ce4.tar.bz2
nuttx-1f6504b7de207c555ce248423442b0bdf4ae4ce4.zip
Fix issues of virtual vs physical sizes of regions
git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@2875 42af7a65-404d-4744-a932-0658087f49c3
-rw-r--r--nuttx/arch/arm/src/arm/pg_macros.h64
-rwxr-xr-xnuttx/arch/arm/src/arm/up_allocpage.c42
-rwxr-xr-xnuttx/arch/arm/src/arm/up_checkmapping.c24
-rw-r--r--nuttx/arch/arm/src/arm/up_head.S2
-rwxr-xr-xnuttx/arch/arm/src/arm/up_va2pte.c120
-rw-r--r--nuttx/arch/arm/src/common/up_internal.h1
-rwxr-xr-xnuttx/arch/arm/src/lpc313x/Make.defs2
-rwxr-xr-xnuttx/include/nuttx/page.h35
8 files changed, 219 insertions, 71 deletions
diff --git a/nuttx/arch/arm/src/arm/pg_macros.h b/nuttx/arch/arm/src/arm/pg_macros.h
index 9d564e8a8..84b8df9fc 100644
--- a/nuttx/arch/arm/src/arm/pg_macros.h
+++ b/nuttx/arch/arm/src/arm/pg_macros.h
@@ -73,6 +73,10 @@
# define PTE_NPAGES PTE_TINY_NPAGES
+ /* Mask to get the page table physical address from an L1 entry */
+
+# define PG_L1_PADDRMASK PMD_FINE_TEX_MASK
+
/* L2 Page table address */
# define PG_L2_BASE_PADDR PGTABLE_FINE_BASE_PADDR
@@ -93,6 +97,10 @@
# define PTE_NPAGES PTE_SMALL_NPAGES
+ /* Mask to get the page table physical address from an L1 entry */
+
+# define PG_L1_PADDRMASK PMD_COARSE_TEX_MASK
+
/* L2 Page table address */
# define PG_L2_BASE_PADDR PGTABLE_COARSE_BASE_PADDR
@@ -127,7 +135,7 @@
#define PG_L2_PAGED_PADDR (PG_L2_BASE_PADDR + PG_L2_LOCKED_SIZE)
#define PG_L2_PAGED_VADDR (PG_L2_BASE_VADDR + PG_L2_LOCKED_SIZE)
-#define PG_L2_PAGED_SIZE (4*CONFIG_PAGING_NPAGED)
+#define PG_L2_PAGED_SIZE (4*CONFIG_PAGING_NPPAGED)
/* This describes the overall text region */
@@ -202,23 +210,25 @@
/* This is the total number of pages used in the text/data mapping: */
-#define PG_TOTAL_NPAGES (PG_TEXT_NPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
-#if PG_TOTAL_NPAGES >PG_RAM_PAGES
+#define PG_TOTAL_NPPAGES (PG_TEXT_NPPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
+#define PG_TOTAL_NVPAGES (PG_TEXT_NVPAGES + PG_DATA_PAGES + PG_PGTABLE_NPAGES)
+#if PG_TOTAL_NPPAGES >PG_RAM_PAGES
# error "Total pages required exceeds RAM size"
#endif
/* For page managment purposes, the following summarize the "heap" of
* free pages, operations on free pages and the L2 page table.
*
- * PG_POOL_L2NDX(va) - Converts a virtual address in the paged SRAM
- * region into a index into the paged region of
- * the L2 page table.
- * PG_POOL_L2OFFSET(va) - Converts a virtual address in the paged SRAM
- * region into a byte offset into the paged
- * region of the L2 page table.
- * PG_POOL_L2VADDR(va) - Converts a virtual address in the paged SRAM
- * region into the virtual address of the
- * corresponding PTE entry.
+ * PG_POOL_VA2L1OFFSET(va) - Given a virtual address, return the L1 table
+ * offset (in bytes).
+ * PG_POOL_VA2L1VADDR(va) - Given a virtual address, return the virtual
+ * address of the L1 table entry
+ * PG_POOL_L12PPTABLE(L1) - Given the value of an L1 table entry return
+ * the physical address of the start of the L2
+ * page table
+ * PG_POOL_L12VPTABLE(L1) - Given the value of an L1 table entry return
+ * the virtual address of the start of the L2
+ * page table.
*
* PG_POOL_L1VBASE - The virtual address of the start of the L1
* page table range corresponding to the first
@@ -239,22 +249,12 @@
* text region (the address at the beginning of
* the page).
* PG_POOL_MAXL2NDX - This is the maximum value+1 of such an index.
- * PG_POOL_NDX2L2VADDR(ndx) - Converts an index to the corresponding address
- * in the L1 page table entry.
- * PG_POOL_VA2L2VADDR(va) - Converts a virtual address within the paged
- * text region to the corresponding address in
- * the L2 page table entry.
- *
- * PG_POOL_PGPADDR(ndx) - Converts an index into the corresponding
+ *
+ * PG_POOL_PGPADDR(ndx) - Converts a page index into the corresponding
* (physical) address of the backing page memory.
- * PG_POOL_PGVADDR(ndx) - Converts an index into the corresponding
+ * PG_POOL_PGVADDR(ndx) - Converts a page index into the corresponding
* (virtual)address of the backing page memory.
*
- * PG_POOL_VIRT2PHYS(va) - Convert a virtual address within the paged
- * text region into a physical address.
- * PG_POOL_PHYS2VIRT(va) - Convert a physical address within the paged
- * text region into a virtual address.
- *
* These are used as follows: If a miss occurs at some virtual address, va,
* A new page index, ndx, is allocated. PG_POOL_PGPADDR(i) converts the index
* into the physical address of the page memory; PG_POOL_L2VADDR(va) converts
@@ -262,25 +262,21 @@
* written.
*/
-#define PG_POOL_L2NDX(va) ((va) - PG_PAGED_VBASE) >> PAGESHIFT)
-#define PG_POOL_L2OFFSET(va) (PG_POOL_L2NDX(va) << 2)
-#define PG_POOL_L2VADDR(va) (PG_L2_PAGED_VADDR + PG_POOL_L2OFFSET(va))
+#define PG_POOL_VA2L1OFFSET(va) (((va) >> 20) << 2)
+#define PG_POOL_VA2L1VADDR(va) (PGTABLE_BASE_VADDR + PG_POOL_VA2L1OFFSET(va))
+#define PG_POOL_L12PPTABLE(L1) ((L1) & PG_L1_PADDRMASK)
+#define PG_POOL_L12VPTABLE(L1) (PG_POOL_L12PPTABLE(L1) - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR)
#define PG_POOL_L1VBASE (PGTABLE_BASE_VADDR + ((PG_PAGED_VBASE >> 20) << 2))
-#define PG_POOL_L1VEND (PG_POOL_L1VBASE + (CONFIG_PAGING_NPAGED << 2))
+#define PG_POOL_L1VEND (PG_POOL_L1VBASE + (CONFIG_PAGING_NVPAGED << 2))
#define PG_POOL_VA2L2NDX(va) (((va) - PG_PAGED_VBASE) >> PAGESHIFT)
#define PG_POOL_NDX2VA(ndx) (((ndx) << PAGESHIFT) + PG_PAGED_VBASE)
#define PG_POOL_MAXL2NDX PG_POOL_VA2L2NDX(PG_PAGED_VEND)
-#define PG_POOL_NDX2L2VADDR(ndx) (PG_L2_PAGED_VADDR + ((ndx) << 2))
-#define PG_POOL_VA2L2VADDR(va) PG_POOL_NDX2L2VADDR(PG_POOL_VA2L2NDX(va))
#define PG_POOL_PGPADDR(ndx) (PG_PAGED_PBASE + ((ndx) << PAGESHIFT))
#define PG_POOL_PGVADDR(ndx) (PG_PAGED_VBASE + ((ndx) << PAGESHIFT))
-#define PG_POOL_VIRT2PHYS(va) ((va) + (PG_PAGED_PBASE - PG_PAGED_VBASE))
-#define PG_POOL_PHYS2VIRT(pa) ((pa) + (PG_PAGED_VBASE - PG_PAGED_PBASE))
-
#endif /* CONFIG_PAGING */
/****************************************************************************
diff --git a/nuttx/arch/arm/src/arm/up_allocpage.c b/nuttx/arch/arm/src/arm/up_allocpage.c
index 8881620db..cd577ee9e 100755
--- a/nuttx/arch/arm/src/arm/up_allocpage.c
+++ b/nuttx/arch/arm/src/arm/up_allocpage.c
@@ -60,9 +60,9 @@
* Private Types
****************************************************************************/
-#if CONFIG_PAGING_NPAGED < 256
+#if CONFIG_PAGING_NPPAGED < 256
typedef uint8_t pgndx_t;
-#elif CONFIG_PAGING_NPAGED < 65536
+#elif CONFIG_PAGING_NPPAGED < 65536
typedef uint16_t pgndx_t;
#else
typedef uint32_t pgndx_t;
@@ -98,7 +98,7 @@ static pgndx_t g_pgndx;
* another index to the mapped virtual page.
*/
-static L1ndx_t g_ptemap[CONFIG_PAGING_NPAGED];
+static L1ndx_t g_ptemap[CONFIG_PAGING_NPPAGED];
/* The contents of g_ptemap[] are not valid until g_pgndx has wrapped at
* least one time.
@@ -162,7 +162,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
uintptr_t paddr;
uint32_t *pte;
unsigned int pgndx;
- unsigned int l2ndx;
/* Since interrupts are disabled, we don't need to anything special. */
@@ -173,13 +172,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
vaddr = tcb->xcp.far;
DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
- /* Verify that this virtual address was previously unmapped */
-
-#if CONFIG_DEBUG
- pte = (uint32_t*)PG_POOL_L2VADDR(vaddr);
- DEBUGASSERT(*pte == 0);
-#endif
-
/* Allocate page memory to back up the mapping. Start by getting the
* index of the next page that we are going to allocate.
*/
@@ -191,10 +183,6 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
g_pgwrap = true;
}
- /* Then convert the index to a (physical) page address. */
-
- paddr = PG_POOL_PGPADDR(pgndx);
-
/* Was this physical page previously mapped? If so, then we need to un-map
* it.
*/
@@ -205,27 +193,35 @@ int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
* mapping -- then zero it!
*/
- l2ndx = g_ptemap[pgndx];
- pte = (uint32_t*)PG_POOL_NDX2L2VADDR(l2ndx);
+ uintptr_t oldvaddr = PG_POOL_NDX2VA(g_ptemap[pgndx]);
+ pte = up_va2pte(oldvaddr);
*pte = 0;
- /* Invalidate the TLB corresponding to the virtual address */
+ /* Invalidate the instruction TLB corresponding to the virtual address */
- tlb_inst_invalidate_single(PG_POOL_NDX2VA(l2ndx))
+ tlb_inst_invalidate_single(oldvaddr);
}
+ /* Then convert the index to a (physical) page address. */
+
+ paddr = PG_POOL_PGPADDR(pgndx);
+
/* Now setup up the new mapping. Get a pointer to the L2 entry
* corresponding to the new mapping. Then set it map to the newly
* allocated page address.
*/
- pte = (uint32_t*)PG_POOL_VA2L2VADDR(va)
+ pte = up_va2pte(vaddr);
*pte = (paddr | MMU_L2_TEXTFLAGS);
- /* Finally, return the virtual address of allocated page */
+ /* And save the new L2 page table index for this physical page */
+
+ g_ptemap[pgndx] = PG_POOL_VA2L2NDX(vaddr);
+
+ /* Finally, return the virtual address of allocated page */
- *vpage = (void*)PG_POOL_PHYS2VIRT(paddr);
- return OK;
+ *vpage = (void*)(vaddr & ~PAGEMASK);
+ return OK;
}
#endif /* CONFIG_PAGING */
diff --git a/nuttx/arch/arm/src/arm/up_checkmapping.c b/nuttx/arch/arm/src/arm/up_checkmapping.c
index 9f7aa5c3f..9b7a166f0 100755
--- a/nuttx/arch/arm/src/arm/up_checkmapping.c
+++ b/nuttx/arch/arm/src/arm/up_checkmapping.c
@@ -41,11 +41,14 @@
#include <nuttx/config.h>
+#include <stdint.h>
#include <debug.h>
#include <nuttx/sched.h>
#include <nuttx/page.h>
+#include "up_internal.h"
+
#ifdef CONFIG_PAGING
/****************************************************************************
@@ -96,8 +99,25 @@
bool up_checkmapping(FAR _TCB *tcb)
{
-# warning "Not implemented"
- return false;
+ uintptr_t vaddr;
+ uint32_t *pte;
+
+ /* Since interrupts are disabled, we don't need to anything special. */
+
+ DEBUGASSERT(tcb);
+
+ /* Get the virtual address that caused the fault */
+
+ vaddr = tcb->xcp.far;
+ DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
+
+ /* Get the PTE associated with this virtual address */
+
+ pte = up_va2pte(vaddr);
+
+ /* Return true if this virtual address is mapped. */
+
+ return (*pte != 0);
}
#endif /* CONFIG_PAGING */
diff --git a/nuttx/arch/arm/src/arm/up_head.S b/nuttx/arch/arm/src/arm/up_head.S
index 24fb381ae..d6e2196fe 100644
--- a/nuttx/arch/arm/src/arm/up_head.S
+++ b/nuttx/arch/arm/src/arm/up_head.S
@@ -352,7 +352,7 @@ __start:
.Ltxtspan:
.long PG_L2_TEXT_PADDR /* Physical address of L2 table */
.long PG_TEXT_VBASE /* Virtual address of text base */
- .long PG_TEXT_NPAGES /* Total mapped text pages */
+ .long PG_TEXT_NVPAGES /* Total virtual text pages to be mapped */
.long MMU_L1_TEXTFLAGS /* L1 MMU flags to use */
.Ltxtmap:
diff --git a/nuttx/arch/arm/src/arm/up_va2pte.c b/nuttx/arch/arm/src/arm/up_va2pte.c
new file mode 100755
index 000000000..84bf25882
--- /dev/null
+++ b/nuttx/arch/arm/src/arm/up_va2pte.c
@@ -0,0 +1,120 @@
+/****************************************************************************
+ * arch/arm/src/arm/up_va2pte.c
+ * Utility to map a virtual address to a L2 page table entry.
+ *
+ * Copyright (C) 2010 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <spudmonkey@racsa.co.cr>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdint.h>
+#include <debug.h>
+
+#include <nuttx/sched.h>
+#include <nuttx/page.h>
+
+#include "pg_macros.h"
+#include "up_internal.h"
+
+#ifdef CONFIG_PAGING
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Private Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_va2pte()
+ *
+ * Description:
+ * Convert a virtual address within the paged text region into a pointer to
+ * the corresponding page table entry.
+ *
+ * Input Parameters:
+ * vaddr - The virtual address within the paged text region.
+ *
+ * Returned Value:
+ * A pointer to the corresponding page table entry.
+ *
+ * Assumptions:
+ * - This function is called from the normal tasking context (but with
+ * interrupts disabled). The implementation must take whatever actions
+ * are necessary to assure that the operation is safe within this
+ * context.
+ *
+ ****************************************************************************/
+
+uint32_t *up_va2pte(uintptr_t vaddr)
+{
+ uint32_t L1;
+ uint32_t *L2;
+ unsigned int ndx;
+
+ /* The virtual address is expected to lie in the paged text region */
+
+ DEBUGASSERT(vaddr >= PG_PAGED_VBASE && vaddr < PG_PAGED_VEND);
+
+ /* Get the L1 table entry associated with this virtual address */
+
+ L1 = *(uint32_t*)PG_POOL_VA2L1VADDR(vaddr);
+
+ /* Get the address of the L2 page table from the L1 entry */
+
+ L2 = (uint32_t*)PG_POOL_L12VPTABLE(L1);
+
+ /* Get the index into the L2 page table. Each L1 entry maps
+ * 256 x 4Kb or 1024 x 1Kb pages.
+ */
+
+ ndx = ((vaddr & 0x000fffff) >> PAGESHIFT);
+
+ /* Return a pointer to the PTE for this virtual address. */
+
+ return &L2[ndx];
+}
+
+#endif /* CONFIG_PAGING */
diff --git a/nuttx/arch/arm/src/common/up_internal.h b/nuttx/arch/arm/src/common/up_internal.h
index d8928a428..51f1719db 100644
--- a/nuttx/arch/arm/src/common/up_internal.h
+++ b/nuttx/arch/arm/src/common/up_internal.h
@@ -187,6 +187,7 @@ extern int up_hardfault(int irq, FAR void *context);
extern void up_doirq(int irq, uint32_t *regs);
#ifdef CONFIG_PAGING
extern void up_pginitialize(void);
+extern uint32_t *up_va2pte(uintptr_t vaddr);
extern void up_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr);
#else /* CONFIG_PAGING */
# define up_pginitialize()
diff --git a/nuttx/arch/arm/src/lpc313x/Make.defs b/nuttx/arch/arm/src/lpc313x/Make.defs
index ebe96f38a..5ca34d1cc 100755
--- a/nuttx/arch/arm/src/lpc313x/Make.defs
+++ b/nuttx/arch/arm/src/lpc313x/Make.defs
@@ -47,7 +47,7 @@ CMN_CSRCS = up_assert.c up_blocktask.c up_copystate.c up_createstack.c \
up_undefinedinsn.c up_usestack.c
ifeq ($(CONFIG_PAGING),y)
-CMN_CSRCS += up_pginitialize.c up_checkmapping.c up_allocpage.c
+CMN_CSRCS += up_pginitialize.c up_checkmapping.c up_allocpage.c up_va2pte.c
endif
CGU_ASRCS =
diff --git a/nuttx/include/nuttx/page.h b/nuttx/include/nuttx/page.h
index dd1be371b..b46a893b6 100755
--- a/nuttx/include/nuttx/page.h
+++ b/nuttx/include/nuttx/page.h
@@ -78,7 +78,8 @@
/* CONFIG_PAGING_NLOCKED - This is the number of locked pages in the memory
* map. The size of locked address region will then be given by
- * PG_LOCKED_SIZE.
+ * PG_LOCKED_SIZE. These values applies to both physical and virtual memory
+ * regions.
*/
#define PG_LOCKED_SIZE (CONFIG_PAGING_NLOCKED << PAGESHIFT)
@@ -111,13 +112,25 @@
# error "Base address of the locked region is not page aligned"
#endif
-/* CONFIG_PAGING_NPAGED - This is the number of paged pages in the memory
- * map. The size of paged address region will then be:
+/* CONFIG_PAGING_NPPAGED - This is the number of physical pages available to
+ * support the paged text region.
+ * CONFIG_PAGING_NVPAGED - The actual size of the paged text region (in
+ * pages). This is also the number of virtual pages required to support
+ * the entire paged region. This feature is intended to support only the
+ * case where the virtual paged text area is much larger than the available
+ * physical pages. Otherwise, why would you be using on-demand paging?
*/
-#define PG_PAGED_SIZE (CONFIG_PAGING_NPAGED << PAGESHIFT)
+#if CONFIG_PAGING_NPPAGED >= CONFIG_PAGING_NVPAGED
+# error "CONFIG_PAGING_NPPAGED must be less than CONFIG_PAGING_NVPAGED"
+#endif
+
+/* The size of physical and virtual paged address regions will then be: */
+
+#define PG_PAGED_PSIZE (CONFIG_PAGING_NPPAGED << PAGESHIFT)
+#define PG_PAGED_VSIZE (CONFIG_PAGING_NVPAGED << PAGESHIFT)
-/* This positions the paging Read-Only text section. If the configuration
+/* This positions the paging Read-Only text region. If the configuration
* did not override the default, the paged region will immediately follow
* the locked region.
*/
@@ -130,8 +143,8 @@
# define PG_PAGED_VBASE PG_LOCKED_VEND
#endif
-#define PG_PAGED_PEND (PG_PAGED_PBASE + PG_PAGED_SIZE)
-#define PG_PAGED_VEND (PG_PAGED_VBASE + PG_PAGED_SIZE)
+#define PG_PAGED_PEND (PG_PAGED_PBASE + PG_PAGED_PSIZE)
+#define PG_PAGED_VEND (PG_PAGED_VBASE + PG_PAGED_VSIZE)
/* Size and description of the overall text section. The number of
* pages in the text section is the sum of the number of pages in
@@ -139,8 +152,10 @@
* is the base of the locked region.
*/
-#define PG_TEXT_NPAGES (CONFIG_PAGING_NLOCKED + CONFIG_PAGING_NPAGED)
-#define PG_TEXT_SIZE (PG_TEXT_NPAGES << PAGESHIFT)
+#define PG_TEXT_NPPAGES (CONFIG_PAGING_NLOCKED + CONFIG_PAGING_NPPAGED)
+#define PG_TEXT_NVPAGES (CONFIG_PAGING_NLOCKED + CONFIG_PAGING_NVPAGED)
+#define PG_TEXT_PSIZE (PG_TEXT_NPPAGES << PAGESHIFT)
+#define PG_TEXT_VSIZE (PG_TEXT_NVPAGES << PAGESHIFT)
#define PG_TEXT_PBASE PG_LOCKED_PBASE
#define PG_TEXT_VBASE PG_LOCKED_VBASE
@@ -159,7 +174,7 @@
#ifdef CONFIG_PAGING_NDATA
# PG_DATA_NPAGES CONFIG_PAGING_NDATA
-#elif PG_RAM_PAGES > PG_TEXT_NPAGES
+#elif PG_RAM_PAGES > PG_TEXT_NPPAGES
# PG_DATA_NPAGES (PG_RAM_PAGES - PG_TEXT_NPAGES)
#else
# error "Not enough memory for this page layout"