summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGregory Nutt <gnutt@nuttx.org>2013-07-18 15:20:47 -0600
committerGregory Nutt <gnutt@nuttx.org>2013-07-18 15:20:47 -0600
commit56ca2f8edcd758b3d8483c89f6f7144feeb47ca9 (patch)
treee0fbc4e12013e16e1403a77ef6bc1818a5ba9b0d
parent54fdd99901b8db5ba275ea9c1f3f56efe24232ad (diff)
downloadnuttx-56ca2f8edcd758b3d8483c89f6f7144feeb47ca9.tar.gz
nuttx-56ca2f8edcd758b3d8483c89f6f7144feeb47ca9.tar.bz2
nuttx-56ca2f8edcd758b3d8483c89f6f7144feeb47ca9.zip
Some initial frame for Cortex-A5 support. No much yet
-rw-r--r--apps/ChangeLog.txt2
-rw-r--r--nuttx/ChangeLog5
-rw-r--r--nuttx/Documentation/NuttShell.html2
-rw-r--r--nuttx/arch/arm/Kconfig15
-rwxr-xr-xnuttx/arch/arm/include/armv7-a/irq.h230
-rw-r--r--nuttx/arch/arm/include/irq.h4
-rw-r--r--nuttx/arch/arm/src/Makefile4
-rw-r--r--nuttx/arch/arm/src/armv7-a/Kconfig58
-rw-r--r--nuttx/arch/arm/src/armv7-a/Toolchain.defs153
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm.h119
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_head.S663
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_vectors.S445
-rw-r--r--nuttx/arch/arm/src/armv7-a/arm_vectortab.S103
-rw-r--r--nuttx/arch/arm/src/armv7-a/cache.h447
-rw-r--r--nuttx/arch/arm/src/armv7-a/cp15.h206
-rw-r--r--nuttx/arch/arm/src/armv7-a/mmu.h332
-rw-r--r--nuttx/arch/arm/src/armv7-a/sctrl.h340
-rw-r--r--nuttx/configs/README.txt4
18 files changed, 3127 insertions, 5 deletions
diff --git a/apps/ChangeLog.txt b/apps/ChangeLog.txt
index bfc4615a8..3be433862 100644
--- a/apps/ChangeLog.txt
+++ b/apps/ChangeLog.txt
@@ -614,4 +614,4 @@
can now be built to execute on a Linux host.
* apps/nshlib/nsh_fscmds.c: Add a 'cmp' command that can be used to
compare two files for equivalence. Returns an indication if the files
- differ. Contributed by Andrew Twidgell (via Lorenz Meier) (2013-7-18).
+ differ. Contributed by Andrew Tridgell (via Lorenz Meier) (2013-7-18).
diff --git a/nuttx/ChangeLog b/nuttx/ChangeLog
index 3819f6355..4f24f5443 100644
--- a/nuttx/ChangeLog
+++ b/nuttx/ChangeLog
@@ -5132,3 +5132,8 @@
* nuttx/configs/olimex-lpc1766stk/zmodem: Add a new configuration to
test the Zmodem sz and rz commands (which don't actually exist yet,
but will). (2013-7-12).
+ * arch/arm/include/armv7-a and src/armv7-a: Beginning to add support
+   for the ARMv7-A, the Cortex-A5 in particular.  The initial checkin
+ is only fragmentary: A few header files and some copied ARM9
+ assembly files. More to come (2013-7-18).
+
diff --git a/nuttx/Documentation/NuttShell.html b/nuttx/Documentation/NuttShell.html
index 99510817b..500bfb195 100644
--- a/nuttx/Documentation/NuttShell.html
+++ b/nuttx/Documentation/NuttShell.html
@@ -3776,7 +3776,7 @@ mount -t vfat /dev/ram1 /tmp
<ul>
<li><a href="#builtinvars"><code>$?</code></a></li>
<li><a href="#cmdtest"><code>[</code></a></li>
- <li><a href="#custoncmds">Adding NSH commands<</a></li>
+ <li><a href="#custoncmds">Adding NSH commands</a></li>
<li><a href="#custapps"><code>appconfig</code></a></li>
<li><a href="#custapps">Application configuration file (<code>appconfig</code>)</a></li>
<li><a href="#custapps">Autogenerated header files</a></li>
diff --git a/nuttx/arch/arm/Kconfig b/nuttx/arch/arm/Kconfig
index 9e9a77df2..2cc8ffc44 100644
--- a/nuttx/arch/arm/Kconfig
+++ b/nuttx/arch/arm/Kconfig
@@ -138,31 +138,43 @@ endchoice
config ARCH_ARM7TDMI
bool
+ default n
config ARCH_ARM926EJS
bool
+ default n
config ARCH_ARM920T
bool
+ default n
config ARCH_CORTEXM0
bool
+ default n
select ARCH_IRQPRIO
config ARCH_CORTEXM3
bool
+ default n
select ARCH_IRQPRIO
select ARCH_HAVE_RAMVECTORS
config ARCH_CORTEXM4
bool
+ default n
select ARCH_IRQPRIO
select ARCH_HAVE_RAMVECTORS
+config ARCH_CORTEXA5
+ bool
+ default n
+ select ARCH_IRQPRIO
+
config ARCH_FAMILY
string
default "arm" if ARCH_ARM7TDMI || ARCH_ARM926EJS || ARCH_ARM920T
default "armv6-m" if ARCH_CORTEXM0
+ default "armv7-a" if ARCH_CORTEXA5
default "armv7-m" if ARCH_CORTEXM3 || ARCH_CORTEXM4
config ARCH_CHIP
@@ -286,6 +298,9 @@ config DEBUG_HARDFAULT
if ARCH_CORTEXM0
source arch/arm/src/armv6-m/Kconfig
endif
+if ARCH_CORTEXA5
+source arch/arm/src/armv7-a/Kconfig
+endif
if ARCH_CORTEXM3 || ARCH_CORTEXM4
source arch/arm/src/armv7-m/Kconfig
endif
diff --git a/nuttx/arch/arm/include/armv7-a/irq.h b/nuttx/arch/arm/include/armv7-a/irq.h
new file mode 100755
index 000000000..81eea542a
--- /dev/null
+++ b/nuttx/arch/arm/include/armv7-a/irq.h
@@ -0,0 +1,230 @@
+/****************************************************************************
+ * arch/arm/include/armv7-a/irq.h
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/* This file should never be included directly but, rather, only indirectly
+ * through nuttx/irq.h
+ */
+
+#ifndef __ARCH_ARM_INCLUDE_ARMV7_A_IRQ_H
+#define __ARCH_ARM_INCLUDE_ARMV7_A_IRQ_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/irq.h>
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Definitions
+ ****************************************************************************/
+
+/* IRQ Stack Frame Format:
+ *
+ * Context is always saved/restored in the same way:
+ *
+ * (1) stmia rx, {r0-r14}
+ * (2) then the PC and CPSR
+ *
+ * This results in the following set of indices that
+ * can be used to access individual registers in the
+ * xcp.regs array:
+ */
+
+#define REG_R0 (0)
+#define REG_R1 (1)
+#define REG_R2 (2)
+#define REG_R3 (3)
+#define REG_R4 (4)
+#define REG_R5 (5)
+#define REG_R6 (6)
+#define REG_R7 (7)
+#define REG_R8 (8)
+#define REG_R9 (9)
+#define REG_R10 (10)
+#define REG_R11 (11)
+#define REG_R12 (12)
+#define REG_R13 (13)
+#define REG_R14 (14)
+#define REG_R15 (15)
+#define REG_CPSR (16)
+
+#define XCPTCONTEXT_REGS (17)
+#define XCPTCONTEXT_SIZE (4 * XCPTCONTEXT_REGS)
+
+#define REG_A1 REG_R0
+#define REG_A2 REG_R1
+#define REG_A3 REG_R2
+#define REG_A4 REG_R3
+#define REG_V1 REG_R4
+#define REG_V2 REG_R5
+#define REG_V3 REG_R6
+#define REG_V4 REG_R7
+#define REG_V5 REG_R8
+#define REG_V6 REG_R9
+#define REG_V7 REG_R10
+#define REG_SB REG_R9
+#define REG_SL REG_R10
+#define REG_FP REG_R11
+#define REG_IP REG_R12
+#define REG_SP REG_R13
+#define REG_LR REG_R14
+#define REG_PC REG_R15
+
+/* The PIC register is usually R10. It can be R9 if stack checking is enabled
+ * or if the user changes it with -mpic-register on the GCC command line.
+ */
+
+#define REG_PIC REG_R10
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+/* This struct defines the way the registers are stored. We
+ * need to save:
+ *
+ * 1 CPSR
+ * 7 Static registers, v1-v7 (aka r4-r10)
+ * 1 Frame pointer, fp (aka r11)
+ * 1 Stack pointer, sp (aka r13)
+ * 1 Return address, lr (aka r14)
+ * ---
+ * 11 (XCPTCONTEXT_USER_REG)
+ *
+ * On interrupts, we also need to save:
+ * 4 Volatile registers, a1-a4 (aka r0-r3)
+ * 1 Scratch Register, ip (aka r12)
+ *---
+ * 5 (XCPTCONTEXT_IRQ_REGS)
+ *
+ * For a total of 17 (XCPTCONTEXT_REGS)
+ */
+
+#ifndef __ASSEMBLY__
+struct xcptcontext
+{
+ /* The following function pointer is non-zero if there
+ * are pending signals to be processed.
+ */
+
+#ifndef CONFIG_DISABLE_SIGNALS
+ void *sigdeliver; /* Actual type is sig_deliver_t */
+
+ /* These are saved copies of LR and CPSR used during
+ * signal processing.
+ */
+
+ uint32_t saved_pc;
+ uint32_t saved_cpsr;
+#endif
+
+ /* Register save area */
+
+ uint32_t regs[XCPTCONTEXT_REGS];
+
+ /* Extra fault address register saved for common paging logic. In the
+ * case of the prefetch abort, this value is the same as regs[REG_R15];
+ * For the case of the data abort, this value is the value of the fault
+ * address register (FAR) at the time of data abort exception.
+ */
+
+#ifdef CONFIG_PAGING
+ uintptr_t far;
+#endif
+};
+#endif
+
+/****************************************************************************
+ * Inline functions
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* Save the current interrupt enable state & disable IRQs */
+
+static inline irqstate_t irqsave(void)
+{
+ unsigned int flags;
+ unsigned int temp;
+ __asm__ __volatile__
+ (
+ "\tmrs %0, cpsr\n"
+ "\torr %1, %0, #128\n"
+ "\tmsr cpsr_c, %1"
+ : "=r" (flags), "=r" (temp)
+ :
+ : "memory");
+ return flags;
+}
+
+/* Restore saved IRQ & FIQ state */
+
+static inline void irqrestore(irqstate_t flags)
+{
+ __asm__ __volatile__
+ (
+ "msr cpsr_c, %0"
+ :
+ : "r" (flags)
+ : "memory");
+}
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Variables
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C" {
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+#endif /* __ARCH_ARM_INCLUDE_ARMV7_A_IRQ_H */
diff --git a/nuttx/arch/arm/include/irq.h b/nuttx/arch/arm/include/irq.h
index e1763af84..794fd07f9 100644
--- a/nuttx/arch/arm/include/irq.h
+++ b/nuttx/arch/arm/include/irq.h
@@ -56,7 +56,9 @@
* save structure and irqsave()/irqrestore() macros)
*/
-#if defined(CONFIG_ARCH_CORTEXM3) || defined(CONFIG_ARCH_CORTEXM4)
+#if defined(CONFIG_ARCH_CORTEXA5)
+# include <arch/armv7-a/irq.h>
+#elif defined(CONFIG_ARCH_CORTEXM3) || defined(CONFIG_ARCH_CORTEXM4)
# include <arch/armv7-m/irq.h>
#elif defined(CONFIG_ARCH_CORTEXM0)
# include <arch/armv6-m/irq.h>
diff --git a/nuttx/arch/arm/src/Makefile b/nuttx/arch/arm/src/Makefile
index a40fcb058..0f5f1014c 100644
--- a/nuttx/arch/arm/src/Makefile
+++ b/nuttx/arch/arm/src/Makefile
@@ -36,6 +36,9 @@
-include $(TOPDIR)/Make.defs
-include chip/Make.defs
+ifeq ($(CONFIG_ARCH_CORTEXA5),y) # Cortex-A5 is ARMv7
+ARCH_SUBDIR = armv7-a
+else
ifeq ($(CONFIG_ARCH_CORTEXM3),y) # Cortex-M3 is ARMv7-M
ARCH_SUBDIR = armv7-m
else
@@ -49,6 +52,7 @@ ARCH_SUBDIR = arm
endif
endif
endif
+endif
ifeq ($(CONFIG_WINDOWS_NATIVE),y)
ARCH_SRCDIR = $(TOPDIR)\arch\$(CONFIG_ARCH)\src
diff --git a/nuttx/arch/arm/src/armv7-a/Kconfig b/nuttx/arch/arm/src/armv7-a/Kconfig
new file mode 100644
index 000000000..ed426c670
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/Kconfig
@@ -0,0 +1,58 @@
+#
+# For a description of the syntax of this configuration file,
+# see misc/tools/kconfig-language.txt.
+#
+
+comment "ARMv7-A Configuration Options"
+
+choice
+ prompt "Toolchain Selection"
+ default ARMV7A_TOOLCHAIN_GNU_EABIW if HOST_WINDOWS
+ default ARMV7A_TOOLCHAIN_GNU_EABIL if !HOST_WINDOWS
+
+config ARMV7A_TOOLCHAIN_BUILDROOT
+ bool "Buildroot (Cygwin or Linux)"
+ depends on !WINDOWS_NATIVE
+ ---help---
+ For use with the GNU toolchain built with the NuttX buildroot package.
+ These tools may be arm-nuttx-eabi- or, if ARMV7A_OABI_TOOLCHAIN is set,
+ arm-nuttx-elf-.
+
+config ARMV7A_TOOLCHAIN_CODESOURCERYL
+ bool "CodeSourcery GNU toolchain under Linux"
+ depends on HOST_LINUX
+
+config ARMV7A_TOOLCHAIN_CODESOURCERYW
+ bool "CodeSourcery GNU toolchain under Windows"
+ depends on HOST_WINDOWS
+
+config ARMV7A_TOOLCHAIN_DEVKITARM
+ bool "devkitARM GNU toolchain"
+ depends on HOST_WINDOWS
+
+config ARMV7A_TOOLCHAIN_GNU_EABIL
+ bool "Generic GNU EABI toolchain under Linux (or other POSIX environment)"
+ ---help---
+ This option should work for any modern GNU toolchain (GCC 4.5 or newer)
+ configured for arm-none-eabi-.
+
+config ARMV7A_TOOLCHAIN_GNU_EABIW
+ bool "Generic GNU EABI toolchain under Windows"
+ depends on HOST_WINDOWS
+ ---help---
+ This option should work for any modern GNU toolchain (GCC 4.5 or newer)
+ configured for arm-none-eabi-.
+
+config ARMV7A_TOOLCHAIN_GNU_OABI
+ bool "Generic GNU OABI toolchain"
+ ---help---
+ This option should work for any GNU toolchain configured for arm-elf-.
+
+endchoice
+
+config ARMV7A_OABI_TOOLCHAIN
+ bool "OABI (vs EABI)"
+ default y
+ depends on ARMV7A_TOOLCHAIN_BUILDROOT
+ ---help---
+ Most of the older buildroot toolchains are OABI and are named arm-nuttx-elf- vs. arm-nuttx-eabi-
diff --git a/nuttx/arch/arm/src/armv7-a/Toolchain.defs b/nuttx/arch/arm/src/armv7-a/Toolchain.defs
new file mode 100644
index 000000000..201014107
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/Toolchain.defs
@@ -0,0 +1,153 @@
+############################################################################
+# arch/arm/src/armv7-a/Toolchain.defs
+#
+# Copyright (C) 2013 Gregory Nutt. All rights reserved.
+# Author: Gregory Nutt <gnutt@nuttx.org>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# 3. Neither the name NuttX nor the names of its contributors may be
+# used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+############################################################################
+
+# Setup for the selected toolchain
+
+#
+# Select and allow the selected toolchain to be overridden by a command-line
+# selection.
+#
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_BUILDROOT) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= BUILDROOT
+endif
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_CODESOURCERYL) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= CODESOURCERYL
+endif
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_CODESOURCERYW) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= CODESOURCERYW
+endif
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_DEVKITARM) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= DEVKITARM
+endif
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_GNU_EABIL) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= GNU_EABIL
+endif
+
+ifeq ($(filter y, \
+ $(CONFIG_ARMV7A_TOOLCHAIN_GNU_EABIW) \
+ ),y)
+ CONFIG_ARMV7A_TOOLCHAIN ?= GNU_EABIW
+endif
+
+#
+# Supported toolchains
+#
+# Each toolchain definition should set:
+#
+# CROSSDEV The GNU toolchain triple (command prefix)
+# ARCROSSDEV If required, an alternative prefix used when
+# invoking ar and nm.
+# ARCHCPUFLAGS CPU-specific flags selecting the instruction set
+# FPU options, etc.
+# MAXOPTIMIZATION The maximum optimization level that results in
+# reliable code generation.
+#
+
+# NuttX buildroot under Linux or Cygwin
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),BUILDROOT)
+ifeq ($(CONFIG_ARMV7A_OABI_TOOLCHAIN),y)
+ CROSSDEV ?= arm-nuttx-elf-
+ ARCROSSDEV ?= arm-nuttx-elf-
+else
+ CROSSDEV ?= arm-nuttx-eabi-
+ ARCROSSDEV ?= arm-nuttx-eabi-
+endif
+ MAXOPTIMIZATION = -Os
+endif
+
+# CodeSourcery under Linux
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),CODESOURCERYL)
+ CROSSDEV ?= arm-none-eabi-
+ ARCROSSDEV ?= arm-none-eabi-
+ MAXOPTIMIZATION = -O2
+endif
+
+# CodeSourcery under Windows
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),CODESOURCERYW)
+ CROSSDEV ?= arm-none-eabi-
+ ARCROSSDEV ?= arm-none-eabi-
+ MAXOPTIMIZATION = -O2
+ ifneq ($(CONFIG_WINDOWS_NATIVE),y)
+ WINTOOL = y
+ endif
+endif
+
+# devkitARM under Windows
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),DEVKITARM)
+ CROSSDEV ?= arm-eabi-
+ ARCROSSDEV ?= arm-eabi-
+ ifneq ($(CONFIG_WINDOWS_NATIVE),y)
+ WINTOOL = y
+ endif
+endif
+
+# Generic GNU EABI toolchain on OS X, Linux or any typical Posix system
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),GNU_EABIL)
+ CROSSDEV ?= arm-none-eabi-
+ ARCROSSDEV ?= arm-none-eabi-
+ MAXOPTIMIZATION = -Os
+endif
+
+# Generic GNU EABI toolchain under Windows
+
+ifeq ($(CONFIG_ARMV7A_TOOLCHAIN),GNU_EABIW)
+ CROSSDEV ?= arm-none-eabi-
+ ARCROSSDEV ?= arm-none-eabi-
+ MAXOPTIMIZATION = -Os
+ ifneq ($(CONFIG_WINDOWS_NATIVE),y)
+ WINTOOL = y
+ endif
+endif
+
diff --git a/nuttx/arch/arm/src/armv7-a/arm.h b/nuttx/arch/arm/src/armv7-a/arm.h
new file mode 100644
index 000000000..7c23d739d
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/arm.h
@@ -0,0 +1,119 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/arm.h
+ * Non-CP15 Registers
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * References:
+ *
+ * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, Copyright © 2010
+ * ARM. All rights reserved. ARM DDI 0434B (ID101810)
+ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright ©
+ * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV7_A_CPSR_H
+#define __ARCH_ARM_SRC_ARMV7_A_CPSR_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* ARMv7-A **************************************************************************/
+
+/* PSR bits */
+
+#define PSR_MODE_SHIFT (0) /* Bits 0-4: Mode fields */
+#define PSR_MODE_MASK (31 << PSR_MODE_SHIFT)
+# define PSR_MODE_USER (16 << PSR_MODE_SHIFT) /* User mode */
+# define PSR_MODE_FIQ (17 << PSR_MODE_SHIFT) /* FIQ mode */
+# define PSR_MODE_IRQ (18 << PSR_MODE_SHIFT) /* IRQ mode */
+# define PSR_MODE_SUPER (19 << PSR_MODE_SHIFT) /* Supervisor mode */
+# define PSR_MODE_MON (22 << PSR_MODE_SHIFT) /* Monitor mode */
+# define PSR_MODE_ABORT (23 << PSR_MODE_SHIFT) /* Abort mode */
+# define PSR_MODE_HYP (26 << PSR_MODE_SHIFT) /* Hyp mode */
+# define PSR_MODE_UNDEF (27 << PSR_MODE_SHIFT) /* Undefined mode */
+# define PSR_MODE_SYSTEM (31 << PSR_MODE_SHIFT) /* System mode */
+#define PSR_T_BIT (1 << 5) /* Bit 5: Thumb execution state bit */
+#define PSR_MASK_SHIFT (6) /* Bits 6-8: Mask Bits */
+#define PSR_MASK_MASK (7 << PSR_MASK_SHIFT)
+# define PSR_F_BIT (1 << 6) /* Bit 6: FIQ mask bit */
+# define PSR_I_BIT (1 << 7) /* Bit 7: IRQ mask bit */
+# define PSR_A_BIT (1 << 8) /* Bit 8: Asynchronous abort mask */
+#define PSR_E_BIT (1 << 9) /* Bit 9: Endianness execution state bit */
+#define PSR_GE_SHIFT (16) /* Bits 16-19: Greater than or Equal flags */
+#define PSR_GE_MASK (15 << PSR_GE_SHIFT)
+ /* Bits 20-23: Reserved. RAZ/SBZP */
+#define PSR_J_BIT (1 << 24) /* Bit 24: Jazelle state bit */
+#define PSR_IT01_SHIFT (25) /* Bits 25-26: If-Then execution state bits IT[0:1] */
+#define PSR_IT01_MASK (3 << PSR_IT01_SHIFT)
+#define PSR_Q_BIT (1 << 27) /* Bit 27: Cumulative saturation bit */
+#define PSR_V_BIT (1 << 28) /* Bit 28: Overflow condition flag */
+#define PSR_C_BIT (1 << 29) /* Bit 29: Carry condition flag */
+#define PSR_Z_BIT (1 << 30) /* Bit 30: Zero condition flag */
+#define PSR_N_BIT (1 << 31) /* Bit 31: Negative condition flag */
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Variables
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C" {
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_ARM_SRC_ARMV7_A_CPSR_H */
diff --git a/nuttx/arch/arm/src/armv7-a/arm_head.S b/nuttx/arch/arm/src/armv7-a/arm_head.S
new file mode 100644
index 000000000..f0d4b0da1
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/arm_head.S
@@ -0,0 +1,663 @@
+/****************************************************************************
+ * arch/arm/src/armv7-a/arm_head.S
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifdef CONFIG_PAGING
+# include <nuttx/page.h>
+# include "pg_macros.h"
+#endif
+
+#include "arm.h"
+#include "cp15.h"
+
+/**********************************************************************************
+ * Configuration
+ **********************************************************************************/
+
+#undef ALIGNMENT_TRAP
+#undef CPU_DCACHE_WRITETHROUGH
+#undef CPU_CACHE_ROUND_ROBIN
+#undef CPU_DCACHE_DISABLE
+#undef CPU_ICACHE_DISABLE
+
+/* There are three operational memory configurations:
+ *
+ * 1. We execute in place in FLASH (CONFIG_BOOT_RUNFROMFLASH=y). In this case
+ * the boot logic must:
+ *
+ * - Configure SDRAM,
+ * - Initialize the .data section in RAM, and
+ * - Clear .bss section
+ */
+
+#ifdef CONFIG_BOOT_RUNFROMFLASH
+# error "Configuration not implemented"
+# define DO_SDRAM_INIT 1
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of FLASH is the same as the physical
+ * beginning of FLASH.
+ */
+
+# if !defined(CONFIG_FLASH_START) || !defined(CONFIG_FLASH_VSTART)
+# error "CONFIG_FLASH_START or CONFIG_FLASH_VSTART is not defined"
+# endif
+
+# if CONFIG_FLASH_START == CONFIG_FLASH_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+/* 2. We boot in FLASH but copy ourselves to DRAM for better performance.
+ * (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=y). In this case
+ * the boot logic must:
+ *
+ * - Configure SDRAM,
+ * - Copy ourself to DRAM (after mapping it), and
+ * - Clear .bss section
+ *
+ * In this case, we assume that the logic within this file executes from FLASH.
+ */
+
+#elif defined(CONFIG_BOOT_COPYTORAM)
+# error "Configuration not implemented"
+# define DO_SDRAM_INIT 1
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of FLASH is the same as the physical
+ * beginning of FLASH.
+ */
+
+# if !defined(CONFIG_FLASH_START) || !defined(CONFIG_FLASH_VSTART)
+# error "CONFIG_FLASH_START or CONFIG_FLASH_VSTART is not defined"
+# endif
+
+# if CONFIG_FLASH_START == CONFIG_FLASH_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+/* 3. There is a bootloader that copies us to DRAM (but probably not to the beginning)
+ * (CONFIG_BOOT_RUNFROMFLASH=n && CONFIG_BOOT_COPYTORAM=n). In this case SDRAM
+ * was initialized by the boot loader, and this boot logic must:
+ *
+ * - Clear .bss section
+ */
+
+#else
+
+ /* Check for the identity mapping: For this configuration, this would be
+ * the case where the virtual beginning of RAM is the same as the physical
+ * beginning of RAM.
+ */
+
+# if !defined(CONFIG_DRAM_START) || !defined(CONFIG_DRAM_VSTART)
+# error "CONFIG_DRAM_START or CONFIG_DRAM_VSTART is not defined"
+# endif
+
+# if CONFIG_DRAM_START == CONFIG_DRAM_VSTART
+# define CONFIG_IDENTITY_TEXTMAP 1
+# endif
+
+#endif
+
+/* For each page table offset, the following provide (1) the physical address of
+ * the start of the page table and (2) the number of page table entries in the
+ * first page table.
+ *
+ * Coarse: PG_L1_PADDRMASK=0xfffffc00
+ * NPAGE1=(256 -((a) & 0x000003ff) >> 2) NPAGE1=1-256
+ * Fine: PG_L1_PADDRMASK=0xfffff000
+ * NPAGE1=(1024 -((a) & 0x00000fff) >> 2) NPAGE1=1-1024
+ */
+
+#ifdef CONFIG_PAGING
+# define PG_L2_TEXT_PBASE (PG_L2_TEXT_PADDR & PG_L1_PADDRMASK)
+# define PG_L2_TEXT_NPAGE1 (PTE_NPAGES - ((PG_L2_TEXT_PADDR & ~PG_L1_PADDRMASK) >> 2))
+# define PG_L2_PGTABLE_PBASE (PG_L2_PGTABLE_PADDR & PG_L1_PADDRMASK)
+# define PG_L2_PGTABLE_NPAGE1 (PTE_NPAGES - ((PG_L2_PGTABLE_PADDR & ~PG_L1_PADDRMASK) >> 2))
+# define PG_L2_DATA_PBASE (PG_L2_DATA_PADDR & PG_L1_PADDRMASK)
+# define PG_L2_DATA_NPAGE1 (PTE_NPAGES - ((PG_L2_DATA_PADDR & ~PG_L1_PADDRMASK) >> 2))
+#endif
+
+/****************************************************************************
+ * Definitions
+ ****************************************************************************/
+
+/* RX_NSECTIONS determines the number of 1Mb sections to map for the
+ * Read/eXecute address region. This is based on CONFIG_DRAM_SIZE. For most
+ * ARMv7-A architectures, CONFIG_DRAM_SIZE describes the size of installed SDRAM.
+ * But for other architectures, this might refer to the size of FLASH or
+ * SRAM regions. (bad choice of naming).
+ */
+
+#define RX_NSECTIONS ((CONFIG_DRAM_SIZE+0x000fffff) >> 20)
+
+/****************************************************************************
+ * Assembly Macros
+ ****************************************************************************/
+
+/* The ARMv7-A L1 page table can be placed at the beginning or at the end of
+ * the RAM space. This decision is based on the placement of the vector
+ * area: If the vectors are place in low memory at address 0x0000 0000, then
+ * the page table is placed in high memory; if the vectors are placed in
+ * high memory at address 0xfff0 0000, then the page table is locating at
+ * the beginning of RAM.
+ *
+ * For the special case where (1) the program executes out of RAM, and (2)
+ * the page is located at the beginning of RAM (i.e., the high vector case),
+ * then the following macro can easily find the physical address of the
+ * section that includes the first part of the text region: Since the page
+ * table is closely related to the NuttX base address in this case, we can
+ * convert the page table base address to the base address of the section
+ * containing both.
+ */
+
+/* REVISIT: This works now for the low vector case only because the RAM
+ * sizes that we have been dealing with are less than 1MB so that both the
+ * page table and the vector table are in the same 1MB RAM block. But
+ * this will certainly break later. Hence, the annoying warning.
+ */
+
+#ifdef CONFIG_ARCH_LOWVECTORS
+# warning "REVISIT"
+#endif
+
+//#ifndef CONFIG_ARCH_LOWVECTORS
+ .macro mksection, section, pgtable
+ bic \section, \pgtable, #0x000ff000 /* Clear bits 12-19; yields the 1MB section base (assumes 16KB-aligned L1 table, low 14 bits already 0) */
+ .endm
+//#endif
+
+/* This macro will modify r0, r1, r2 and r14 */
+
+#ifdef CONFIG_DEBUG
+ .macro showprogress, code
+ mov r0, #\code /* r0 = progress character to print */
+ bl up_lowputc /* bl clobbers r14 (lr); up_lowputc presumably clobbers r1-r2 (see note above) */
+ .endm
+#else
+ .macro showprogress, code
+ .endm
+#endif
+
+/****************************************************************************
+ * Name: __start
+ ****************************************************************************/
+
+ .text
+ .global __start
+ .type __start, #function
+
+__start:
+ /* Make sure that we are in SVC mode with all IRQs disabled */
+
+ mov r0, #(PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT)
+ msr cpsr_c, r0
+
+ /* Initialize DRAM using a macro provided by board-specific logic.
+ *
+ * This must be done in two cases:
+ * 1. CONFIG_BOOT_RUNFROMFLASH. The system is running from FLASH
+ * 2. CONFIG_BOOT_COPYTORAM. The system booted from FLASH but
+ * will copy itself to SDRAM.
+ */
+
+#ifdef DO_SDRAM_INIT
+ config_sdram
+#endif
+
+ /* Clear the 16K level 1 page table */
+
+ ldr r4, .LCppgtable /* r4=phys. page table */
+#ifndef CONFIG_ARCH_ROMPGTABLE
+ mov r0, r4
+ mov r1, #0
+ add r2, r0, #PGTABLE_SIZE
+.Lpgtableclear:
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ teq r0, r2
+ bne .Lpgtableclear
+
+ /* Create identity mapping for first MB of the .text section to support
+ * this startup logic executing out of the physical address space. This
+ * identity mapping will be removed by .Lvstart (see below). Of course,
+ * we would only do this if the physical-virtual mapping is not already
+ * the identity mapping.
+ */
+
+#ifndef CONFIG_IDENTITY_TEXTMAP
+ mksection r0, r4 /* r0=phys. base section */
+ ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
+ add r3, r1, r0 /* r3=flags + base */
+ str r3, [r4, r0, lsr #18] /* identity mapping */
+#endif
+
+#ifdef CONFIG_PAGING
+
+ /* Map the read-only .text region in place. This must be done
+ * before the MMU is enabled and the virtual addressing takes
+ * effect. First populate the L1 table for the locked and paged
+ * text regions.
+ *
+ * We could probably make the pg_l1span and pg_l2map macros into
+ * call-able subroutines, but we would have to be careful during
+ * this phase while we are operating in a physical address space.
+ *
+ * NOTE: That the value of r5 (L1 table base address) must be
+ * preserved through the following.
+ */
+
+ adr r0, .Ltxtspan
+ ldmia r0, {r0, r1, r2, r3, r5}
+ pg_l1span r0, r1, r2, r3, r5, r6
+
+ /* Then populate the L2 table for the locked text region only. */
+
+ adr r0, .Ltxtmap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r5
+
+ /* Make sure that the page table is itself mapped and read/write-able.
+ * First, populate the L1 table:
+ */
+
+ adr r0, .Lptabspan
+ ldmia r0, {r0, r1, r2, r3, r5}
+ pg_l1span r0, r1, r2, r3, r5, r6
+
+ /* Then populate the L2 table. */
+
+ adr r0, .Lptabmap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r5
+
+#else /* CONFIG_PAGING */
+
+ /* Create a virtual single section mapping for the first MB of the .text
+ * address space. Now, we have the first 1MB mapping to both physical and
+ * virtual addresses. The rest of the .text mapping will be completed in
+ * .Lvstart once we have moved the physical mapping out of the way.
+ *
+ * Here we expect to have:
+ * r4 = Address of the base of the L1 table
+ */
+
+ ldr r2, .LCvpgtable /* r2=virt. page table */
+ mksection r0, r2 /* r0=virt. base section */
+ str r3, [r4, r0, lsr #18] /* identity mapping */
+
+ /* NOTE: No .data/.bss access should be attempted. This temporary mapping
+ * can only be assumed to cover the initial .text region.
+ */
+
+#endif /* CONFIG_PAGING */
+#endif /* CONFIG_ARCH_ROMPGTABLE */
+
+ /* The following logic will set up the ARMv7-A for normal operation.
+ *
+ * Here we expect to have:
+ * r4 = Address of the base of the L1 table
+ */
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7 /* Invalidate I,D caches */
+ mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
+ mcr p15, 0, r0, c8, c7 /* Invalidate I,D TLBs */
+ mcr p15, 0, r4, c2, c0 /* Load page table pointer */
+
+#ifdef CPU_DCACHE_WRITETHROUGH
+ mov r0, #4 /* Disable write-back on caches explicitly */
+ mcr p15, 7, r0, c15, c0, 0
+#endif
+
+ /* Enable the MMU and caches
+ * lr = Resume at .Lvstart with the MMU enabled
+ */
+
+ ldr lr, .LCvstart /* Abs. virtual address */
+
+ mov r0, #0x1f /* Domains 0, 1 = client */
+ mcr p15, 0, r0, c3, c0 /* Load domain access register */
+ mrc p15, 0, r0, c1, c0 /* Get control register */
+
+ /* Clear bits (see arm.h)
+ *
+ * CR_R - ROM MMU protection
+ * CR_F - Implementation defined
+ * CR_Z - Implementation defined
+ *
+ * CR_A - Alignment abort enable
+ * CR_C - Dcache enable
+ * CR_W - Write buffer enable
+ *
+ * CR_I - Icache enable
+ */
+
+ bic r0, r0, #(CR_R|CR_F|CR_Z)
+ bic r0, r0, #(CR_A|CR_C|CR_W)
+ bic r0, r0, #(CR_I)
+
+ /* Set bits (see arm.h)
+ *
+ * CR_M - MMU enable
+ * CR_P - 32-bit exception handler
+ * CR_D - 32-bit data address range
+ */
+
+ orr r0, r0, #(CR_M|CR_P|CR_D)
+
+ /* In most architectures, vectors are relocated to 0xffff0000.
+ * -- but not all
+ *
+ * CR_S - System MMU protection
+ * CR_V - Vectors relocated to 0xffff0000
+ */
+
+#ifndef CONFIG_ARCH_LOWVECTORS
+ orr r0, r0, #(CR_S|CR_V)
+#else
+ orr r0, r0, #(CR_S)
+#endif
+ /* CR_RR - Round Robin cache replacement */
+
+#ifdef CPU_CACHE_ROUND_ROBIN
+ orr r0, r0, #(CR_RR)
+#endif
+ /* CR_C - Dcache enable */
+
+#ifndef CPU_DCACHE_DISABLE
+ orr r0, r0, #(CR_C)
+#endif
+ /* CR_I - Icache enable */
+
+#ifndef CPU_ICACHE_DISABLE
+ orr r0, r0, #(CR_I)
+#endif
+ /* CR_A - Alignment abort enable */
+
+#ifdef ALIGNMENT_TRAP
+ orr r0, r0, #(CR_A)
+#endif
+ mcr p15, 0, r0, c1, c0, 0 /* write control reg */
+
+ /* Read the Processor ID register into r1 */
+
+ mrc p15, 0, r1, c0, c0, 0 /* read id reg */
+ mov r1,r1 /* Null-avoiding nop */
+ mov r1,r1 /* Null-avoiding nop */
+
+ /* And "jump" to .Lvstart */
+
+ mov pc, lr
+
+/****************************************************************************
+ * PC_Relative Data
+ ****************************************************************************/
+
+ /* Most addresses are all virtual address */
+
+ .type .LCvstart, %object
+.LCvstart:
+ .long .Lvstart
+
+#ifndef CONFIG_ARCH_ROMPGTABLE
+ .type .LCmmuflags, %object
+.LCmmuflags:
+ .long MMU_MEMFLAGS /* MMU flags for memory sections */
+#endif
+
+ .type .LCppgtable, %object
+.LCppgtable:
+ .long PGTABLE_BASE_PADDR /* Physical start of page table */
+
+#ifndef CONFIG_ARCH_ROMPGTABLE
+ .type .LCvpgtable, %object
+.LCvpgtable:
+ .long PGTABLE_BASE_VADDR /* Virtual start of page table */
+#endif
+
+#ifdef CONFIG_PAGING
+
+.Ltxtspan:
+ .long PG_L1_TEXT_PADDR /* Physical address in the L1 table */
+ .long PG_L2_TEXT_PBASE /* Physical address of the start of the L2 page table */
+ .long PG_TEXT_NVPAGES /* Total (virtual) text pages to be mapped */
+ .long PG_L2_TEXT_NPAGE1 /* The number of text pages in the first page table */
+ .long MMU_L1_TEXTFLAGS /* L1 MMU flags to use */
+
+.Ltxtmap:
+ .long PG_L2_LOCKED_PADDR /* Physical address in the L2 table */
+ .long PG_LOCKED_PBASE /* Physical address of locked base memory */
+ .long CONFIG_PAGING_NLOCKED /* Number of pages in the locked region */
+ .long MMU_L2_TEXTFLAGS /* L2 MMU flags to use */
+
+.Lptabspan:
+ .long PG_L1_PGTABLE_PADDR /* Physical address in the L1 table */
+ .long PG_L2_PGTABLE_PBASE /* Physical address of the start of the L2 page table */
+ .long PG_PGTABLE_NPAGES /* Total mapped page table pages */
+ .long PG_L2_PGTABLE_NPAGE1 /* The number of text pages in the first page table */
+ .long MMU_L1_PGTABFLAGS /* L1 MMU flags to use */
+
+.Lptabmap:
+ .long PG_L2_PGTABLE_PADDR /* Physical address in the L2 table */
+ .long PGTABLE_BASE_PADDR /* Physical address of the page table memory */
+ .long PG_PGTABLE_NPAGES /* Total mapped page table pages */
+ .long MMU_L2_PGTABFLAGS /* L2 MMU flags to use */
+
+#endif /* CONFIG_PAGING */
+ .size __start, .-__start
+
+/****************************************************************************
+ * Name: .Lvstart
+ ***************************************************************************/
+
+/* The following is executed after the MMU has been enabled. This uses
+ * absolute addresses; this is not position independent.
+ */
+ .align 5
+ .local .Lvstart
+ .type .Lvstart, %function
+.Lvstart:
+
+ /* Remove the temporary mapping (if one was made). The following assumes
+ * that the total RAM size is > 1Mb and extends that initial mapping to
+ * cover additional RAM sections.
+ */
+
+
+#ifndef CONFIG_ARCH_ROMPGTABLE
+#ifndef CONFIG_IDENTITY_TEXTMAP
+ ldr r4, .LCvpgtable /* r4=virtual page table */
+ ldr r1, .LCppgtable /* r1=phys. page table */
+ mksection r3, r1 /* r3=phys. base addr */
+ mov r0, #0 /* flags + base = 0 */
+ str r0, [r4, r3, lsr #18] /* Undo identity mapping */
+#endif
+
+#if defined(CONFIG_PAGING)
+ /* Populate the L1 table for the data region */
+
+ adr r0, .Ldataspan
+ ldmia r0, {r0, r1, r2, r3, r4}
+ pg_l1span r0, r1, r2, r3, r4, r5
+
+ /* Populate the L2 table for the data region */
+
+ adr r0, .Ldatamap
+ ldmia r0, {r0, r1, r2, r3}
+ pg_l2map r0, r1, r2, r3, r4
+
+#elif defined(CONFIG_BOOT_RUNFROMFLASH)
+# error "Logic not implemented"
+#else
+ /* Now setup the pagetables for our normal SDRAM mapped region.
+ * We round NUTTX_START_VADDR down to the nearest megabyte boundary.
+ */
+
+ ldr r1, .LCmmuflags /* FLGS=MMU_MEMFLAGS */
+ add r3, r3, r1 /* r3=flags + base */
+
+ add r0, r4, #(NUTTX_START_VADDR & 0xff000000) >> 18
+ bic r2, r3, #0x00f00000
+ str r2, [r0]
+
+ add r0, r0, #(NUTTX_START_VADDR & 0x00f00000) >> 18
+ str r3, [r0], #4
+
+ /* Now map the remaining RX_NSECTIONS-1 sections of the executable
+ * memory region.
+ */
+
+ .rept RX_NSECTIONS-1
+ add r3, r3, #SECTION_SIZE
+ str r3, [r0], #4
+ .endr
+
+ /* If we are executing from RAM with a fixed page configuration, then
+ * we can assume that the above contiguous mapping included all of the
+ * .text, .data, .bss, heap, etc. But if we are executing from FLASH,
+ * then the RAM area is probably in a separate physical address region
+ * and will require a separate mapping. Or, if we are supporting on-demand
+ * paging of the .text region, then the RAM-based .data/.bss/heap section
+ * will still probably be located in a separate (virtual) address region.
+ */
+
+#endif /* CONFIG_PAGING */
+#endif /* CONFIG_ARCH_ROMPGTABLE */
+
+ /* Zero BSS and set up the stack pointer */
+
+ adr r0, .Linitparms
+ ldmia r0, {r0, r1, sp}
+
+ /* Clear the frame pointer and .bss */
+
+ mov fp, #0
+
+.Lbssinit:
+ cmp r0, r1 /* Clear up to _bss_end_ */
+ strcc fp, [r0],#4
+ bcc .Lbssinit
+
+ /* If the .data section is in a separate, uninitialized address space,
+ * then we will also need to copy the initial values of the .data
+ * section from the .text region into that .data region. This would
+ * be the case if we are executing from FLASH and the .data section
+ * lies in a different physical address region OR if we are supporting
+ * on-demand paging and the .data section lies in a different virtual
+ * address region.
+ */
+
+#if defined(CONFIG_BOOT_RUNFROMFLASH) || defined(CONFIG_PAGING)
+ adr r3, .Ldatainit
+ ldmia r3, {r0, r1, r2}
+
+1: ldmia r0!, {r3 - r10}
+ stmia r1!, {r3 - r10}
+ cmp r1, r2
+ blt 1b
+#endif
+
+ /* Perform early C-level, platform-specific initialization */
+
+ bl up_boot
+
+ /* Finally branch to the OS entry point */
+
+ mov lr, #0
+ b os_start
+
+ /* Text-section constants:
+ *
+ * _sbss is the start of the BSS region (see ld.script)
+ * _ebss is the end of the BSS region (see ld.script)
+ *
+ * The idle task stack starts at the end of BSS and is of size
+ * CONFIG_IDLETHREAD_STACKSIZE. The heap continues from there until the
+ * end of memory. See g_idle_topstack below.
+ */
+
+.Linitparms:
+ .long _sbss
+ .long _ebss
+ .long _ebss+CONFIG_IDLETHREAD_STACKSIZE-4
+
+#ifdef CONFIG_PAGING
+
+.Ldataspan:
+ .long PG_L1_DATA_VADDR /* Virtual address in the L1 table */
+ .long PG_L2_DATA_PBASE /* Physical address of the start of the L2 page table */
+ .long PG_DATA_NPAGES /* Number of pages in the data region */
+ .long PG_L2_DATA_NPAGE1 /* The number of text pages in the first page table */
+ .long MMU_L1_DATAFLAGS /* L1 MMU flags to use */
+
+.Ldatamap:
+ .long PG_L2_DATA_VADDR /* Virtual address in the L2 table */
+ .long PG_DATA_PBASE /* Physical address of data memory */
+ .long PG_DATA_NPAGES /* Number of pages in the data region */
+ .long MMU_L2_DATAFLAGS /* L2 MMU flags to use */
+
+#endif /* CONFIG_PAGING */
+
+#if defined(CONFIG_BOOT_RUNFROMFLASH) || defined(CONFIG_PAGING)
+.Ldatainit:
+ .long _eronly /* Where .data defaults are stored in FLASH */
+ .long _sdata /* Where .data needs to reside in SDRAM */
+ .long _edata
+#endif
+ .size .Lvstart, .-.Lvstart
+
+ /* Data section variables */
+
+ /* This global variable is unsigned long g_idle_topstack and is
+ * exported from here only because of its coupling to .Linitparms
+ * above.
+ */
+
+ .data
+ .align 4
+ .globl g_idle_topstack
+ .type g_idle_topstack, object
+g_idle_topstack:
+ .long _ebss+CONFIG_IDLETHREAD_STACKSIZE
+ .size g_idle_topstack, .-g_idle_topstack
+ .end
diff --git a/nuttx/arch/arm/src/armv7-a/arm_vectors.S b/nuttx/arch/arm/src/armv7-a/arm_vectors.S
new file mode 100644
index 000000000..79597f178
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/arm_vectors.S
@@ -0,0 +1,445 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/arm_vectors.S
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/irq.h>
+
+#include "arm.h"
+
+/************************************************************************************
+ * Definitions
+ ************************************************************************************/
+
+/************************************************************************************
+ * Global Data
+ ************************************************************************************/
+
+ .data
+g_irqtmp:
+ .word 0 /* Saved lr */
+ .word 0 /* Saved spsr */
+g_undeftmp:
+ .word 0 /* Saved lr */
+ .word 0 /* Saved spsr */
+g_aborttmp:
+ .word 0 /* Saved lr */
+ .word 0 /* Saved spsr */
+
+/************************************************************************************
+ * Assembly Macros
+ ************************************************************************************/
+
+/************************************************************************************
+ * Private Functions
+ ************************************************************************************/
+
+ .text
+
+/************************************************************************************
+ * Public Functions
+ ************************************************************************************/
+
+ .text
+
+/************************************************************************************
+ * Name: up_vectorirq
+ *
+ * Description:
+ * Interrupt exception. Entered in IRQ mode with spsr = SVC CPSR, lr = SVC PC
+ *
+ ************************************************************************************/
+
+ .globl up_vectorirq
+ .type up_vectorirq, %function
+up_vectorirq:
+ /* On entry, we are in IRQ mode. We are free to use
+ * the IRQ mode r13 and r14.
+ */
+
+ ldr r13, .Lirqtmp
+ sub lr, lr, #4
+ str lr, [r13] @ save lr_IRQ
+ mrs lr, spsr
+ str lr, [r13, #4] @ save spsr_IRQ
+
+ /* Then switch back to SVC mode */
+
+ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
+ orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
+ msr cpsr_c, lr /* Switch to SVC mode */
+
+ /* Create a context structure. First set aside a stack frame
+ * and store r0-r12 into the frame.
+ */
+
+ sub sp, sp, #XCPTCONTEXT_SIZE
+ stmia sp, {r0-r12} /* Save the SVC mode regs */
+
+ /* Get the correct values of r13(sp) and r14(lr) in r1 and r2 */
+
+ add r1, sp, #XCPTCONTEXT_SIZE
+ mov r2, r14
+
+ /* Get the values for r15(pc) and CPSR in r3 and r4 */
+
+ ldr r0, .Lirqtmp /* Points to temp storage */
+ ldmia r0, {r3, r4} /* Recover r3=lr_IRQ, r4=spsr_IRQ */
+
+ add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
+ stmia r0, {r1-r4}
+
+ /* Then call the IRQ handler with interrupts disabled. */
+
+ mov fp, #0 /* Init frame pointer */
+ mov r0, sp /* Get r0=xcp */
+
+#if CONFIG_ARCH_INTERRUPTSTACK > 3
+ ldr sp, .Lirqstackbase /* SP = interrupt stack base */
+ str r0, [sp] /* Save the user stack pointer */
+ bl up_decodeirq /* Call the handler */
+ ldr sp, [sp] /* Restore the user stack pointer */
+#else
+ bl up_decodeirq /* Call the handler */
+#endif
+
+ /* Restore the CPSR, SVC mode registers and return */
+.Lnoirqset:
+ ldr r0, [sp, #(4*REG_CPSR)] /* Setup the SVC mode SPSR */
+ msr spsr_cxsf, r0 /* Write all SPSR fields (consistent with other handlers) */
+ ldmia sp, {r0-r15}^ /* Return */
+
+.Lirqtmp:
+ .word g_irqtmp
+#if CONFIG_ARCH_INTERRUPTSTACK > 3
+.Lirqstackbase:
+ .word up_stackbase
+#endif
+ .size up_vectorirq, . - up_vectorirq
+ .align 5
+
+/************************************************************************************
+ * Function: up_vectorswi
+ *
+ * Description:
+ * SWI interrupt. We enter the SWI in SVC mode.
+ *
+ ************************************************************************************/
+
+ .globl up_vectorswi
+ .type up_vectorswi, %function
+up_vectorswi:
+
+ /* Create a context structure. First set aside a stack frame
+ * and store r0-r12 into the frame.
+ */
+
+ sub sp, sp, #XCPTCONTEXT_SIZE
+ stmia sp, {r0-r12} /* Save the SVC mode regs */
+
+ /* Get the correct values of r13(sp), r14(lr), r15(pc)
+ * and CPSR in r1-r4 */
+
+ add r1, sp, #XCPTCONTEXT_SIZE
+ mov r2, r14 /* R14 is altered on return from SWI */
+ mov r3, r14 /* Save r14 as the PC as well */
+ mrs r4, spsr /* Get the saved CPSR */
+
+ add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
+ stmia r0, {r1-r4}
+
+ /* Then call the SWI handler with interrupts disabled.
+ * void up_syscall(struct xcptcontext *xcp)
+ */
+
+ mov fp, #0 /* Init frame pointer */
+ mov r0, sp /* Get r0=xcp */
+ bl up_syscall /* Call the handler */
+
+ /* Restore the CPSR, SVC mode registers and return */
+
+ ldr r0, [sp, #(4*REG_CPSR)] /* Setup the SVC mode SPSR */
+ msr spsr_cxsf, r0 /* Write all SPSR fields (consistent with other handlers) */
+ ldmia sp, {r0-r15}^ /* Return */
+ .size up_vectorswi, . - up_vectorswi
+
+ .align 5
+
+/************************************************************************************
+ * Name: up_vectordata
+ *
+ * Description:
+ * This is the data abort exception dispatcher. The ARM data abort exception occurs
+ * when a memory fault is detected during a data transfer. This handler saves the
+ * current processor state and gives control to data abort handler. This function
+ * is entered in ABORT mode with spsr = SVC CPSR, lr = SVC PC
+ *
+ ************************************************************************************/
+
+ .globl up_vectordata
+ .type up_vectordata, %function
+up_vectordata:
+ /* On entry we are free to use the ABORT mode registers
+ * r13 and r14
+ */
+
+ ldr r13, .Ldaborttmp /* Points to temp storage */
+ sub lr, lr, #8 /* Fixup return */
+ str lr, [r13] /* Save in temp storage */
+ mrs lr, spsr /* Get SPSR */
+ str lr, [r13, #4] /* Save in temp storage */
+
+ /* Then switch back to SVC mode */
+
+ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
+ orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
+ msr cpsr_c, lr /* Switch to SVC mode */
+
+ /* Create a context structure. First set aside a stack frame
+ * and store r0-r12 into the frame.
+ */
+
+ sub sp, sp, #XCPTCONTEXT_SIZE
+ stmia sp, {r0-r12} /* Save the SVC mode regs */
+
+ /* Get the correct values of r13(sp) and r14(lr) in r1 and r2 */
+
+ add r1, sp, #XCPTCONTEXT_SIZE
+ mov r2, r14
+
+ /* Get the values for r15(pc) and CPSR in r3 and r4 */
+
+ ldr r0, .Ldaborttmp /* Points to temp storage */
+ ldmia r0, {r3, r4} /* Recover r3=saved lr, r4=saved spsr */
+
+ add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
+ stmia r0, {r1-r4}
+
+ /* Then call the data abort handler with interrupts disabled.
+ * void up_dataabort(struct xcptcontext *xcp)
+ */
+
+ mov fp, #0 /* Init frame pointer */
+ mov r0, sp /* Get r0=xcp */
+#ifdef CONFIG_PAGING
+ mrc p15, 0, r2, c5, c0, 0 /* Get r2=FSR */
+ mrc p15, 0, r1, c6, c0, 0 /* Get R1=FAR */
+#endif
+ bl up_dataabort /* Call the handler */
+
+ /* Restore the CPSR, SVC mode registers and return */
+
+ ldr r0, [sp, #(4*REG_CPSR)] /* Setup the SVC mode SPSR */
+ msr spsr_cxsf, r0
+ ldmia sp, {r0-r15}^ /* Return */
+
+.Ldaborttmp:
+ .word g_aborttmp
+ .size up_vectordata, . - up_vectordata
+
+ .align 5
+
+/************************************************************************************
+ * Name: up_vectorprefetch
+ *
+ * Description:
+ * This is the prefetch abort exception dispatcher. The ARM prefetch abort exception
+ * occurs when a memory fault is detected during an instruction fetch. This
+ * handler saves the current processor state and gives control to prefetch abort
+ * handler. This function is entered in ABT mode with spsr = SVC CPSR, lr = SVC PC.
+ *
+ ************************************************************************************/
+
+ .globl up_vectorprefetch
+ .type up_vectorprefetch, %function
+up_vectorprefetch:
+ /* On entry we are free to use the ABORT mode registers
+ * r13 and r14
+ */
+
+ ldr r13, .Lpaborttmp /* Points to temp storage */
+ sub lr, lr, #4 /* Fixup return */
+ str lr, [r13] /* Save in temp storage */
+ mrs lr, spsr /* Get SPSR */
+ str lr, [r13, #4] /* Save in temp storage */
+
+ /* Then switch back to SVC mode */
+
+ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
+ orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
+ msr cpsr_c, lr /* Switch to SVC mode */
+
+ /* Create a context structure. First set aside a stack frame
+ * and store r0-r12 into the frame.
+ */
+
+ sub sp, sp, #XCPTCONTEXT_SIZE
+ stmia sp, {r0-r12} /* Save the SVC mode regs */
+
+ /* Get the correct values of r13(sp) and r14(lr) in r1 and r2 */
+
+ add r1, sp, #XCPTCONTEXT_SIZE
+ mov r2, r14
+
+ /* Get the values for r15(pc) and CPSR in r3 and r4 */
+
+ ldr r0, .Lpaborttmp /* Points to temp storage */
+ ldmia r0, {r3, r4} /* Recover r3=saved lr, r4=saved spsr */
+
+ add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
+ stmia r0, {r1-r4}
+
+ /* Then call the prefetch abort handler with interrupts disabled.
+ * void up_prefetchabort(struct xcptcontext *xcp)
+ */
+
+ mov fp, #0 /* Init frame pointer */
+ mov r0, sp /* Get r0=xcp */
+ bl up_prefetchabort /* Call the handler */
+
+ /* Restore the CPSR, SVC mode registers and return */
+
+ ldr r0, [sp, #(4*REG_CPSR)] /* Setup the SVC mode SPSR */
+ msr spsr_cxsf, r0
+ ldmia sp, {r0-r15}^ /* Return */
+
+.Lpaborttmp:
+ .word g_aborttmp
+ .size up_vectorprefetch, . - up_vectorprefetch
+
+ .align 5
+
+/************************************************************************************
+ * Name: up_vectorundefinsn
+ *
+ * Description:
+ * Undefined instruction entry exception. Entered in UND mode, spsr = SVC CPSR,
+ * lr = SVC PC
+ *
+ ************************************************************************************/
+
+ .globl up_vectorundefinsn
+ .type up_vectorundefinsn, %function
+up_vectorundefinsn:
+ /* On entry we are free to use the UND mode registers
+ * r13 and r14
+ */
+
+ ldr r13, .Lundeftmp /* Points to temp storage */
+ str lr, [r13] /* Save in temp storage */
+ mrs lr, spsr /* Get SPSR */
+ str lr, [r13, #4] /* Save in temp storage */
+
+ /* Then switch back to SVC mode */
+
+ bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
+ orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
+ msr cpsr_c, lr /* Switch to SVC mode */
+
+ /* Create a context structure. First set aside a stack frame
+ * and store r0-r12 into the frame.
+ */
+
+ sub sp, sp, #XCPTCONTEXT_SIZE
+ stmia sp, {r0-r12} /* Save the SVC mode regs */
+
+ /* Get the correct values of r13(sp) and r14(lr) in r1 and r2 */
+
+ add r1, sp, #XCPTCONTEXT_SIZE
+ mov r2, r14
+
+ /* Get the values for r15(pc) and CPSR in r3 and r4 */
+
+ ldr r0, .Lundeftmp /* Points to temp storage */
+ ldmia r0, {r3, r4} /* Recover r3=saved lr, r4=saved spsr */
+
+ add r0, sp, #(4*REG_SP) /* Offset to pc, cpsr storage */
+ stmia r0, {r1-r4}
+
+ /* Then call the undef insn handler with interrupts disabled.
+ * void up_undefinedinsn(struct xcptcontext *xcp)
+ */
+
+ mov fp, #0 /* Init frame pointer */
+ mov r0, sp /* Get r0=xcp */
+ bl up_undefinedinsn /* Call the handler */
+
+ /* Restore the CPSR, SVC mode registers and return */
+
+ ldr r0, [sp, #(4*REG_CPSR)] /* Setup the SVC mode SPSR */
+ msr spsr_cxsf, r0
+ ldmia sp, {r0-r15}^ /* Return */
+
+.Lundeftmp:
+ .word g_undeftmp
+ .size up_vectorundefinsn, . - up_vectorundefinsn
+
+ .align 5
+
+/************************************************************************************
+ * Name: up_vectorfiq
+ *
+ * Description:
+ * Shouldn't happen -- FIQ is not used; this handler just returns to the interrupted code.
+ *
+ ************************************************************************************/
+
+ .globl up_vectorfiq
+ .type up_vectorfiq, %function
+up_vectorfiq:
+ subs pc, lr, #4 /* Exception return: restores CPSR from SPSR_fiq */
+ .size up_vectorfiq, . - up_vectorfiq
+
+/************************************************************************************
+ * Name: up_interruptstack/g_userstack
+ ************************************************************************************/
+
+#if CONFIG_ARCH_INTERRUPTSTACK > 3
+ .bss
+ .align 4
+ .globl g_userstack
+ .type g_userstack, object
+up_interruptstack:
+ .skip ((CONFIG_ARCH_INTERRUPTSTACK & ~3) - 4)
+g_userstack:
+up_stackbase:
+ .skip 4
+ .size g_userstack, 4
+ .size up_interruptstack, (CONFIG_ARCH_INTERRUPTSTACK & ~3)
+#endif
+ .end
diff --git a/nuttx/arch/arm/src/armv7-a/arm_vectortab.S b/nuttx/arch/arm/src/armv7-a/arm_vectortab.S
new file mode 100644
index 000000000..80de0f63a
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/arm_vectortab.S
@@ -0,0 +1,103 @@
+/****************************************************************************
+ * arch/arm/src/armv7-a/arm_vectortab.S
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+/****************************************************************************
+ * Definitions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Global Data
+ ****************************************************************************/
+
+/****************************************************************************
+ * Assembly Macros
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: _vector_start
+ *
+ * Description:
+ * Vector initialization block
+ ****************************************************************************/
+
+ .globl _vector_start
+
+/* These will be relocated to VECTOR_BASE.  The "ldr pc" entries load through
+ * PC-relative literals, so the literal pool below must be copied along with
+ * the table itself.
+ */
+
+_vector_start:
+ ldr pc, .Lresethandler /* 0x00: Reset */
+ ldr pc, .Lundefinedhandler /* 0x04: Undefined instruction */
+ ldr pc, .Lswihandler /* 0x08: Software interrupt */
+ ldr pc, .Lprefetchaborthandler /* 0x0c: Prefetch abort */
+ ldr pc, .Ldataaborthandler /* 0x10: Data abort */
+ ldr pc, .Laddrexcptnhandler /* 0x14: Address exception (reserved) */
+ ldr pc, .Lirqhandler /* 0x18: IRQ */
+ ldr pc, .Lfiqhandler /* 0x1c: FIQ */
+
+/* Handler entry points, defined in arm_head.S / arm_vectors.S */
+
+ .globl __start
+ .globl up_vectorundefinsn
+ .globl up_vectorswi
+ .globl up_vectorprefetch
+ .globl up_vectordata
+ .globl up_vectoraddrexcptn
+ .globl up_vectorirq
+ .globl up_vectorfiq
+
+/* Literal pool: absolute addresses of the handlers */
+
+.Lresethandler:
+ .long __start
+.Lundefinedhandler:
+ .long up_vectorundefinsn
+.Lswihandler:
+ .long up_vectorswi
+.Lprefetchaborthandler:
+ .long up_vectorprefetch
+.Ldataaborthandler:
+ .long up_vectordata
+.Laddrexcptnhandler:
+ .long up_vectoraddrexcptn
+.Lirqhandler:
+ .long up_vectorirq
+.Lfiqhandler:
+ .long up_vectorfiq
+
+ .globl _vector_end
+_vector_end:
+ .end
diff --git a/nuttx/arch/arm/src/armv7-a/cache.h b/nuttx/arch/arm/src/armv7-a/cache.h
new file mode 100644
index 000000000..74aaebccb
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/cache.h
@@ -0,0 +1,447 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/cache.h
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * References:
+ *
+ * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, Copyright © 2010
+ * ARM. All rights reserved. ARM DDI 0434B (ID101810)
+ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright ©
+ * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV7_A_CACHE_H
+#define __ARCH_ARM_SRC_ARMV7_A_CACHE_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+
+/* Reference: Cortex-A5™ MPCore Paragraph 4.1.5, "Cache Operations Registers."
+ *
+ * Terms:
+ * 1) Point of coherency (PoC)
+ * The PoC is the point at which all agents that can access memory are guaranteed
+ * to see the same copy of a memory location
+ * 2) Point of unification (PoU)
+ * The PoU is the point by which the instruction and data caches and the
+ * translation table walks of the processor are guaranteed to see the same copy
+ * of a memory location.
+ *
+ * Cache Operations:
+ *
+ * CP15 Register: ICIALLUIS
+ * Description: Invalidate entire instruction cache Inner Shareable.
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c7, c1, 0
+ * CP15 Register: BPIALLIS
+ * Description: Invalidate entire branch predictor array Inner Shareable.
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c7, c1, 6
+ * CP15 Register: ICIALLU
+ * Description: Invalidate all instruction caches to PoU. Also flushes branch
+ * target cache.
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c7, c5, 0
+ * CP15 Register: ICIMVAU
+ * Description: Invalidate instruction cache by VA to PoU.
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c7, c5, 1
+ * CP15 Register: BPIALL
+ * Description: Invalidate entire branch predictor array.
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c7, c5, 6
+ * CP15 Register: BPIMVA
+ * Description: Invalidate VA from branch predictor array.
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c7, c5, 7
+ * CP15 Register: DCIMVAC
+ * Description: Invalidate data cache line by VA to PoC.
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c7, c6, 1
+ * CP15 Register: DCISW
+ * Description: Invalidate data cache line by Set/Way.
+ * Register Format: Set/Way
+ * Instruction: MCR p15, 0, <Rd>, c7, c6, 2
+ * CP15 Register: DCCMVAC
+ * Description: Clean data cache line to PoC by VA.
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c7, c10, 1
+ * CP15 Register: DCCSW
+ * Description: Clean data cache line by Set/Way.
+ * Register Format: Set/Way
+ * Instruction: MCR p15, 0, <Rd>, c7, c10, 2
+ * CP15 Register: DCCMVAU
+ * Description: Clean data or unified cache line by VA to PoU.
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c7, c11, 1
+ * CP15 Register: DCCIMVAC
+ * Description: Clean and invalidate data cache line by VA to PoC.
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c7, c14, 1
+ * CP15 Register: DCCISW
+ * Description: Clean and invalidate data cache line by Set/Way.
+ * Register Format: Set/Way
+ * Instruction: MCR p15, 0, <Rd>, c7, c14, 2
+ */
+
+/* Set/way format */
+
+#define CACHE_WAY_SHIFT (30) /* Bits 30-31: Way in set being accessed */
+#define CACHE_WAY_MASK (3 << CACHE_WAY_SHIFT)
+#define CACHE_SET_SHIFT (5) /* Bits 5-(S+4): Set being accessed */
+ /* For 4KB cache size: S=5 */
+#define CACHE_SET4KB_MASK (0x1f << CACHE_SET_SHIFT)
+ /* Bits 10-29: Reserved */
+ /* For 8KB cache size: S=6 */
+#define CACHE_SET8KB_MASK (0x3f << CACHE_SET_SHIFT)
+ /* Bits 11-29: Reserved */
+ /* For 16KB cache size: S=7 */
+#define CACHE_SET16KB_MASK (0x7f << CACHE_SET_SHIFT)
+ /* Bits 12-29: Reserved */
+ /* For 32KB cache size: S=8 */
+#define CACHE_SET32KB_MASK (0xff << CACHE_SET_SHIFT)
+ /* Bits 13-29: Reserved */
+ /* For 64KB cache size: S=9 */
+#define CACHE_SET64KB_MASK (0x1ff << CACHE_SET_SHIFT)
+ /* Bits 14-29: Reserved */
+
+/* VA and SBZ format */
+
+#define CACHE_SBZ_SHIFT (4) /* Bits 0-4: SBZ -- NOTE(review): shift vs. bit range looks inconsistent, verify against ARM ARM */
+#define CACHE_SBZ_MASK (31 << CACHE_SBZ_SHIFT)
+#define CACHE_VA_MASK (0xffffffe0) /* Bits 5-31: Virtual address */
+
+/************************************************************************************
+ * Assembly Macros
+ ************************************************************************************/
+
+#ifdef __ASSEMBLY__
+
+/* All cache maintenance operations are performed by WRITING (mcr, not mrc)
+ * to the CP15 register, per the operation table above.
+ *
+ * NOTE(review): each macro loads #0 into \scratch before the mcr, so the
+ * by-VA and by-set/way variants always operate on address/way 0.  Callers
+ * needing a specific VA must load \scratch themselves -- confirm intent.
+ */
+
+/* Invalidate I cache predictor array inner sharable (ICIALLUIS) */
+
+ .macro cp15_invalidate_icache_inner_sharable, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c1, 0
+ .endm
+
+/* Invalidate entire branch predictor array inner sharable (BPIALLIS) */
+
+ .macro cp15_invalidate_btb_inner_sharable, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c1, 6
+ .endm
+
+/* Invalidate all instruction caches to PoU, also flushes branch target cache (ICIALLU) */
+
+ .macro cp15_invalidate_icache, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c5, 0
+ .endm
+
+/* Invalidate instruction caches by VA to PoU (ICIMVAU) */
+
+ .macro cp15_invalidate_icache_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c5, 1
+ .endm
+
+/* Flush entire branch predictor array (BPIALL) */
+
+ .macro cp15_flush_btb, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c5, 6
+ .endm
+
+/* Flush branch predictor array entry by MVA (BPIMVA) */
+
+ .macro cp15_flush_btb_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c5, 7
+ .endm
+
+/* Invalidate data cache line by VA to PoC (DCIMVAC) */
+
+ .macro cp15_invalidate_dcacheline_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c6, 1
+ .endm
+
+/* Invalidate data cache line by set/way (DCISW) */
+
+ .macro cp15_invalidate_dcacheline_bysetway, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c6, 2
+ .endm
+
+/* Clean data cache line by MVA (DCCMVAC) */
+
+ .macro cp15_clean_dcache_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c10, 1
+ .endm
+
+/* Clean data cache line by Set/way (DCCSW) */
+
+ .macro cp15_clean_dcache_bysetway, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c10, 2
+ .endm
+
+/* Clean unified cache line by MVA to PoU (DCCMVAU).  Renamed from a
+ * duplicate cp15_clean_dcache_bymva definition, which would not assemble.
+ */
+
+ .macro cp15_clean_ucache_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c11, 1
+ .endm
+
+/* Clean and invalidate data cache line by VA to PoC (DCCIMVAC) */
+
+ .macro cp15_cleaninvalidate_dcacheline_bymva, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c14, 1
+ .endm
+
+/* Clean and invalidate data cache line by Set/Way (DCCISW) */
+
+ .macro cp15_cleaninvalidate_dcacheline, scratch
+ mov \scratch, #0
+ mcr p15, 0, \scratch, c7, c14, 2
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* Each helper writes a zero operand to the indicated CP15 cache maintenance
+ * register.  Fixes vs. the original: every function now has its closing
+ * brace, and the undefined "ttb" input operand has been removed (these
+ * operations take no input; the operand register is set up inside the asm).
+ *
+ * NOTE(review): the by-VA and by-set/way variants always write 0, i.e. they
+ * act on VA/way 0 only; they likely need a VA parameter -- confirm intent.
+ */
+
+/* Invalidate I cache predictor array inner sharable (ICIALLUIS) */
+
+static inline void cp15_invalidate_icache_inner_sharable(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c1, 0\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Invalidate entire branch predictor array inner sharable (BPIALLIS) */
+
+static inline void cp15_invalidate_btb_inner_sharable(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c1, 6\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Invalidate all instruction caches to PoU, also flushes branch target cache (ICIALLU) */
+
+static inline void cp15_invalidate_icache(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c5, 0\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Invalidate instruction caches by VA to PoU (ICIMVAU) */
+
+static inline void cp15_invalidate_icache_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c5, 1\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Flush entire branch predictor array (BPIALL) */
+
+static inline void cp15_flush_btb(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c5, 6\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Flush branch predictor array entry by MVA (BPIMVA) */
+
+static inline void cp15_flush_btb_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c5, 7\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Invalidate data cache line by VA to PoC (DCIMVAC) */
+
+static inline void cp15_invalidate_dcacheline_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c6, 1\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Invalidate data cache line by set/way (DCISW) */
+
+static inline void cp15_invalidate_dcacheline_bysetway(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c6, 2\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Clean data cache line by MVA (DCCMVAC) */
+
+static inline void cp15_clean_dcache_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c10, 1\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Clean data cache line by Set/way (DCCSW) */
+
+static inline void cp15_clean_dcache_bysetway(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c10, 2\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Clean unified cache line by MVA to PoU (DCCMVAU).  Renamed from a
+ * duplicate cp15_clean_dcache_bymva definition, which would not compile.
+ */
+
+static inline void cp15_clean_ucache_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c11, 1\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Clean and invalidate data cache line by VA to PoC (DCCIMVAC) */
+
+static inline void cp15_cleaninvalidate_dcacheline_bymva(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c14, 1\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+/* Clean and invalidate data cache line by Set/Way (DCCISW) */
+
+static inline void cp15_cleaninvalidate_dcacheline(void)
+{
+ __asm__ __volatile__
+ (
+ "\tmov r0, #0\n"
+ "\tmcr p15, 0, r0, c7, c14, 2\n"
+ :
+ :
+ : "r0", "memory"
+ );
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Variables
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C" {
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ARCH_ARM_SRC_ARMV7_A_CACHE_H */
diff --git a/nuttx/arch/arm/src/armv7-a/cp15.h b/nuttx/arch/arm/src/armv7-a/cp15.h
new file mode 100644
index 000000000..160bc487b
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/cp15.h
@@ -0,0 +1,206 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/cp15.h
+ * CP15 register access
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Authors: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * References:
+ *
+ * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, Copyright © 2010
+ * ARM. All rights reserved. ARM DDI 0434B (ID101810)
+ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright ©
+ * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV7_A_CP15_H
+#define __ARCH_ARM_SRC_ARMV7_A_CP15_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+#include <nuttx/config.h>
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+/* System control register descriptions.
+ *
+ * CP15 registers are accessed with MRC and MCR instructions as follows:
+ *
+ * MRC p15, <Op1>, <Rd>, <CRn>, <CRm>, <Op2> ; Read CP15 Register
+ * MCR p15, <Op1>, <Rd>, <CRn>, <CRm>, <Op2> ; Write CP15 Register
+ *
+ * Where
+ *
+ * <Op1> is the Opcode_1 value for the register
+ * <Rd> is a general purpose register
+ * <CRn> is the register number within CP15
+ * <CRm> is the operational register
+ * <Op2> is the Opcode_2 value for the register.
+ *
+ * Reference: Cortex-A5™ MPCore, Technical Reference Manual, Paragraph 4.2.
+ */
+
+#define _CP15(op1,rd,crn,crm,op2) p15, op1, rd, crn, crm, op2
+
+#define CP15_MIDR(r) _CP15(0, r, c0, c0, 0) /* Main ID Register */
+#define CP15_TR(r) _CP15(0, r, c0, c0, 1) /* Cache Type Register (architecturally named CTR) */
+#define CP15_TCMTR(r) _CP15(0, r, c0, c0, 2) /* TCM Type Register */
+#define CP15_TLBTR(r) _CP15(0, r, c0, c0, 3) /* TLB Type Register */
+#define CP15_MPIDR(r) _CP15(0, r, c0, c0, 5) /* Multiprocessor Affinity Register */
+#define CP15_MID_PFR0(r) _CP15(0, r, c0, c1, 0) /* Processor Feature Register 0 */
+#define CP15_MD_PFR1(r) _CP15(0, r, c0, c1, 1) /* Processor Feature Register 1 (NOTE(review): name likely intended CP15_MID_PFR1) */
+#define CP15_MID_DFR0(r) _CP15(0, r, c0, c1, 2) /* Debug Feature Register 0 */
+#define CP15_MID_MMFR0(r) _CP15(0, r, c0, c1, 4) /* Memory Model Features Register 0 */
+#define CP15_MID_MMFR1(r) _CP15(0, r, c0, c1, 5) /* Memory Model Features Register 1 */
+#define CP15_MID_MMFR2(r) _CP15(0, r, c0, c1, 6) /* Memory Model Features Register 2 */
+#define CP15_MID_MMFR3(r) _CP15(0, r, c0, c1, 7) /* Memory Model Features Register 3 */
+#define CP15_ID_ISAR0(r) _CP15(0, r, c0, c2, 0) /* Instruction Set Attributes Register 0 */
+#define CP15_ID_ISAR1(r) _CP15(0, r, c0, c2, 1) /* Instruction Set Attributes Register 1 */
+#define CP15_ID_ISAR2(r) _CP15(0, r, c0, c2, 2) /* Instruction Set Attributes Register 2 */
+#define CP15_ID_ISAR3(r) _CP15(0, r, c0, c2, 3) /* Instruction Set Attributes Register 3 */
+#define CP15_ID_ISAR4(r) _CP15(0, r, c0, c2, 4) /* Instruction Set Attributes Register 4 */
+#define CP15_ID_ISAR5(r) _CP15(0, r, c0, c2, 5) /* Instruction Set Attributes Register 5 */
+#define CP15_CCSIDR(r) _CP15(1, r, c0, c0, 0) /* Cache Size Identification Register */
+#define CP15_CLIDR(r) _CP15(1, r, c0, c0, 1) /* Cache Level ID Register */
+#define CP15_AIDR(r) _CP15(1, r, c0, c0, 7) /* Auxiliary ID Register */
+#define CP15_CSSELR(r) _CP15(2, r, c0, c0, 0) /* Cache Size Selection Register */
+
+#define CP15_SCTLR(r) _CP15(0, r, c1, c0, 0) /* System Control Register */
+#define CP15_ACTLR(r) _CP15(0, r, c1, c0, 1) /* Auxiliary Control Register */
+#define CP15_CPACR(r) _CP15(0, r, c1, c0, 2) /* Coprocessor Access Control Register */
+#define CP15_SCR(r) _CP15(0, r, c1, c1, 0) /* Secure Configuration Register */
+#define CP15_SDER(r) _CP15(0, r, c1, c1, 1) /* Secure Debug Enable Register */
+#define CP15_NSACR(r) _CP15(0, r, c1, c1, 2) /* Non-secure Access Control Register */
+#define CP15_VCR(r) _CP15(0, r, c1, c1, 3) /* Virtualization Control Register */
+
+#define CP15_TTBR0(r) _CP15(0, r, c2, c0, 0) /* Translation Table Base Register 0 */
+#define CP15_TTBR1(r) _CP15(0, r, c2, c0, 1) /* Translation Table Base Register 1 */
+#define CP15_TTBCR(r) _CP15(0, r, c2, c0, 2) /* Translation Table Base Control Register */
+
+#define CP15_DACR(r) _CP15(0, r, c3, c0, 0) /* Domain Access Control Register */
+
+#define CP15_DFSR(r) _CP15(0, r, c5, c0, 0) /* Data Fault Status Register */
+#define CP15_IFSR(r) _CP15(0, r, c5, c0, 1) /* Instruction Fault Status Register */
+#define CP15_ADFSR(r) _CP15(0, r, c5, c1, 0) /* Auxiliary Data Fault Status Register */
+#define CP15_AIFSR(r) _CP15(0, r, c5, c1, 1) /* Auxiliary Instruction Fault Status Register */
+
+#define CP15_DFAR(r) _CP15(0, r, c6, c0, 0) /* Data Fault Address Register */
+#define CP15_IFAR(r) _CP15(0, r, c6, c0, 2) /* Instruction Fault Address Register */
+
+#define CP15_NOP(r) _CP15(0, r, c7, c0, 4)
+#define CP15_ICIALLUIS(r) _CP15(0, r, c7, c1, 0) /* Invalidate entire instruction cache Inner Shareable */
+#define CP15_BPIALLIS(r) _CP15(0, r, c7, c1, 6) /* Invalidate entire branch predictor array Inner Shareable */
+#define CP15_PAR(r) _CP15(0, r, c7, c4, 0) /* Physical Address Register */
+#define CP15_ICIALLU(r) _CP15(0, r, c7, c5, 0) /* Invalidate all I-caches to PoU (also flushes branch target cache) */
+#define CP15_ICIMVAU(r) _CP15(0, r, c7, c5, 1) /* Invalidate instruction cache by VA to PoU */
+#define CP15_ISB(r) _CP15(0, r, c7, c5, 4) /* Instruction Synchronization Barrier (CP15 encoding) */
+#define CP15_BPIALL(r) _CP15(0, r, c7, c5, 6) /* Invalidate entire branch predictor array */
+#define CP15_BPIMVA(r) _CP15(0, r, c7, c5, 7) /* Invalidate VA from branch predictor array */
+#define CP15_DCIMVAC(r) _CP15(0, r, c7, c6, 1) /* Invalidate data cache line by VA to PoC */
+#define CP15_DCISW(r) _CP15(0, r, c7, c6, 2) /* Invalidate data cache line by Set/Way */
+#define CP15_V2PCWPR(r,n) _CP15(0, r, c7, c8, (n)) /* VA to PA operations, n=0-3 */
+# define CP15_V2PCWPR0(r) _CP15(0, r, c7, c8, 0)
+# define CP15_V2PCWPR1(r) _CP15(0, r, c7, c8, 1)
+# define CP15_V2PCWPR2(r) _CP15(0, r, c7, c8, 2)
+# define CP15_V2PCWPR3(r) _CP15(0, r, c7, c8, 3)
+#define CP15_V2POWPR(r,n) _CP15(0, r, c7, c8, ((n)+4)) /* n=0-3 */
+# define CP15_V2POWPR0(r) _CP15(0, r, c7, c8, 4)
+# define CP15_V2POWPR1(r) _CP15(0, r, c7, c8, 5)
+# define CP15_V2POWPR2(r) _CP15(0, r, c7, c8, 6)
+# define CP15_V2POWPR3(r) _CP15(0, r, c7, c8, 7)
+#define CP15_DCCMVAC(r) _CP15(0, r, c7, c10, 1) /* Clean data cache line to PoC by VA */
+#define CP15_DCCSW(r) _CP15(0, r, c7, c10, 2) /* Clean data cache line by Set/Way */
+#define CP15_DSB(r) _CP15(0, r, c7, c10, 4) /* Data Synchronization Barrier (CP15 encoding) */
+#define CP15_DMB(r) _CP15(0, r, c7, c10, 5) /* Data Memory Barrier (CP15 encoding) */
+#define CP15_DCCMVAU(r) _CP15(0, r, c7, c11, 1) /* Clean data or unified cache line by VA to PoU */
+#define CP15_DCCIMVAC(r) _CP15(0, r, c7, c14, 1) /* Clean and invalidate data cache line by VA to PoC */
+#define CP15_DCCISW(r) _CP15(0, r, c7, c14, 2) /* Clean and invalidate data cache line by Set/Way */
+
+#define CP15_TLBIALLIS(r) _CP15(0, r, c8, c3, 0) /* TLB maintenance operations */
+#define CP15_TLBIMVAIS(r) _CP15(0, r, c8, c3, 1)
+#define CP15_TLBIASIDIS(r) _CP15(0, r, c8, c3, 2)
+#define CP15_TLBIMVAAIS(r) _CP15(0, r, c8, c3, 3)
+#define CP15_TLBIALL(r,c) _CP15(0, r, c8, c, 0) /* CRm = c5, c6, or c7 */
+#define CP15_TLBIMVA(r,c) _CP15(0, r, c8, c, 1) /* CRm = c5, c6, or c7 */
+#define CP15_TLBIASID(r,c) _CP15(0, r, c8, c, 2) /* CRm = c5, c6, or c7 */
+#define CP15_TLBIMVAA(r,c) _CP15(0, r, c8, c, 3) /* CRm = c5, c6, or c7 */
+
+#define CP15_MCR(r) _CP15(0, r, c9, c12, 0) /* Performance Monitor Control Register (NOTE(review): name likely intended CP15_PMCR) */
+#define CP15_PMCNTENSET(r) _CP15(0, r, c9, c12, 1) /* Count Enable Set Register */
+#define CP15_PMCNTENCLR(r) _CP15(0, r, c9, c12, 2) /* Count Enable Clear Register */
+#define CP15_MOVSR(r) _CP15(0, r, c9, c12, 3) /* Overflow Flag Status Register (NOTE(review): name likely intended CP15_PMOVSR) */
+#define CP15_PMSWINC(r) _CP15(0, r, c9, c12, 4) /* Software Increment Register */
+#define CP15_PMSELR(r) _CP15(0, r, c9, c12, 5) /* Event Counter Selection Register */
+#define CP15_PMCEID0(r) _CP15(0, r, c9, c12, 6) /* Common Event Identification Registers */
+#define CP15_PMCEID1(r) _CP15(0, r, c9, c12, 7)
+#define CP15_PMCCNTR(r) _CP15(0, r, c9, c13, 0) /* Cycle Count Register */
+#define CP15_PMXEVTYPER(r) _CP15(0, r, c9, c13, 1) /* Event Type Select Register */
+#define CP15_PMCCFILTR(r) _CP15(0, r, c9, c13, 1) /* Cycle Count Filter Control Register (same encoding as PMXEVTYPER; presumably selected via PMSELR -- verify) */
+#define CP15_MXEVCNTR(r) _CP15(0, r, c9, c13, 2) /* Event Count Registers (NOTE(review): name likely intended CP15_PMXEVCNTR) */
+#define CP15_PMUSERENR(r) _CP15(0, r, c9, c14, 0) /* User Enable Register */
+#define CP15_PMINTENSET(r) _CP15(0, r, c9, c14, 1) /* Interrupt Enable Set Register */
+#define CP15_PMINTENCLR(r) _CP15(0, r, c9, c14, 2) /* Interrupt Enable Clear Register */
+
+#define CP15_PPRRR(r) _CP15(0, r, c10, c2, 0) /* Memory region remap: Primary Region Remap Register */
+#define CP15_PNMRR(r) _CP15(0, r, c10, c2, 1) /* Normal Memory Remap Register */
+
+#define CP15_VBAR(r) _CP15(0, r, c12, c0, 0) /* Vector Base Address Register */
+#define CP15_MVBAR(r) _CP15(0, r, c12, c0, 1) /* Monitor Vector Base Address Register */
+#define CP15_ISR(r) _CP15(0, r, c12, c1, 0) /* Interrupt Status Register */
+#define CP15_VIR(r) _CP15(0, r, c12, c1, 1) /* Virtualization Interrupt Register */
+
+#define CP15_FCSEIDR(r) _CP15(0, r, c13, c0, 0) /* Fast Context Switch Extension (FCSE) not implemented */
+#define CP15_CONTEXTIDR(r) _CP15(0, r, c13, c0, 1) /* Context ID Register */
+#define CP15_TPIDRURW(r) _CP15(0, r, c13, c0, 2) /* Software Thread ID Registers */
+#define CP15_TPIDRURO(r) _CP15(0, r, c13, c0, 3)
+#define CP15_TPIDRPRW(r) _CP15(0, r, c13, c0, 4)
+
+#define CP15_DR0(r) _CP15(3, r, c15, c0, 0) /* Data Register */
+#define CP15_DR1(r) _CP15(3, r, c15, c0, 1) /* Data Register */
+#define CP15_DTAGR(r) _CP15(3, r, c15, c2, 0) /* Data Cache Tag Read Operation Register */
+#define CP15_ITAGR(r) _CP15(3, r, c15, c2, 1) /* Instruction Cache Tag Read Operation Register */
+#define CP15_DDATAR(r) _CP15(3, r, c15, c4, 0) /* Data Cache Data Read Operation Register */
+#define CP15_IDATAR(r) _CP15(3, r, c15, c4, 1) /* Instruction Cache Data Read Operation Register */
+#define CP15_TLBR(r) _CP15(3, r, c15, c4, 2) /* TLB Data Read Operation Register */
+#define CP15_CBADDR(r) _CP15(4, r, c15, c0, 0) /* Configuration Base Address Register */
+#define CP15_TLBHITMAP(r) _CP15(5, r, c15, c0, 0) /* TLB access and attributes */
+
+/* System control register descriptions.
+ *
+ * To be provided
+ *
+ * Reference: Cortex-A5™ MPCore, Technical Reference Manual, Paragraph 4.3.
+ */
+
+#endif /* __ARCH_ARM_SRC_ARMV7_A_CP15_H */
diff --git a/nuttx/arch/arm/src/armv7-a/mmu.h b/nuttx/arch/arm/src/armv7-a/mmu.h
new file mode 100644
index 000000000..2daab81fc
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/mmu.h
@@ -0,0 +1,332 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/mmu.h
+ * CP15 MMU register definitions
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * References:
+ *
+ * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, Copyright © 2010
+ * ARM. All rights reserved. ARM DDI 0434B (ID101810)
+ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright ©
+ * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV7_A_MMU_H
+#define __ARCH_ARM_SRC_ARMV7_A_MMU_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+/* Reference: Cortex-A5™ MPCore Paragraph 6.7, "MMU software accessible registers." */
+
+/* TLB Type Register TLB Type Register
+ *
+ * The Translation Lookaside Buffer (TLB) Type Register, TLBTR, returns the number
+ * of lockable entries for the TLB. The Cortex-A5 MPCore processor does not
+ * implement this feature, so this register always RAZ.
+ */
+
+/* System Control Register (SCTRL). see sctrl.h */
+/* Non-secure Access Control Register (NSACR). See sctrl.h */
+
+/* Translation Table Base Register 0 (TTBR0) */
+
+#define TTBR0_IRGN1 (1 << 0) /* Bit 0: Inner cacheability for table walk */
+#define TTBR0_S (1 << 1) /* Bit 1: Translation table walk */
+ /* Bit 2: Reserved */
+#define TTBR0_RGN_SHIFT (3) /* Bits 3-4: Outer cacheable attributes for table walk */
+#define TTBR0_RGN_MASK (3 << TTBR0_RGN_SHIFT)
+# define TTBR0_RGN_NONE (0 << TTBR0_RGN_SHIFT) /* Non-cacheable */
+# define TTBR0_RGN_WBWA (1 << TTBR0_RGN_SHIFT) /* Write-Back cached + Write-Allocate */
+# define TTBR0_RGN_WT (2 << TTBR0_RGN_SHIFT) /* Write-Through */
+# define TTBR0_RGN_WB (3 << TTBR0_RGN_SHIFT) /* Write-Back */
+ /* Bit 5: Reserved */
+#define TTBR0_IRGN0 (1 << 6) /* Bit 6: Inner cacheability (with IRGN1) */
+ /* Bits 7-n: Reserved, n=7-13 */
+#define _TTBR0_LOWER(n) (0xffffffff >> (31 - (n))) /* Mask of bits 0-n */
+ /* Bits (n+1)-31: Translation table base 0 */
+#define TTBR0_BASE_MASK(n) (~_TTBR0_LOWER(n))
+
+/* Translation Table Base Register 1 (TTBR1) */
+
+#define TTBR1_IRGN1 (1 << 0) /* Bit 0: Inner cacheability for table walk */
+#define TTBR1_S (1 << 1) /* Bit 1: Translation table walk */
+ /* Bit 2: Reserved */
+#define TTBR1_RGN_SHIFT (3) /* Bits 3-4: Outer cacheable attributes for table walk */
+#define TTBR1_RGN_MASK (3 << TTBR1_RGN_SHIFT)
+# define TTBR1_RGN_NONE (0 << TTBR1_RGN_SHIFT) /* Non-cacheable */
+# define TTBR1_RGN_WBWA (1 << TTBR1_RGN_SHIFT) /* Write-Back cached + Write-Allocate */
+# define TTBR1_RGN_WT (2 << TTBR1_RGN_SHIFT) /* Write-Through */
+# define TTBR1_RGN_WB (3 << TTBR1_RGN_SHIFT) /* Write-Back */
+ /* Bit 5: Reserved */
+#define TTBR1_IRGN0 (1 << 6) /* Bit 6: Inner cacheability (with IRGN0) */
+ /* Bits 7-13: Reserved */
+#define TTBR1_BASE_SHIFT (14) /* Bits 14-31: Translation table base 1 */
+#define TTBR1_BASE_MASK (0xffffc000)
+
+/* Translation Table Base Control Register (TTBCR) */
+
+#define TTBCR_N_SHIFT (0) /* Bits 0-2: Boundary size of TTBR0 */
+#define TTBCR_N_MASK (7 << TTBCR_N_SHIFT)
+# define TTBCR_N_16KB (0 << TTBCR_N_SHIFT) /* Reset value */
+# define TTBCR_N_8KB (1 << TTBCR_N_SHIFT)
+# define TTBCR_N_4KB (2 << TTBCR_N_SHIFT)
+# define TTBCR_N_2KB (3 << TTBCR_N_SHIFT)
+# define TTBCR_N_1KB (4 << TTBCR_N_SHIFT)
+# define TTBCR_N_512B (5 << TTBCR_N_SHIFT)
+# define TTBCR_N_256B (6 << TTBCR_N_SHIFT)
+# define TTBCR_N_128B (7 << TTBCR_N_SHIFT)
+ /* Bit 3: Reserved */
+#define TTBCR_PD0 (1 << 4) /* Bit 4: Translation table walk on a TLB miss w/TTBR0 */
+#define TTBCR_PD1 (1 << 5) /* Bit 5: Translation table walk on a TLB miss w/TTBR1 */
+ /* Bits 6-31: Reserved */
+
+/* Domain Access Control Register (DACR). 16 two-bit domain fields. */
+
+#define DACR_SHIFT(n) ((n) << 1) /* Shift for domain n, n=0-15 */
+#define DACR_MASK(n) (3 << DACR_SHIFT(n))
+# define DACR_NONE(n) (0 << DACR_SHIFT(n)) /* Any access generates a domain fault */
+# define DACR_CLIENT(n) (1 << DACR_SHIFT(n)) /* Accesses checked against permissions TLB */
+# define DACR_MANAGER(n) (3 << DACR_SHIFT(n)) /* Accesses are not checked */
+
+/* Data Fault Status Register (DFSR) */
+
+#define DFSR_STATUS_SHIFT (0) /* Bits 0-3: Type of exception generated (w/EXT and FS) */
+#define DFSR_STATUS_MASK (15 << DFSR_STATUS_SHIFT)
+#define DFSR_DOMAIN_SHIFT (4) /* Bits 4-7: Domain accessed when a data fault occurred */
+#define DFSR_DOMAIN_MASK (15 << DFSR_DOMAIN_SHIFT)
+ /* Bits 8-9: Reserved */
+#define DFSR_FS (1 << 10) /* Bit 10: Part of the STATUS field */
+#define DFSR_WNR (1 << 11) /* Bit 11: Not read and write */
+#define DFSR_EXT (1 << 12) /* Bit 12: External Abort Qualifier */
+ /* Bits 13-31: Reserved */
+
+/* Instruction Fault Status Register (IFSR) */
+
+#define IFSR_STATUS_SHIFT (0) /* Bits 0-3: Type of fault generated (w/EXT and FS) */
+#define IFSR_STATUS_MASK (15 << IFSR_STATUS_SHIFT)
+ /* Bits 4-9: Reserved */
+#define IFSR_S (1 << 10) /* Bit 10: Part of the STATUS field */
+ /* Bits 11: Reserved */
+#define IFSR_EXT (1 << 12) /* Bit 12: External Abort Qualifier */
+ /* Bits 13-31: Reserved */
+
+/* Data Fault Address Register(DFAR). Holds the MVA of the faulting address when a
+ * synchronous fault occurs
+ *
+ * Instruction Fault Address Register(IFAR). Holds the MVA of the faulting address
+ * of the instruction that caused a prefetch abort.
+ */
+
+/* TLB operations.
+ *
+ * CP15 Register: TLBIALLIS
+ * Description: Invalidate entire Unified TLB Inner Shareable
+ * Register Format: SBZ
+ * Instruction: MCR p15, 0, <Rd>, c8, c3, 0
+ * CP15 Register: TLBIMVAIS
+ * Description: Invalidate Unified TLB entry by VA Inner Shareable
+ * Register Format: VA/ASID
+ * Instruction: MCR p15, 0, <Rd>, c8, c3, 1
+ * CP15 Register: TLBIASIDIS
+ * Description: Invalidate Unified TLB entry by ASID match Inner Shareable
+ * Register Format: ASID
+ * Instruction: MCR p15, 0, <Rd>, c8, c3, 2
+ * CP15 Register: TLBIMVAAIS
+ * Description: Invalidate Unified TLB entry by VA all ASID Inner Shareable
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c8, c3, 3
+ * CP15 Register: TLBIALL
+ * Description: Invalidate entire Unified TLB
+ * Register Format: Ignored
+ * Instruction: MCR p15, 0, <Rd>, c8, c7, 0
+ * CP15 Register: TLBIMVA
+ * Description: Invalidate Unified TLB by VA
+ * Register Format: VA/ASID
+ * Instruction: MCR p15, 0, <Rd>, c8, c7, 1
+ * CP15 Register: TLBIASID
+ * Description: Invalidate TLB entries by ASID Match
+ * Register Format: ASID
+ * MCR p15, 0, <Rd>, c8, c7, 2
+ * CP15 Register: TLBIMVAA
+ * Description: Invalidate TLB entries by VA All ASID
+ * Register Format: VA
+ * Instruction: MCR p15, 0, <Rd>, c8, c7, 3
+ */
+
+#define TLB_ASID_SHIFT (0) /* Bits 0-7: Address Space Identifier */
+#define TLB_ASID_MASK (0xff << TLB_ASID_SHIFT)
+#define TLB_SBZ_SHIFT (8) /* Bits 8-11: SBZ */
+#define TLB_SBZ_MASK (15 << TLB_SBZ_SHIFT)
+#define TLB_VA_MASK (0xfffff000) /* Bits 12-31: Virtual address */
+
+/* Primary Region Remap Register (PRRR) */
+/* Normal Memory Remap Register (NMRR) */
+
+/* TLB Hitmap Register (TLBHR) */
+
+#define TLBHR_4KB (1 << 0) /* Bit 0: 4KB pages are present in the TLB */
+#define TLBHR_16KB (1 << 1) /* Bit 1: 16KB pages are present in the TLB */
+#define TLBHR_1MB (1 << 2) /* Bit 2: 1MB sections are present in the TLB */
+#define TLBHR_16MB (1 << 3) /* Bit 3: 16MB supersections are present in the TLB */
+ /* Bits 4-31: Reserved */
+
+/* Context ID Register (CONTEXTIDR). See sctrl.h */
+
+/************************************************************************************
+ * Assemby Macros
+ ************************************************************************************/
+
+#ifdef __ASSEMBLY__
+
+/* Write the Domain Access Control Register (DACR) */
+
+ .macro cp15_wrdacr, dacr
+ mcr p15, 0, \dacr, c3, c0, 0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ .endm
+
+/* The ARMv7-aA architecture supports two translation tables. This
+ * implementation, however, uses only translation table 0. This
+ * functions clears the TTB control register (TTBCR), indicating that
+ * we are using TTB 0. This is it writes the value of the page table
+ * to Translation Table Base Register 0 (TTBR0).
+ */
+
+ .macro cp14_wrttb, ttb, scratch
+ mcr p15, 0, \ttb, c2, c0, 0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ mov \scratch, #0x0
+ mcr p15, 0, \scratch, c2, c0, 2
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* Write the Domain Access Control Register (DACR) */
+
+static inline void cp15_wrdacr(unsigned int dacr)
+{
+  __asm__ __volatile__
+    (
+      "\tmcr p15, 0, %0, c3, c0, 0\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      :
+      : "r" (dacr)
+      : "memory"
+    );
+}
+
+/* The ARMv7-A architecture supports two translation tables.  This
+ * implementation, however, uses only translation table 0.  That is, this
+ * function clears the TTB control register (TTBCR) to select TTB 0, and
+ * writes the value of the page table to Translation Table Base Register 0
+ * (TTBR0).  NOTE: despite the cp14_ prefix, only CP15 registers are
+ * accessed here.
+ */
+
+static inline void cp14_wrttb(unsigned int ttb)
+{
+  __asm__ __volatile__
+    (
+      "\tmcr p15, 0, %0, c2, c0, 0\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tmov r1, #0\n"
+      "\tmcr p15, 0, r1, c2, c0, 2\n"
+      :
+      : "r" (ttb)
+      : "r1", "memory"
+    );
+}
+
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Variables
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C" {
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_ARM_SRC_ARMV7_A_MMU_H */
diff --git a/nuttx/arch/arm/src/armv7-a/sctrl.h b/nuttx/arch/arm/src/armv7-a/sctrl.h
new file mode 100644
index 000000000..7ea447b1b
--- /dev/null
+++ b/nuttx/arch/arm/src/armv7-a/sctrl.h
@@ -0,0 +1,340 @@
+/************************************************************************************
+ * arch/arm/src/armv7-a/sctrl.h
+ * CP15 System Control Registers
+ *
+ * Copyright (C) 2013 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * References:
+ *
+ * "Cortex-A5™ MPCore, Technical Reference Manual", Revision: r0p1, Copyright © 2010
+ * ARM. All rights reserved. ARM DDI 0434B (ID101810)
+ * "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright ©
+ * 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM DDI 0406C.b (ID072512)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ * used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ************************************************************************************/
+
+#ifndef __ARCH_ARM_SRC_ARMV7_A_CPSR_H
+#define __ARCH_ARM_SRC_ARMV7_A_CPSR_H
+
+/************************************************************************************
+ * Included Files
+ ************************************************************************************/
+
+/************************************************************************************
+ * Pre-processor Definitions
+ ************************************************************************************/
+/* Reference: Cortex-A5™ MPCore Paragraph 4.2, "Register summary." */
+
+/* Main ID Register (MIDR) */
+/* TODO: To be provided */
+
+/* Cache Type Register (CTR) */
+/* TODO: To be provided */
+
+/* TCM Type Register
+ *
+ * The Cortex-A5 MPCore processor does not implement instruction or data Tightly
+ * Coupled Memory (TCM), so this register always Reads-As-Zero (RAZ).
+ *
+ * TLB Type Register
+ *
+ * The Cortex-A5 MPCore processor does not implement instruction or data Tightly
+ * CoupledMemory (TCM), so this register always Reads-As-Zero (RAZ).
+ */
+
+/* Multiprocessor Affinity Register (MPIDR) */
+/* TODO: To be provided */
+
+/* Processor Feature Register 0 (ID_PFR0) */
+/* TODO: To be provided */
+
+/* Processor Feature Register 1 (ID_PFR1) */
+/* TODO: To be provided */
+
+/* Debug Feature Register 0 (ID_DFR0) */
+/* TODO: To be provided */
+
+/* Auxiliary Feature Register 0 (ID_AFR0) */
+/* TODO: To be provided */
+
+/* Memory Model Features Register 0 (ID_MMFR0) */
+/* Memory Model Features Register 1 (ID_MMFR1) */
+/* Memory Model Features Register 2 (ID_MMFR2) */
+/* Memory Model Features Register 3 (ID_MMFR3) */
+/* TODO: To be provided */
+
+/* Instruction Set Attributes Register 0 (ID_ISAR0) */
+/* Instruction Set Attributes Register 1 (ID_ISAR1) */
+/* Instruction Set Attributes Register 2 (ID_ISAR2) */
+/* Instruction Set Attributes Register 3 (ID_ISAR3) */
+/* Instruction Set Attributes Register 4 (ID_ISAR4) */
+/* Instruction Set Attributes Register 5 (ID_ISAR5) */
+/* Instruction Set Attributes Register 6-7 (ID_ISAR6-7). Reserved. */
+/* TODO: Others to be provided */
+
+/* Cache Size Identification Register (CCSIDR) */
+/* TODO: To be provided */
+
+/* Cache Level ID Register (CLIDR) */
+/* TODO: To be provided */
+
+/* Auxiliary ID Register (AIDR) */
+/* TODO: To be provided */
+
+/* Cache Size Selection Register (CSSELR) */
+/* TODO: To be provided */
+
+/* System Control Register (SCTLR)
+ *
+ * NOTES:
+ * (1) Always enabled on A5
+ * (2) Not available on A5
+ */
+
+#define SCTLR_M (1 << 0) /* Bit 0: Enables the MMU */
+#define SCTLR_A (1 << 1) /* Bit 1: Enables strict alignment of data */
+#define SCTLR_C (1 << 2) /* Bit 2: Determines if data can be cached */
+ /* Bits 3-9: Reserved */
+#define SCTLR_SW (1 << 10) /* Bit 10: SWP/SWPB Enable bit */
+#define SCTLR_Z (1 << 11) /* Bit 11: Program flow prediction control (1) */
+#define SCTLR_I (1 << 12) /* Bit 12: Determines if instructions can be cached */
+#define SCTLR_V (1 << 13) /* Bit 13: Vectors bit */
+#define SCTLR_RR (1 << 14) /* Bit 14: Cache replacement strategy (2) */
+ /* Bits 15-16: Reserved */
+#define SCTLR_HA (1 << 17) /* Bit 17: Hardware management access disabled (2) */
+ /* Bits 18-24: Reserved */
+#define SCTLR_EE (1 << 25) /* Bit 25: Determines the value the CPSR.E */
+ /* Bits 26-27: Reserved */
+#define SCTLR_TRE (1 << 28) /* Bit 28: TEX remap */
+#define SCTLR_AFE (1 << 29) /* Bit 29: Access Flag Enable bit */
+#define SCTLR_TE (1 << 30) /* Bit 30: Thumb exception enable */
+ /* Bit 31: Reserved */
+
+/* Auxiliary Control Register (ACTLR) */
+/* TODO: To be provided */
+
+/* Coprocessor Access Control Register (CPACR) */
+/* TODO: To be provided */
+
+/* Secure Configuration Register (SCR) */
+/* TODO: To be provided */
+
+/* Secure Debug Enable Register (SDER) */
+/* TODO: To be provided */
+
+/* Non-secure Access Control Register (NSACR) */
+
+ /* Bits 0-9: Reserved */
+#define NSACR_CP10 (1 << 10) /* Bit 10: Permission to access coprocessor 10 */
+#define NSACR_CP11 (1 << 11) /* Bit 11: Permission to access coprocessor 11 */
+ /* Bits 12-13: Reserved */
+#define NSACR_NSD32DIS (1 << 14) /* Bit 14: Disable the Non-secure use of VFP D16-D31 */
+#define NSACR_NSASEDIS (1 << 15) /* Bit 15: Disable Non-secure Advanced SIMD Extension */
+ /* Bits 16-17: Reserved */
+#define NSACR_NSSMP (1 << 18) /* Bit 18: ACR SMP bit writable */
+ /* Bits 19-31: Reserved */
+
+/* Virtualization Control Register (VCR) */
+/* TODO: To be provided */
+
+/* Translation Table Base Register 0 (TTBR0). See mmu.h */
+/* Translation Table Base Register 1 (TTBR1). See mmu.h */
+/* Translation Table Base Control Register (TTBCR). See mmu.h */
+/* Domain Access Control Register (DACR). See mmu.h */
+/* Data Fault Status Register (DFSR). See mmu.h */
+/* Instruction Fault Status Register (IFSR). See mmu.h */
+
+/* Auxiliary Data Fault Status Register (ADFSR). Not used in this implementation. */
+
+/* Data Fault Address Register(DFAR)
+ *
+ * Holds the MVA of the faulting address when a synchronous fault occurs
+ *
+ * Instruction Fault Address Register(IFAR)
+ *
+ * Holds the MVA of the faulting address of the instruction that caused a prefetch
+ * abort.
+ *
+ * NOP Register
+ *
+ * The use of this register is optional and deprecated. Use the NOP instruction
+ * instead.
+ *
+ * Physical Address Register (PAR)
+ *
+ * Holds:
+ * - the PA after a successful translation
+ * - the source of the abort for an unsuccessful translation
+ *
+ * Instruction Synchronization Barrier
+ *
+ * The use of ISB is optional and deprecated. Use the instruction ISB instead.
+ *
+ * Data Memory Barrier
+ * The use of DMB is deprecated and, on Cortex-A5 MPCore, behaves as NOP. Use the
+ * instruction DMB instead.
+ */
+
+/* Vector Base Address Register (VBAR) */
+/* TODO: To be provided */
+
+/* Monitor Vector Base Address Register (MVBAR) */
+/* TODO: To be provided */
+
+/* Interrupt Status Register (ISR) */
+/* TODO: To be provided */
+
+/* Virtualization Interrupt Register (VIR) */
+/* TODO: To be provided */
+
+/* Context ID Register (CONTEXTIDR) */
+
+#define CONTEXTIDR_ASID_SHIFT (0) /* Bits 0-7: Address Space Identifier */
+#define CONTEXTIDR_ASID_MASK (0xff << CONTEXTIDR_ASID_SHIFT)
+#define CONTEXTIDR_PROCID_SHIFT (8) /* Bits 8-31: Process Identifier */
+#define CONTEXTIDR_PROCID_MASK (0x00ffffff << CONTEXTIDR_PROCID_SHIFT)
+
+/* Configuration Base Address Register (CBAR) */
+/* TODO: To be provided */
+
+/************************************************************************************
+ * Assemby Macros
+ ************************************************************************************/
+
+#ifdef __ASSEMBLY__
+
+/* Get the device ID */
+
+ .macro cp15_rdid, id
+ mrc p15, 0, \id, c0, c0, 0
+ .endm
+
+/* Read/write the system control register (SCTRL) */
+
+ .macro cp15_rdsctrl, sctrl
+ mrc p15, 0, \sctrl, c1, c0, 0
+ .endm
+
+ .macro cp15_wrsctrl, sctrl
+ mcr p15, 0, \sctrl, c1, c0, 0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ .endm
+#endif /* __ASSEMBLY__ */
+
+/************************************************************************************
+ * Inline Functions
+ ************************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+/* Get the device ID */
+
+static inline unsigned int cp15_rdid(void)
+{
+ unsigned int id;
+ __asm__ __volatile__
+ (
+ "\tmrc p15, 0, %0, c0, c0, 0"
+ : "=r" (id)
+ :
+ : "memory"
+ );
+
+ return id;
+}
+
+/* Read/write the system control register (SCTRL) */
+
+static inline unsigned int cp15_rdsctrl(void)
+{
+ unsigned int sctrl;
+ __asm__ __volatile__
+ (
+ "\tmrc p15, 0, %0, c1, c0, 0"
+ : "=r" (sctrl)
+ :
+ : "memory"
+ );
+
+ return sctrl;
+}
+
+static inline void cp15_wrsctrl(unsigned int sctrl)
+{
+  __asm__ __volatile__
+    (
+      "\tmcr p15, 0, %0, c1, c0, 0\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      "\tnop\n"
+      :
+      : "r" (sctrl)
+      : "memory"
+    );
+}
+
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Variables
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C" {
+#else
+#define EXTERN extern
+#endif
+
+#undef EXTERN
+#ifdef __cplusplus
+}
+#endif
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_ARM_SRC_ARMV7_A_CPSR_H */
diff --git a/nuttx/configs/README.txt b/nuttx/configs/README.txt
index b9ea98e22..ce910376a 100644
--- a/nuttx/configs/README.txt
+++ b/nuttx/configs/README.txt
@@ -1960,8 +1960,8 @@ configs/rgmp
hybrid operating system. This makes your application able to use both RTOS
and GPOS features.
- See http://rgmp.sourceforge.net/wiki/index.php/Main_Page for further information
- about RGMP.
+ See http://rgmp.sourceforge.net/wiki/index.php/Main_Page for further
+ information about RGMP.
configs/sam3u-ek
The port of NuttX to the Atmel SAM3U-EK development board.