/************************************************************************************
 * arch/arm/src/armv7-a/mmu.h
 * CP15 MMU register definitions
 *
 *   Copyright (C) 2013-2014 Gregory Nutt. All rights reserved.
 *   Author: Gregory Nutt <gnutt@nuttx.org>
 *
 * References:
 *
 *  "Cortex-A5� MPCore, Technical Reference Manual", Revision: r0p1, Copyright �
 *   2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
 *  "ARM� Architecture Reference Manual, ARMv7-A and ARMv7-R edition", Copyright �
 *   1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
 *   DDI 0406C.b (ID072512)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name NuttX nor the names of its contributors may be
 *    used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ************************************************************************************/

#ifndef __ARCH_ARM_SRC_ARMV7_A_MMU_H
#define __ARCH_ARM_SRC_ARMV7_A_MMU_H

/************************************************************************************
 * Included Files
 ************************************************************************************/

#include <nuttx/config.h>

#ifndef __ASSEMBLY__
#  include <sys/types.h>
#  include <stdint.h>
#  include "chip.h"
#endif /* __ASSEMBLY__ */

/************************************************************************************
 * Pre-processor Definitions
 ************************************************************************************/
/* Configuration ********************************************************************/

#if defined(CONFIG_PAGING) || defined(CONFIG_ARCH_ADDRENV)

/* Sanity check -- we cannot be using a ROM page table and supporting on-
 * demand paging.
 */

#ifdef CONFIG_ARCH_ROMPGTABLE
#  error "Cannot support both CONFIG_PAGING/CONFIG_ARCH_ADDRENV and CONFIG_ARCH_ROMPGTABLE"
#endif
#endif /* CONFIG_PAGING */

/* MMU CP15 Register Bit Definitions ************************************************/
/* Reference: Cortex-A5® MPCore Paragraph 6.7, "MMU software accessible registers." */

/* TLB Type Register (TLBTR)
 *
 * The Translation Lookaside Buffer (TLB) Type Register, TLBTR, returns the number of
 * lockable entries for the TLB. The Cortex-A5 MPCore processor does not implement
 * this feature, so this register always reads-as-zero (RAZ).
 */

/* System Control Register (SCTLR).  See cstlr.h */
/* Non-secure Access Control Register (NSACR).  See cstlr.h */

/* Translation Table Base Register 0 (TTBR0) */

#define TTBR0_IRGN1          (1 << 0)  /* Bit 0:  Inner cacheability IRGN[1] (MP extensions) */
#define TTBR0_C              (1 << 0)  /* Bit 0:  Inner cacheability for table walk */
#define TTBR0_S              (1 << 1)  /* Bit 1:  Table walks to Shareable memory */
                                       /* Bit 2:  Reserved */
#define TTBR0_RGN_SHIFT      (3)       /* Bits 3-4: Outer cacheable attributes for table walk */
#define TTBR0_RGN_MASK       (3 << TTBR0_RGN_SHIFT)
#  define TTBR0_RGN_NONE     (0 << TTBR0_RGN_SHIFT) /* Non-cacheable */
#  define TTBR0_RGN_WBWA     (1 << TTBR0_RGN_SHIFT) /* Write-Back cached + Write-Allocate */
#  define TTBR0_RGN_WT       (2 << TTBR0_RGN_SHIFT) /* Write-Through */
#  define TTBR0_RGN_WB       (3 << TTBR0_RGN_SHIFT) /* Write-Back */
#define TTBR0_NOS            (1 << 5)  /* Bit 5:  Not Outer Shareable bit */
#define TTBR0_IRGN0          (1 << 6)  /* Bit 6:  Inner cacheability IRGN[0] (MP extensions) */
                                       /* Bits 7-n: Reserved, n=7-13 */
#define _TTBR0_LOWER(n)      (0xffffffff << (n))
                                       /* Bits (n+1)-31: Translation table base 0 */
#define TTBR0_BASE_MASK(n)   (~_TTBR0_LOWER(n))

/* Translation Table Base Register 1 (TTBR1) */

#define TTBR1_IRGN1          (1 << 0)  /* Bit 0:  Inner cacheability IRGN[1] (MP extensions) */
#define TTBR1_C              (1 << 0)  /* Bit 0:  Inner cacheability for table walk */
#define TTBR1_S              (1 << 1)  /* Bit 1:  Table walks to Shareable memory */
                                       /* Bit 2:  Reserved */
#define TTBR1_RGN_SHIFT      (3)       /* Bits 3-4: Outer cacheable attributes for table walk */
#define TTBR1_RGN_MASK       (3 << TTBR1_RGN_SHIFT)
#  define TTBR1_RGN_NONE     (0 << TTBR1_RGN_SHIFT) /* Non-cacheable */
#  define TTBR1_RGN_WBWA     (1 << TTBR1_RGN_SHIFT) /* Write-Back cached + Write-Allocate */
#  define TTBR1_RGN_WT       (2 << TTBR1_RGN_SHIFT) /* Write-Through */
#  define TTBR1_RGN_WB       (3 << TTBR1_RGN_SHIFT) /* Write-Back */
#define TTBR1_NOS            (1 << 5)  /* Bit 5:  Not Outer Shareable bit */
#define TTBR1_IRGN0          (1 << 6)  /* Bit 6:  Inner cacheability IRGN[0] (MP extensions) */
                                       /* Bits 7-13: Reserved */
#define TTBR1_BASE_SHIFT     (14)      /* Bits 14-31: Translation table base 1 */
#define TTBR1_BASE_MASK      (0xffffc000)

/* Translation Table Base Control Register (TTBCR) */

#define TTBCR_N_SHIFT        (0)       /* Bits 0-2: Boundary size of TTBR0 */
#define TTBCR_N_MASK         (7 << TTBCR_N_SHIFT)
#  define TTBCR_N_16KB       (0 << TTBCR_N_SHIFT) /* Reset value */
#  define TTBCR_N_8KB        (1 << TTBCR_N_SHIFT)
#  define TTBCR_N_4KB        (2 << TTBCR_N_SHIFT)
#  define TTBCR_N_2KB        (3 << TTBCR_N_SHIFT)
#  define TTBCR_N_1KB        (4 << TTBCR_N_SHIFT)
#  define TTBCR_N_512B       (5 << TTBCR_N_SHIFT)
#  define TTBCR_N_256B       (6 << TTBCR_N_SHIFT)
#  define TTBCR_N_128B       (7 << TTBCR_N_SHIFT)
                                       /* Bit 3:  Reserved */
#define TTBCR_PD0            (1 << 4)  /* Bit 4:  No table walk on a TLB miss w/TTBR0 */
#define TTBCR_PD1            (1 << 5)  /* Bit 5:  No table walk on a TLB miss w/TTBR1 */
                                       /* Bits 6-31: Reserved */

/* Domain Access Control Register (DACR) */

#define DACR_SHIFT(n)        ((n) << 1) /* Domain n, n=0-15 */
#define DACR_MASK(n)         (3 << DACR_SHIFT(n))
#  define DACR_NONE(n)       (0 << DACR_SHIFT(n)) /* Any access generates a domain fault */
#  define DACR_CLIENT(n)     (1 << DACR_SHIFT(n)) /* Accesses checked against permissions TLB */
#  define DACR_MANAGER(n)    (3 << DACR_SHIFT(n)) /* Accesses are not checked */
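
/* For example (a sketch only; the chosen domains are hypothetical), a DACR
 * value that grants "client" access to domain 0 and "manager" access to
 * domain 15 could be composed from these definitions and written with the
 * cp15_wrdacr() helper defined at the bottom of this file:
 *
 *   uint32_t dacr = DACR_CLIENT(0) | DACR_MANAGER(15);
 *   cp15_wrdacr(dacr);
 */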

/* Data Fault Status Register (DFSR) */

#define DFSR_STATUS_SHIFT    (0)       /* Bits 0-3: Type of exception generated (w/EXT and FS) */
#define DFSR_STATUS_MASK     (15 << DFSR_STATUS_SHIFT)
#define DFSR_DOMAIN_SHIFT    (4)       /* Bits 4-7: Domain accessed when a data fault occurred */
#define DFSR_DOMAIN_MASK     (15 << DFSR_DOMAIN_SHIFT)
                                       /* Bits 8-9: Reserved */
#define DFSR_FS              (1 << 10) /* Bit 10: Part of the STATUS field */
#define DFSR_WNR             (1 << 11) /* Bit 11: Write not Read bit (0=read, 1=write) */
#define DFSR_EXT             (1 << 12) /* Bit 12: External Abort Qualifier */
                                       /* Bits 13-31: Reserved */

/* Instruction Fault Status Register (IFSR) */

#define IFSR_STATUS_SHIFT    (0)       /* Bits 0-3: Type of fault generated (w/EXT and FS) */
#define IFSR_STATUS_MASK     (15 << IFSR_STATUS_SHIFT)
                                       /* Bits 4-9: Reserved */
#define IFSR_S               (1 << 10) /* Bit 10: Part of the STATUS field */
                                       /* Bit 11: Reserved */
#define IFSR_EXT             (1 << 12) /* Bit 12: External Abort Qualifier */
                                       /* Bits 13-31: Reserved */

/* Data Fault Address Register(DFAR).  Holds the MVA of the faulting address when a
 * synchronous fault occurs
 *
 * Instruction Fault Address Register(IFAR).  Holds the MVA of the faulting address
 * of the instruction that caused a prefetch abort.
 */

/* TLB operations.
 *
 * CP15 Register: TLBIALLIS
 *   Description:     Invalidate entire Unified TLB Inner Shareable
 *   Register Format: SBZ
 *   Instruction:     MCR p15, 0, <Rd>, c8, c3, 0
 * CP15 Register: TLBIMVAIS
 *   Description:     Invalidate Unified TLB entry by VA Inner Shareable
 *   Register Format: VA/ASID
 *   Instruction:     MCR p15, 0, <Rd>, c8, c3, 1
 * CP15 Register: TLBIASIDIS
 *   Description:     Invalidate Unified TLB entry by ASID match Inner
 *                    Shareable
 *   Register Format: ASID
 *   Instruction:     MCR p15, 0, <Rd>, c8, c3, 2
 * CP15 Register: TLBIMVAAIS
 *   Description:     Invalidate Unified TLB entry by VA all ASID Inner
 *                    Shareable
 *   Register Format: VA
 *   Instruction:     MCR p15, 0, <Rd>, c8, c3, 3
 * CP15 Register: TLBIALL
 *   Description:     Invalidate entire Unified TLB
 *   Register Format: Ignored
 *   Instruction:     MCR p15, 0, <Rd>, c8, c7, 0
 * CP15 Register: TLBIMVA
 *   Description:     Invalidate Unified TLB by VA
 *   Register Format: VA/ASID
 *   Instruction:     MCR p15, 0, <Rd>, c8, c7, 1
 * CP15 Register: TLBIASID
 *   Description:     Invalidate TLB entries by ASID Match
 *   Register Format: ASID
 *   MCR p15, 0, <Rd>, c8, c7, 2
 * CP15 Register: TLBIMVAA
 *   Description:     Invalidate TLB entries by VA All ASID
 *   Register Format: VA
 *   Instruction:     MCR p15, 0, <Rd>, c8, c7, 3
 */

#define TLB_ASID_SHIFT       (0)       /* Bits 0-7:  Address Space Identifier */
#define TLB_ASID_MASK        (0xff << TLB_ASID_SHIFT)
#define TLB_SBZ_SHIFT        (8)       /* Bits 8-11:  SBZ */
#define TLB_SBZ_MASK         (15 << TLB_SBZ_SHIFT)
#define TLB_VA_MASK          (0xfffff000) /* Bits 12-31: Virtual address */
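
/* For example (a sketch only; 'vaddr' and 'asid' are hypothetical values),
 * the register value for a TLBIMVA operation that invalidates the entry
 * for a virtual address in a particular address space could be formed as:
 *
 *   uint32_t rd = (vaddr & TLB_VA_MASK) |
 *                 ((asid << TLB_ASID_SHIFT) & TLB_ASID_MASK);
 */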

/* Primary Region Remap Register (PRRR) */
/* Normal Memory Remap Register (NMRR) */

/* TLB Hitmap Register (TLBHR) */

#define TLBHR_4KB            (1 << 0)  /* Bit 0:  4KB pages are present in the TLB */
#define TLBHR_16KB           (1 << 1)  /* Bit 1:  16KB pages are present in the TLB */
#define TLBHR_1MB            (1 << 2)  /* Bit 2:  1MB sections are present in the TLB */
#define TLBHR_16MB           (1 << 3)  /* Bit 3:  16MB supersections are present in the TLB */
                                       /* Bits 4-31: Reserved */

/* Context ID Register (CONTEXTIDR).  See cstlr.h */

/* Translation Table Definitions ****************************************************/
/* Hardware translation table definitions.  Only the "short descriptor format" is
 * supported.
 *
 * Level 1 Descriptor (PMD)
 *
 * Common definitions that apply to all L1 table entry types
 */

#define PMD_TYPE_SHIFT       (0)         /* Bits 0-1:  Type of mapping */
#define PMD_TYPE_MASK        (3 << PMD_TYPE_SHIFT)
#  define PMD_TYPE_FAULT     (0 << PMD_TYPE_SHIFT) /* None */
#  define PMD_TYPE_PTE       (1 << PMD_TYPE_SHIFT) /* Page table */
#  define PMD_TYPE_SECT      (2 << PMD_TYPE_SHIFT) /* Section or supersection */
#  define PMD_TYPE_PXN       (3 << PMD_TYPE_SHIFT) /* PXN Section or supersection */
                                        /* Bits 2-31: Depend on the mapping type */

/* Level 1 Fault Translation Table Format.
 *
 * Invalid or fault entry.  "The associated VA is unmapped, and any attempt to
 *   access it generates a Translation fault.  Software can use bits[31:2] of the
 *   descriptor for its own purposes, because the hardware ignores
 *   these bits."
 */

/* Level 1 Page Table Translation Table Format.
 *
 * Page table. "The descriptor gives the address of a second-level translation
 *   table, that specifies the mapping of the associated 1MByte VA range."
 */

                                          /* Bits 0-1:   Type of mapping */
#define PMD_PTE_PXN          (1 << 2)     /* Bit 2:  Privileged execute-never bit */
#define PMD_PTE_NS           (1 << 3)     /* Bit 3:  Non-secure bit */
                                          /* Bit 4:  Should be zero (SBZ) */
#define PMD_PTE_DOM_SHIFT    (5)          /* Bits 5-8: Domain */
#define PMD_PTE_DOM_MASK     (15 << PMD_PTE_DOM_SHIFT)
#  define PMD_PTE_DOM(n)     ((n) << PMD_PTE_DOM_SHIFT)
                                          /* Bit 9:  Not implemented */
#define PMD_PTE_PADDR_MASK   (0xfffffc00) /* Bits 10-31: Page table base address */

/* Level 1 Section/Supersection Descriptor.
 *
 * Section or Supersection.  "The descriptor gives the base address of the
 *   Section or Supersection. Bit[18] determines whether the entry describes a
 *   Section or a Supersection.  If the implementation supports the PXN
 *   attribute, this encoding also defines the PXN bit as 0. Section descriptors
 *   allow fast, single level mapping between 1Mb address regions."
 *
 * PXN Section or Supersection.  "If an implementation supports the PXN attribute,
 *   this encoding is identical..., except that it defines the PXN bit as 1."
 *
 *  "If the implementation does not support the PXN attribute, an attempt to access
 *   the associated VA generates a Translation fault.  On an implementation that
 *   does not support the PXN attribute, this encoding must not be used."
 */

/* Section */

#define PMD_SECT_PXN         (1 << 0)     /* Bit 0:  Privileged execute-never bit */
                                          /* Bits 0-1: Type of mapping */
#define PMD_SECT_B           (1 << 2)     /* Bit 2:  Bufferable bit */
#define PMD_SECT_C           (1 << 3)     /* Bit 3:  Cacheable bit */
#define PMD_SECT_XN          (1 << 4)     /* Bit 4:  Execute-never bit */
#define PMD_SECT_DOM_SHIFT   (5)          /* Bits 5-8: Domain */
#define PMD_SECT_DOM_MASK    (15 << PMD_SECT_DOM_SHIFT)
#  define PMD_SECT_DOM(n)    ((n) << PMD_SECT_DOM_SHIFT)
                                          /* Bit 9:  Implementation defined */
#define PMD_SECT_AP_SHIFT    (10)         /* Bits 10-11: Access Permissions bits AP[1:0] */
#define PMD_SECT_AP_MASK     (3 << PMD_SECT_AP_SHIFT)
#  define PMD_SECT_AP0       (1 << PMD_SECT_AP_SHIFT) /* AP[0]:  Access permission bit 0 */
#  define PMD_SECT_AP1       (2 << PMD_SECT_AP_SHIFT) /* AP[1]:  Access permission bit 1 */
#define PMD_SECT_TEX_SHIFT   (12)         /* Bits 12-14: Memory region attribute bits */
#define PMD_SECT_TEX_MASK    (7 << PMD_SECT_TEX_SHIFT)
#define PMD_SECT_AP2         (1 << 15)    /* Bit 15: AP[2]:  Access permission bit 2 */
#define PMD_SECT_S           (1 << 16)    /* Bit 16: Shareable bit */
#define PMD_SECT_NG          (1 << 17)    /* Bit 17: Not global bit. */
#define PMD_SECT_PADDR_MASK  (0xfff00000) /* Bits 20-31: Section base address, PA[31:20] */

/* Super Section (differences only) */

#define PMD_SSECT_XBA3_SHIFT  (5)          /* Bits 5-8:   Extended base address, PA[39:36] */
#define PMD_SSECT_XBA3_MASK   (15 << PMD_SSECT_XBA3_SHIFT)
#define PMD_SSECT_XBA2_SHIFT  (20)         /* Bits 20-23: Extended base address, PA[35:32] */
#define PMD_SSECT_XBA2_MASK   (15 << PMD_SSECT_XBA2_SHIFT)
#define PMD_SSECT_XBA1_SHIFT  (24)         /* Bits 24-31: Supersection base address, PA[31:24] */
#define PMD_SSECT_XBA1_MASK   (255 << PMD_SSECT_XBA1_SHIFT)

/* Level 1 Section/Supersection Access Permissions.
 *
 * Paragraph B3.7.1, Access permissions: "If address translation is using
 * the Short-descriptor translation table format, it must set SCTLR.AFE to
 * 1 to enable use of the Access flag.... Setting this bit to 1 redefines
 * the AP[0] bit in the translation table descriptors as an Access flag, and
 * limits the access permissions information in the translation table
 * descriptors to AP[2:1]..."
 *
 * Key:
 *
 *   WR    - Read/write access allowed
 *   R     - Read-only access allowed
 *   0,1,2 - At PL0, PL1, and/or PL2
 *
 *   PL0   - User privilege level
 *   PL1   - Privileged mode
 *   PL2   - Software executing in Hyp mode
 */

#ifdef CONFIG_AFE_ENABLE
/* AP[2:1] access permissions model.  AP[0] is used as an access flag:
 *
 * AP[2] AP[1]   PL1        PL0        Description
 * ----- ----- ----------- ---------- --------------------------------
 *   0     0   Read/write  No access  Access only at PL1
 *   0     1   Read/write  Read/write Full access
 *   1     0   Read-only   No access  Read-only for PL1
 *   1     1   Read-only   Read-only  Read-only at any privilege level
 */

#  define PMD_SECT_AP_RW1     (0)
#  define PMD_SECT_AP_RW01    (PMD_SECT_AP1)
#  define PMD_SECT_AP_R1      (PMD_SECT_AP2)
#  define PMD_SECT_AP_R01     (PMD_SECT_AP1 | PMD_SECT_AP2)

#else
/* AP[2:0] access permissions control, Short-descriptor format only:
 *
 * AP[2] AP[1] AP[0]  PL1/2       PL0        Description
 * ----- ----- ----- ----------- ---------- --------------------------------
 *   0     0     0   No access   No access  All accesses generate faults
 *   0     0     1   Read/write  No access  Access only at PL1 and higher
 *   0     1     0   Read/write  Read-only  Writes at PL0 generate faults
 *   0     1     1   Read/write  Read/write Full access
 *   1     0     0     ----        ---      Reserved
 *   1     0     1   Read-only   No access  Read-only for PL1 and higher
 *   1     1     0   Read-only   Read-only  (deprecated)
 *   1     1     1   Read-only   Read-only  Read-only at any privilege level
 */

#  define PMD_SECT_AP_NONE    (0)
#  define PMD_SECT_AP_RW12    (PMD_SECT_AP0)
#  define PMD_SECT_AP_RW12_R0 (PMD_SECT_AP1)
#  define PMD_SECT_AP_RW012   (PMD_SECT_AP0 | PMD_SECT_AP1)
#  define PMD_SECT_AP_R12     (PMD_SECT_AP0 | PMD_SECT_AP2)
#  define PMD_SECT_AP_R012    (PMD_SECT_AP0 | PMD_SECT_AP1 | PMD_SECT_AP2)

/* Some mode-independent aliases */

#  define PMD_SECT_AP_RW1     PMD_SECT_AP_RW12
#  define PMD_SECT_AP_RW01    PMD_SECT_AP_RW012
#  define PMD_SECT_AP_R1      PMD_SECT_AP_R12
#  define PMD_SECT_AP_R01     PMD_SECT_AP_R012

#endif

/* Short-descriptor translation table second-level descriptor formats
 *
 * A PMD_TYPE_PTE level-one table entry provides the base address of the beginning
 * of a second-level page table. There are two types of page table entries:
 *
 *   - Large page table entries support mapping of 64KB memory regions.
 *   - Small page table entries support mapping of 4KB memory regions.
 *
 * The following definitions apply to all L2 tables:
 */

#define PTE_TYPE_SHIFT       (0)          /* Bits 0-1:  Type of mapping */
#define PTE_TYPE_MASK        (3 << PTE_TYPE_SHIFT)
#  define PTE_TYPE_FAULT     (0 << PTE_TYPE_SHIFT) /* None */
#  define PTE_TYPE_LARGE     (1 << PTE_TYPE_SHIFT) /* 64Kb of memory */
#  define PTE_TYPE_SMALL     (2 << PTE_TYPE_SHIFT) /*  4Kb of memory */
#define PTE_B                (1 << 2)     /* Bit 2:  Bufferable bit */
#define PTE_C                (1 << 3)     /* Bit 3:  Cacheable bit */
#define PTE_AP_SHIFT         (4)          /* Bits 4-5: Access Permissions bits AP[1:0] */
#define PTE_AP_MASK          (3 << PTE_AP_SHIFT)
#  define PTE_AP0            (1 << PTE_AP_SHIFT)   /* AP[0]:  Access permission bit 0 */
#  define PTE_AP1            (2 << PTE_AP_SHIFT)   /* AP[1]:  Access permission bit 1 */
                                          /* Bits 6-8: Depend on entry type */
#define PTE_AP2              (1 << 9)     /* Bit 9: AP[2]:  Access permission bit 2 */
#define PTE_S                (1 << 10)    /* Bit 10: Shareable bit */
#define PTE_NG               (1 << 11)    /* Bit 11: Not global bit. */
                                          /* Bits 12-31:Depend on entry type */

/* Large page -- 64Kb */
                                          /* Bits 0-1:  Type of mapping */
                                          /* Bit 2:  Bufferable bit */
                                          /* Bit 3:  Cacheable bit */
                                          /* Bits 4-5: Access Permissions bits AP[1:0] */
#define PTE_LARGE_TEX_SHIFT  (12)         /* Bits 12-14: Memory region attribute bits */
#define PTE_LARGE_TEX_MASK   (7 << PTE_LARGE_TEX_SHIFT)
#define PTE_LARGE_XN         (1 << 15)    /* Bit 15: Execute-never bit */
#define PTE_LARGE_FLAG_MASK  (0x0000f03f) /* Bits 0-15: MMU flags (mostly) */
#define PTE_LARGE_PADDR_MASK (0xffff0000) /* Bits 16-31: Large page base address, PA[31:16] */

/* Small page -- 4Kb */

                                          /* Bits 0-1:  Type of mapping */
                                          /* Bit 2:  Bufferable bit */
                                          /* Bit 3:  Cacheable bit */
                                          /* Bits 4-5: Access Permissions bits AP[1:0] */
#define PTE_SMALL_FLAG_MASK  (0x0000003f) /* Bits 0-11: MMU flags (mostly) */
#define PTE_SMALL_PADDR_MASK (0xfffff000) /* Bits 12-31: Small page base address, PA[31:12] */
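
/* For example (a sketch only; 'paddr' is a hypothetical, 4KB-aligned
 * physical address), an L2 small page entry describing Write-Back kernel
 * data memory could be formed from these definitions and the MMU_L2_*
 * flag combinations defined below:
 *
 *   uint32_t pte = (paddr & PTE_SMALL_PADDR_MASK) | MMU_L2_KDATAFLAGS;
 */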

/* Level 2 Translation Table Access Permissions:
 *
 * WR    - Read/write access allowed
 * R     - Read-only access allowed
 * 0,1,2 - At PL0, PL1, and/or PL2
 *
 * PL0   - User privilege level
 * PL1   - Privileged mode
 * PL2   - Software executing in Hyp mode
 */

#ifdef CONFIG_AFE_ENABLE
/* AP[2:1] access permissions model.  AP[0] is used as an access flag:
 *
 * AP[2] AP[1]   PL1        PL0        Description
 * ----- ----- ----------- ---------- --------------------------------
 *   0     0   Read/write  No access  Access only at PL1
 *   0     1   Read/write  Read/write Full access
 *   1     0   Read-only   No access  Read-only for PL1
 *   1     1   Read-only   Read-only  Read-only at any privilege level
 */

#  define PTE_AP_RW1         (0)
#  define PTE_AP_RW01        (PTE_AP1)
#  define PTE_AP_R1          (PTE_AP2)
#  define PTE_AP_R01         (PTE_AP1 | PTE_AP2)

#else
/* AP[2:0] access permissions control, Short-descriptor format only:
 *
 * AP[2] AP[1] AP[0]  PL1/2       PL0        Description
 * ----- ----- ----- ----------- ---------- --------------------------------
 *   0     0     0   No access   No access  All accesses generate faults
 *   0     0     1   Read/write  No access  Access only at PL1 and higher
 *   0     1     0   Read/write  Read-only  Writes at PL0 generate faults
 *   0     1     1   Read/write  Read/write Full access
 *   1     0     0     ----        ---      Reserved
 *   1     0     1   Read-only   No access  Read-only for PL1 and higher
 *   1     1     0   Read-only   Read-only  (deprecated)
 *   1     1     1   Read-only   Read-only  Read-only at any privilege level
 */

#  define PTE_AP_NONE        (0)
#  define PTE_AP_RW12        (PTE_AP0)
#  define PTE_AP_RW12_R0     (PTE_AP1)
#  define PTE_AP_RW012       (PTE_AP0 | PTE_AP1)
#  define PTE_AP_R12         (PTE_AP0 | PTE_AP2)
#  define PTE_AP_R012        (PTE_AP0 | PTE_AP1 | PTE_AP2)

/* Some mode-independent aliases */

#  define PTE_AP_RW1         PTE_AP_RW12
#  define PTE_AP_RW01        PTE_AP_RW012
#  define PTE_AP_R1          PTE_AP_R12
#  define PTE_AP_R01         PTE_AP_R012

#endif

/* Memory types
 *
 * When TEX[2] == 1, the memory region is cacheable memory, and TEX[1:0]
 * describe inner and outer cache attributes.  In this implementation,
 * however, TEX[2:0] are always zero.  In this case, the cacheability is
 * described simply as:
 *
 *  C B Memory Type
 *  - - ---------------------------------------------------------------
 *  0 0 Strongly-ordered. Strongly-ordered Shareable
 *  0 1 Shareable Device. Device Shareable
 *  1 0 Outer and Inner Write-Through, no Write-Allocate. Normal S bit
 *  1 1 Outer and Inner Write-Back, no Write-Allocate. Normal S bit
 *
 * The memory type is actually controlled by the contents of the PRRR and
 * NMRR registers.  For the simple case where TEX[2:0] = 0b000, the control
 * is as follows:
 *
 *
 *       MEMORY     INNER         OUTER        OUTER SHAREABLE
 *   C B TYPE       CACHEABILITY  CACHEABILITY ATTRIBUTE
 *   - - ---------- ------------- ------------ -----------------
 *   0 0 PRRR[1:0]  NMRR[1:0]     NMRR[17:16]  NOT(PRRR[24])
 *   0 1 PRRR[3:2]  NMRR[3:2]     NMRR[19:18]  NOT(PRRR[25])
 *   1 0 PRRR[5:4]  NMRR[5:4]     NMRR[21:20]  NOT(PRRR[26])
 *   1 1 PRRR[7:6]  NMRR[7:6]     NMRR[23:22]  NOT(PRRR[27])
 *
 * But on reset I see the following in PRRR:
 *
 *   PRRR[1:0]   = 0b00, Strongly ordered memory
 *   PRRR[3:2]   = 0b01, Device memory
 *   PRRR[5:4]   = 0b10, Normal memory
 *   PRRR[7:6]   = 0b10, Normal memory
 *   PRRR[14:27] = 0b10, Outer shareable
 *
 * And the following in NMRR:
 *
 *   NMRR[1:0]   = 0b00, Region is Non-cacheable
 *   NMRR[3:2]   = 0b00, Region is Non-cacheable
 *   NMRR[5:4]   = 0b10, Region is Write-Through, no Write-Allocate
 *   NMRR[7:6]   = 0b11, Region is Write-Back, no Write-Allocate
 *   NMRR[17:16] = 0b00, Region is Non-cacheable
 *   NMRR[19:18] = 0b00, Region is Non-cacheable
 *   NMRR[21:20] = 0b10, Region is Write-Through, no Write-Allocate
 *   NMRR[23:22] = 0b11, Region is Write-Back, no Write-Allocate
 *
 * Interpretation of Cacheable (C) and Bufferable (B) Bits:
 *
 *         Write-Through  Write-Back    Write-Through/Write-Back
 *  C   B  Cache          Only Cache    Cache
 * --- --- -------------- ------------- -------------------------
 *  0   0  Uncached/      Uncached/     Uncached/
 *         Unbuffered     Unbuffered    Unbuffered
 *  0   1  Uncached/      Uncached/     Uncached/
 *         Buffered       Buffered      Buffered
 *  1   0  Cached/        UNPREDICTABLE Write-Through cached
 *         Unbuffered                   Buffered
 *  1   1  Cached/        Cached/       Write-Back cached
 *         Buffered       Buffered      Buffered
 */

#define PMD_STRONGLY_ORDERED (0)
#define PMD_DEVICE           (PMD_SECT_B)
#define PMD_CACHEABLE        (PMD_SECT_B | PMD_SECT_C)

#define PTE_STRONGLY_ORDER   (0)
#define PTE_DEVICE           (PTE_B)
#define PTE_WRITE_THROUGH    (PTE_C)
#define PTE_WRITE_BACK       (PTE_B | PTE_C)

/* Default MMU flags for RAM memory, IO, vector sections (level 1)
 *
 * REVISIT:  Here we expect all threads to be running at PL1
 */

#define MMU_ROMFLAGS         (PMD_TYPE_SECT | PMD_SECT_AP_R1 | PMD_CACHEABLE | \
                              PMD_SECT_DOM(0))
#define MMU_MEMFLAGS         (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \
                              PMD_SECT_DOM(0))
#define MMU_IOFLAGS          (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_DEVICE | \
                              PMD_SECT_DOM(0) | PMD_SECT_XN)
#define MMU_STRONGLY_ORDERED (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | \
                              PMD_STRONGLY_ORDERED | PMD_SECT_DOM(0) | \
                              PMD_SECT_XN)
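
/* For example (a sketch only; 'paddr' and 'vaddr' are hypothetical,
 * 1MB-aligned addresses), an L1 section entry mapping one megabyte of
 * normal RAM could be built and stored directly into the L1 table:
 *
 *   uint32_t *l1table = (uint32_t *)PGTABLE_BASE_VADDR;
 *   l1table[vaddr >> 20] = (paddr & PMD_SECT_PADDR_MASK) | MMU_MEMFLAGS;
 */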

/* MMU Flags for each type of memory region (level 1 and 2) */

#define MMU_L1_TEXTFLAGS      (PMD_TYPE_PTE | PMD_PTE_DOM(0))

#define MMU_L2_KTEXTFLAGS     (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_R1)
#ifdef CONFIG_AFE_ENABLE
#  define MMU_L2_UTEXTFLAGS   (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01)
#else
#  define MMU_L2_UTEXTFLAGS   (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW12_R0)
#endif

#define MMU_L1_DATAFLAGS      (PMD_TYPE_PTE | PMD_PTE_PXN | PMD_PTE_DOM(0))
#define MMU_L2_UDATAFLAGS     (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01)
#define MMU_L2_KDATAFLAGS     (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW1)
#define MMU_L2_UALLOCFLAGS    (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01)
#define MMU_L2_KALLOCFLAGS    (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW1)

#define MMU_L1_PGTABFLAGS     (PMD_TYPE_PTE | PMD_PTE_PXN | PTE_WRITE_THROUGH | \
                               PMD_PTE_DOM(0))
#define MMU_L2_PGTABFLAGS     (PTE_TYPE_SMALL | PTE_WRITE_THROUGH | PTE_AP_RW1)

#define MMU_L1_VECTORFLAGS    (PMD_TYPE_PTE | PMD_PTE_PXN | PMD_PTE_DOM(0))

#define MMU_L2_VECTRWFLAGS    (PTE_TYPE_SMALL | PTE_WRITE_THROUGH | PTE_AP_RW1)
#define MMU_L2_VECTROFLAGS    (PTE_TYPE_SMALL | PTE_WRITE_THROUGH | PTE_AP_R1)
#define MMU_L2_VECTORFLAGS    MMU_L2_VECTRWFLAGS

/* Mapped section size */

#define SECTION_SHIFT         (20)
#define SECTION_SIZE          (1 << SECTION_SHIFT)   /* 1Mb */
#define SECTION_MASK          (SECTION_SIZE - 1)
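
/* For example (a sketch only; 'size' is a hypothetical region size in
 * bytes), the number of L1 section entries needed to span a region, as
 * used by the nsections field of struct section_mapping_s below, is:
 *
 *   uint32_t nsections = (size + SECTION_MASK) >> SECTION_SHIFT;
 */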

/* The Cortex-A5 supports two translation table base address registers.  In
 * this implementation, only Translation Table Base Register 0 (TTBR0) is
 * used.  TTBR0 contains the upper bits of the address of a page table in
 * physical memory.  If 4KB page sizes are used, then TTBR0 holds bits
 * 14-31 of the page table address; the full address of an L1 descriptor is
 * formed by ORing in bits 2-13 derived from the virtual address (MVA).  As
 * a consequence, the page table must be aligned to a 16KB address in
 * physical memory and could require up to 16KB of memory.
 */

#define PGTABLE_SIZE       0x00004000
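
/* As a sketch of that table walk (illustrative only; 'ttbr0' and 'mva' are
 * hypothetical values), with TTBCR.N=0 the physical address of the L1
 * descriptor for a modified virtual address is formed as:
 *
 *   uint32_t l1paddr = (ttbr0 & 0xffffc000) | (((mva) >> 20) << 2);
 *
 * TTBR0 supplies bits 14-31 and MVA[31:20] supplies bits 2-13.
 */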

/* Virtual Page Table Location ******************************************************/

#ifdef CONFIG_PAGING
/* Check if the virtual address of the page table has been defined. It
 * should not be defined:  architecture specific logic should suppress
 * defining PGTABLE_BASE_VADDR unless:  (1) it is defined in the NuttX
 * configuration file, or (2) the page table is positioned in low memory
 * (because the vectors are in high memory).
 */

#ifndef PGTABLE_BASE_VADDR
#  define PGTABLE_BASE_VADDR      (PG_LOCKED_VBASE + PG_TEXT_VSIZE + PG_DATA_SIZE)

  /* The virtual base address of the L2 page tables must be recalculated
   * using this new virtual base address of the page table.
   */

#  undef  PGTABLE_L2_VBASE
#  define PGTABLE_L2_VBASE (PGTABLE_BASE_VADDR+PGTABLE_L2_OFFSET)

#endif /* PGTABLE_BASE_VADDR */

/* MMU flags ************************************************************************/

/* Create some friendly definitions to handle page table entries */

#if CONFIG_PAGING_PAGESIZE != 4096
#  error "Unsupported value for CONFIG_PAGING_PAGESIZE"
#endif

/* Base of the L2 page table (aligned to a 1KB boundary) */

#define PGTABLE_L2_BASE_PADDR PGTABLE_L2_PBASE
#define PGTABLE_L2_BASE_VADDR PGTABLE_L2_VBASE

/* Number of pages in an L2 table per L1 entry */

#define PTE_NPAGES            PTE_SMALL_NPAGES
#define PT_SIZE               (4*PTE_NPAGES)

/* Mask to get the page table physical address from an L1 entry */

#define PG_L1_PADDRMASK       PMD_PTE_PADDR_MASK

/* Addresses of Memory Regions ******************************************************/

/* We position the locked region PTEs at an offset into the first
 * L2 page table.  The L1 entry points to a 1Mb aligned virtual
 * address.  The actual L2 entry will be offset into the aligned
 * L2 table.  For 4KB, "small" pages:
 *
 *   PG_L1_PADDRMASK=0xfffffc00
 *   OFFSET=((((a) & 0x000fffff) >> PAGESHIFT) << 2)
 */

#define PG_L1_LOCKED_PADDR      (PGTABLE_BASE_PADDR + ((PG_LOCKED_VBASE >> 20) << 2))
#define PG_L1_LOCKED_VADDR      (PGTABLE_BASE_VADDR + ((PG_LOCKED_VBASE >> 20) << 2))

#define PG_L2_LOCKED_OFFSET     (((PG_LOCKED_VBASE & 0x000fffff) >> PAGESHIFT) << 2)
#define PG_L2_LOCKED_PADDR      (PGTABLE_L2_BASE_PADDR + PG_L2_LOCKED_OFFSET)
#define PG_L2_LOCKED_VADDR      (PGTABLE_L2_BASE_VADDR + PG_L2_LOCKED_OFFSET)
#define PG_L2_LOCKED_SIZE       (4*CONFIG_PAGING_NLOCKED)

/* We position the paged region PTEs immediately after the locked
 * region PTEs.  NOTE that the size of the virtual paged region is
 * much larger than the size of the physical region that backs it.
 * That is the core of what the On-Demand Paging feature provides.
 */

#define PG_L1_PAGED_PADDR       (PGTABLE_BASE_PADDR + ((PG_PAGED_VBASE >> 20) << 2))
#define PG_L1_PAGED_VADDR       (PGTABLE_BASE_VADDR + ((PG_PAGED_VBASE >> 20) << 2))

#define PG_L2_PAGED_PADDR       (PG_L2_LOCKED_PADDR + PG_L2_LOCKED_SIZE)
#define PG_L2_PAGED_VADDR       (PG_L2_LOCKED_VADDR + PG_L2_LOCKED_SIZE)
#define PG_L2_PAGED_SIZE        (4*CONFIG_PAGING_NVPAGED)

/* This describes the overall text region */

#define PG_L1_TEXT_PADDR        PG_L1_LOCKED_PADDR
#define PG_L1_TEXT_VADDR        PG_L1_LOCKED_VADDR

#define PG_L2_TEXT_PADDR        PG_L2_LOCKED_PADDR
#define PG_L2_TEXT_VADDR        PG_L2_LOCKED_VADDR
#define PG_L2_TEXT_SIZE         (PG_L2_LOCKED_SIZE + PG_L2_PAGED_SIZE)

/* We position the data section PTEs just after the text region PTEs */

#define PG_L1_DATA_PADDR        (PGTABLE_BASE_PADDR + ((PG_DATA_VBASE >> 20) << 2))
#define PG_L1_DATA_VADDR        (PGTABLE_BASE_VADDR + ((PG_DATA_VBASE >> 20) << 2))

#define PG_L2_DATA_PADDR        (PG_L2_LOCKED_PADDR + PG_L2_TEXT_SIZE)
#define PG_L2_DATA_VADDR        (PG_L2_LOCKED_VADDR + PG_L2_TEXT_SIZE)
#define PG_L2_DATA_SIZE         (4*PG_DATA_NPAGES)

/* Page Table Info ******************************************************************/

/* The number of pages in the page table (PG_PGTABLE_NPAGES).  We
 * position the page table PTEs just after the data section PTEs.
 */

#define PG_PGTABLE_NPAGES       (PGTABLE_SIZE >> PAGESHIFT)
#define PG_L1_PGTABLE_PADDR     (PGTABLE_BASE_PADDR + ((PGTABLE_BASE_VADDR >> 20) << 2))
#define PG_L1_PGTABLE_VADDR     (PGTABLE_BASE_VADDR + ((PGTABLE_BASE_VADDR >> 20) << 2))

#define PG_L2_PGTABLE_PADDR     (PG_L2_DATA_PADDR + PG_L2_DATA_SIZE)
#define PG_L2_PGTABLE_VADDR     (PG_L2_DATA_VADDR + PG_L2_DATA_SIZE)
#define PG_L2_PGTABLE_SIZE      (4*PG_PGTABLE_NPAGES)

/* Vector Mapping *******************************************************************/

/* One page is required to map the vector table.  The vector table could lie
 * at virtual address zero (or at the start of RAM which is aliased to address
 * zero on the ea3131) or at virtual address 0xfff00000.  We only have logic
 * here to support the former case.
 *
 * NOTE:  If the vectors are at address zero, the page table will be
 * forced to the highest RAM addresses.  If the vectors are at 0xfff00000,
 * then the page table is forced to the beginning of RAM.
 *
 * When the vectors are at the beginning of RAM, they will probably overlap
 * the first page of the locked text region.  In any other case, the
 * configuration must set CONFIG_PAGING_VECPPAGE to provide the physical
 * address of the page to use for the vectors.
 *
 * When the vectors overlap the first page of the locked text region (the
 * only case in use so far), then the text page will temporarily be made
 * writable in order to copy the vectors.
 *
 * PG_VECT_PBASE - This is the physical address of the page in memory to be
 *   mapped to the vector address.
 * PG_L2_VECT_PADDR - This is the physical address of the L2 page table
 *   entry to use for the vector mapping.
 * PG_L2_VECT_VADDR - This is the virtual address of the L2 page table
 *   entry to use for the vector mapping.
 */

/* Case 1: The configuration tells us everything */

#if defined(CONFIG_PAGING_VECPPAGE)
#  define PG_VECT_PBASE         CONFIG_PAGING_VECPPAGE
#  define PG_L2_VECT_PADDR      CONFIG_PAGING_VECL2PADDR
#  define PG_L2_VECT_VADDR      CONFIG_PAGING_VECL2VADDR

/* Case 2: Vectors are in low memory and the locked text region starts at
 * the beginning of SRAM (which will be aliased to address 0x00000000).
 * However, the beginning of SRAM may not be aligned to the beginning
 * of the L2 page table (because the beginning of RAM is offset into
 * the table).
 */

#elif defined(CONFIG_ARCH_LOWVECTORS) && !defined(CONFIG_PAGING_LOCKED_PBASE)
#  define PG_VECT_PBASE         PG_LOCKED_PBASE
#  define PG_L2_VECT_OFFSET     (((PG_LOCKED_VBASE & 0x000fffff) >> PAGESHIFT) << 2)
#  define PG_L2_VECT_PADDR      (PGTABLE_L2_BASE_PADDR + PG_L2_VECT_OFFSET)
#  define PG_L2_VECT_VADDR      (PGTABLE_L2_BASE_VADDR + PG_L2_VECT_OFFSET)

/* Case 3: High vectors or the locked region is not at the beginning of SRAM */

#else
#  error "Logic missing for high vectors in this case"
#endif

/* Page Usage ***********************************************************************/

/* This is the total number of pages used in the text/data mapping: */

#define PG_TOTAL_NPPAGES        (PG_TEXT_NPPAGES + PG_DATA_NPAGES + PG_PGTABLE_NPAGES)
#define PG_TOTAL_NVPAGES        (PG_TEXT_NVPAGES + PG_DATA_NPAGES + PG_PGTABLE_NPAGES)
#define PG_TOTAL_PSIZE          (PG_TOTAL_NPPAGES << PAGESHIFT)
#define PG_TOTAL_VSIZE          (PG_TOTAL_NVPAGES << PAGESHIFT)

/* Sanity check: */

#if PG_TOTAL_NPPAGES > PG_RAM_PAGES
#  error "Total pages required exceeds RAM size"
#endif

/* Page Management ******************************************************************/

/* For page management purposes, the following summarize the "heap" of
 * free pages, operations on free pages and the L2 page table.
 *
 * PG_POOL_VA2L1OFFSET(va)  - Given a virtual address, return the L1 table
 *                            offset (in bytes).
 * PG_POOL_VA2L1VADDR(va)   - Given a virtual address, return the virtual
 *                            address of the L1 table entry
 * PG_POOL_L12PPTABLE(L1)   - Given the value of an L1 table entry return
 *                            the physical address of the start of the L2
 *                            page table
 * PG_POOL_L12VPTABLE(L1)   - Given the value of an L1 table entry return
 *                            the virtual address of the start of the L2
 *                            page table.
 *
 * PG_POOL_L1VBASE          - The virtual address of the start of the L1
 *                            page table range corresponding to the first
 *                            virtual address of the paged text region.
 * PG_POOL_L1VEND           - The virtual address of the end+1 of the L1
 *                            page table range corresponding to the last
 *                            virtual address+1 of the paged text region.
 *
 * PG_POOL_VA2L2NDX(va)     - Converts a virtual address within the paged
 *                            text region to the most compact possible
 *                            representation. Each PAGESIZE of address
 *                            corresponds to 1 index in the L2 page table;
 *                            Index 0 corresponds to the first L2 page table
 *                            entry for the first page in the virtual paged
 *                            text address space.
 * PG_POOL_NDX2VA(ndx)      - Performs the opposite conversion:  converts
 *                            an index into a virtual address in the paged
 *                            text region (the address at the beginning of
 *                            the page).
 * PG_POOL_MAXL2NDX         - This is the maximum value+1 of such an index.
 *
 * PG_POOL_PGPADDR(ndx)     - Converts a page index into the corresponding
 *                            (physical) address of the backing page memory.
 * PG_POOL_PGVADDR(ndx)     - Converts a page index into the corresponding
 *                            (virtual) address of the backing page memory.
 *
 * These are used as follows:  If a miss occurs at some virtual address, va,
 * a new page index, ndx, is allocated.  PG_POOL_PGPADDR(ndx) converts the
 * index into the physical address of the page memory; PG_POOL_L2VADDR(va)
 * provides the virtual address of the L2 page table entry where the new
 * mapping will be written.
 */

#define PG_POOL_VA2L1OFFSET(va) (((va) >> 20) << 2)
#define PG_POOL_VA2L1VADDR(va)  (PGTABLE_BASE_VADDR + PG_POOL_VA2L1OFFSET(va))
#define PG_POOL_L12PPTABLE(L1)  ((L1) & PG_L1_PADDRMASK)
#define PG_POOL_L12VPTABLE(L1)  (PG_POOL_L12PPTABLE(L1) - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR)

#define PG_POOL_L1VBASE         (PGTABLE_BASE_VADDR + ((PG_PAGED_VBASE >> 20) << 2))
#define PG_POOL_L1VEND          (PG_POOL_L1VBASE + (CONFIG_PAGING_NVPAGED << 2))

#define PG_POOL_VA2L2NDX(va)    (((va) -  PG_PAGED_VBASE) >> PAGESHIFT)
#define PG_POOL_NDX2VA(ndx)     (((ndx) << PAGESHIFT) + PG_PAGED_VBASE)
#define PG_POOL_MAXL2NDX        PG_POOL_VA2L2NDX(PG_PAGED_VEND)

#define PG_POOL_PGPADDR(ndx)    (PG_PAGED_PBASE + ((ndx) << PAGESHIFT))
#define PG_POOL_PGVADDR(ndx)    (PG_PAGED_VBASE + ((ndx) << PAGESHIFT))
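
/* As a sketch of that sequence (illustrative only; 'va' is the hypothetical
 * miss address and 'ndx' the newly allocated page index):
 *
 *   uint32_t l1  = *(uint32_t *)PG_POOL_VA2L1VADDR(va);
 *   uint32_t *l2 = (uint32_t *)PG_POOL_L12VPTABLE(l1);
 *   l2[((va) & 0x000fffff) >> PAGESHIFT] =
 *     PG_POOL_PGPADDR(ndx) | MMU_L2_KTEXTFLAGS;
 */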

#endif /* CONFIG_PAGING */

/************************************************************************************
 * Public Types
 ************************************************************************************/

#ifndef __ASSEMBLY__
/* struct section_mapping_s describes the L1 mapping of a large region of memory
 * consisting of one or more 1MB sections (nsections).
 *
 * All addresses must be aligned to 1MB address boundaries.
 */

struct section_mapping_s
{
  uint32_t physbase;   /* Physical address of the region to be mapped */
  uint32_t virtbase;   /* Virtual address of the region to be mapped */
  uint32_t mmuflags;   /* MMU settings for the region (e.g., cache-able) */
  uint32_t nsections;  /* Number of mappings in the region */
};
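
/* For example (a sketch only; the addresses and size are hypothetical), a
 * board might describe 16MB of SDRAM, identity-mapped as normal memory:
 *
 *   static const struct section_mapping_s g_sdram_mapping =
 *   {
 *     0x20000000, 0x20000000, MMU_MEMFLAGS, 16
 *   };
 */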
#endif

/************************************************************************************
 * Assembly Macros
 ************************************************************************************/

#ifdef __ASSEMBLY__

/************************************************************************************
 * Name: cp15_disable_mmu
 *
 * Description:
 *   Disable the MMU
 *
 * Inputs:
 *   None
 *
 ************************************************************************************/

	.macro	cp15_disable_mmu, scratch
	mrc		p15, 0, \scratch, c1, c0, 0
	bic		\scratch, \scratch, #1
	mcr		p15, 0, \scratch, c1, c0, 0
	.endm

/************************************************************************************
 * Name: cp15_invalidate_tlbs
 *
 * Description:
 *   Invalidate entire unified TLB
 *
 *   The Invalidate entire TLB operations invalidate all unlocked entries in the
 *   TLB. The operation ignores the value in the register Rt specified by the MCR
 *   instruction that performs the operation. Software does not have to write a
 *   value to the register before issuing the MCR instruction.
 *
 * Inputs:
 *   None
 *
 ************************************************************************************/

	.macro	cp15_invalidate_tlbs, scratch
	mcr		p15, 0, \scratch, c8, c7, 0	/* TLBIALL */
	.endm

/************************************************************************************
 * Name: cp15_invalidate_tlb_bymva
 *
 * Description:
 *   Invalidate unified TLB entry by MVA all ASID Inner Shareable
 *
 * Inputs:
 *   vaddr - The virtual address to be invalidated
 *
 ************************************************************************************/

	.macro	cp15_invalidate_tlb_bymva, vaddr
	dsb
	mcr		p15, 0, \vaddr, c8, c3, 3	/* TLBIMVAAIS */
	dsb
	isb
	.endm

/************************************************************************************
 * Name: cp15_wrdacr
 *
 * Description:
 *   Write the Domain Access Control Register (DACR)
 *
 * Inputs:
 *   dacr - The new value of the DACR
 *
 ************************************************************************************/

	.macro	cp15_wrdacr, dacr
	mcr		p15, 0, \dacr, c3, c0, 0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.endm

/************************************************************************************
 * Name: cp14_wrttb
 *
 * Description:
 *   The ARMv7-A architecture supports two translation tables.  This
 *   implementation, however, uses only translation table 0.  This
 *   function  writes the address of the page table to the Translation
 *   Table Base Register 0 (TTBR0).  Then it clears the TTB control
 *   register (TTBCR), indicating that we are using TTBR0.
 *
 * Inputs:
 *   ttb - The new value of the TTBR0 register
 *
 ************************************************************************************/

	.macro	cp14_wrttb, ttb, scratch
	mcr		p15, 0, \ttb, c2, c0, 0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	mov		\scratch, #0x0
	mcr		p15, 0, \scratch, c2, c0, 2
	.endm

/************************************************************************************
 * Name: pg_l2map
 *
 * Description:
 *   Write several, contiguous L2 page table entries.  npages entries will be
 *   written.  This macro is used when CONFIG_PAGING is enabled.  In this
 *   case, it is used as follows:
 *
 *	ldr	r0, =PGTABLE_L2_BASE_PADDR	<-- Address in L2 table
 *	ldr	r1, =PG_LOCKED_PBASE		<-- Physical page memory address
 *	ldr	r2, =CONFIG_PAGING_NLOCKED	<-- number of pages
 *	ldr	r3, =MMUFLAGS			<-- L2 MMU flags
 *	pg_l2map r0, r1, r2, r3, r4
 *
 * Inputs:
 *   l2 - Physical or virtual start address in the L2 page table, depending
 *        upon the context. (modified)
 *   ppage - The physical address of the start of the region to span. Must
 *           be aligned to 1Mb section boundaries (modified)
 *   npages - Number of pages to write in the section (modified)
 *   mmuflags - L2 MMU FLAGS
 *
 * Scratch registers (modified): tmp
 *   l2  - Next address in the L2 page table.
 *   ppage - Start of next physical page
 *   npages - Loop counter
 *   tmp - scratch
 *
 * Assumptions:
 * - The MMU is not yet enabled
 * - The L2 page tables have been zeroed prior to calling this function
 * - pg_l1span has been called to initialize the L1 table.
 *
 ************************************************************************************/

#ifdef CONFIG_PAGING
	.macro	pg_l2map, l2, ppage, npages, mmuflags, tmp
	b		2f
1:
	/* Write one L2 entry.  First, get tmp = (ppage | mmuflags),
	 * the value to write into the L2 PTE
	 */

	orr		\tmp, \ppage, \mmuflags

	/* Write value into table at the current table address
	 * (and increment the L2 page table address by 4)
	 */

	str		\tmp, [\l2], #4

	/* Update the physical address that will correspond to the next
	 * table entry.
	 */

	add		\ppage, \ppage, #CONFIG_PAGING_PAGESIZE

	/* Decrement the number of pages written */

	sub		\npages, \npages, #1
2:
	/* Check if all of the pages have been written.  If not, then
	 * loop and write the next PTE.
	 */

	cmp		\npages, #0
	bgt		1b
	.endm
#endif /* CONFIG_PAGING */

/************************************************************************************
 * Name: pg_l1span
 *
 * Description:
 *   Write several, contiguous, unmapped, small L1 page table entries.  As many
 *   entries will be written as are needed to span npages.  This macro is
 *   used when CONFIG_PAGING is enabled.  In this case, it is used as follows:
 *
 *	ldr	r0, =PG_L1_PGTABLE_PADDR	<-- Address in the L1 table
 *	ldr	r1, =PG_L2_PGTABLE_PADDR	<-- Physical address of L2 page table
 *	ldr	r2, =PG_PGTABLE_NPAGES		<-- Total number of pages
 *	ldr	r3, =PG_PGTABLE_NPAGE1		<-- Number of pages in the first PTE
 *	ldr	r4, =MMU_L1_PGTABFLAGS		<-- L1 MMU flags
 *	pg_l1span r0, r1, r2, r3, r4, r4
 *
 * Inputs (unmodified unless noted):
 *   l1 - Physical or virtual address in the L1 table to begin writing (modified)
 *   l2 - Physical start address in the L2 page table (modified)
 *   npages - Number of pages to required to span that memory region (modified)
 *   ppage - The number of pages in the first page table (modified)
 *   mmuflags - L1 MMU flags to use
 *
 * Scratch registers (modified): l1, l2, npages, tmp
 *   l1 - Next L1 table address
 *   l2 - Physical start address of the next L2 page table
 *   npages - Loop counter
 *   ppage - After the first page, this will be the full number of pages.
 *   tmp - scratch
 *
 * Return:
 *   Nothing of interest.
 *
 * Assumptions:
 * - The MMU is not yet enabled
 * - The L2 page tables have been zeroed prior to calling this function
 *
 ************************************************************************************/

#ifdef CONFIG_PAGING
	.macro	pg_l1span, l1, l2, npages, ppage, mmuflags, tmp
	b		2f
1:
	/* Write the L1 table entry that refers to this (unmapped) small page
	 * table.
	 *
	 * tmp = (l2table | mmuflags), the value to write into the page table
	 */

	orr		\tmp, \l2, \mmuflags

	/* Write the value into the L1 table at the correct offset.
	 * (and increment the L1 table address by 4)
	 */

	str		\tmp, [\l1], #4

	/* Update the L2 page table address for the next L1 table entry. */

	add		\l2, \l2, #PT_SIZE  /* Next L2 page table start address */

	/* Update the number of pages that we have accounted for (with
	 * non-mappings).  NOTE that the first page may have fewer than
	 * the maximum entries per page table.
	 */

	sub		\npages, \npages, \ppage
	mov		\ppage, #PTE_NPAGES
2:
	/* Check if all of the pages have been written.  If not, then
	 * loop and write the next L1 entry.
	 */

	cmp		\npages, #0
	bgt		1b
	.endm

#endif /* CONFIG_PAGING */
#endif /* __ASSEMBLY__ */

/************************************************************************************
 * Inline Functions
 ************************************************************************************/

#ifndef __ASSEMBLY__

/************************************************************************************
 * Name: cp15_disable_mmu
 *
 * Description:
 *   Disable the MMU
 *
 * Inputs:
 *   None
 *
 ************************************************************************************/

static inline void cp15_disable_mmu(void)
{
  __asm__ __volatile__
    (
      "\tmrc p15, 0, r0, c1, c0, 0\n"
      "\tbic r0, r0, #1\n"
      "\tmcr p15, 0, r0, c1, c0, 0\n"
      :
      :
      : "r0", "memory"
    );
}

/************************************************************************************
 * Name: cp15_invalidate_tlbs
 *
 * Description:
 *   Invalidate entire unified TLB
 *
 *   The Invalidate entire TLB operations invalidate all unlocked entries in the
 *   TLB. The operation ignores the value in the register Rt specified by the MCR
 *   instruction that performs the operation. Software does not have to write a
 *   value to the register before issuing the MCR instruction.
 *
 * Inputs:
 *   None
 *
 ************************************************************************************/

static inline void cp15_invalidate_tlbs(void)
{
  __asm__ __volatile__
    (
      "\tmcr p15, 0, r0, c8, c7, 0\n" /* TLBIALL */
      :
      :
      : "r0", "memory"
    );
}

/************************************************************************************
 * Name: cp15_invalidate_tlb_bymva
 *
 * Description:
 *   Invalidate unified TLB entry by MVA all ASID Inner Shareable
 *
 * Inputs:
 *   vaddr - The virtual address to be invalidated
 *
 ************************************************************************************/

static inline void cp15_invalidate_tlb_bymva(uint32_t vaddr)
{
  __asm__ __volatile__
    (
      "\tdsb\n"
      "\tmcr p15, 0, %0, c8, c3, 3\n" /* TLBIMVAAIS */
      "\tdsb\n"
      "\tisb\n"
      :
      : "r" (vaddr)
      : "r1", "memory"
    );
}
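
/* A minimal usage sketch (hypothetical, not part of this header's API):
 * after software modifies a page table entry, any stale TLB entry for that
 * virtual address must be invalidated before the new mapping is reliably
 * visible.  'l2table' and 'index' are assumed to come from the caller's own
 * page table bookkeeping.
 */

static inline void mmu_example_remap_page(uint32_t *l2table,
                                          unsigned int index,
                                          uint32_t newpte, uint32_t vaddr)
{
  l2table[index] = newpte;           /* Install the new L2 page table entry */
  cp15_invalidate_tlb_bymva(vaddr);  /* Discard any cached translation */
}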

/************************************************************************************
 * Name: cp15_wrdacr
 *
 * Description:
 *   Write the Domain Access Control Register (DACR)
 *
 * Inputs:
 *   dacr - The new value of the DACR
 *
 ************************************************************************************/

static inline void cp15_wrdacr(unsigned int dacr)
{
  __asm__ __volatile__
    (
      "\tmcr p15, 0,0, c3, c0, 0\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      :
      : "r" (dacr)
      : "memory"
    );
}

/************************************************************************************
 * Name: cp14_wrttb
 *
 * Description:
 *   The ARMv7-A architecture supports two translation tables.  This
 *   implementation, however, uses only translation table 0.  This
 *   function writes the address of the page table to the Translation
 *   Table Base Register 0 (TTBR0).  Then it clears the TTB control
 *   register (TTBCR), indicating that we are using TTBR0.
 *
 * Inputs:
 *   ttb - The new value of the TTBR0 register
 *
 ************************************************************************************/

static inline void cp14_wrttb(unsigned int ttb)
{
  __asm__ __volatile__
    (
      "\tmcr p15, 0,0, c2, c0, 0\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tnop\n"
      "\tmov r1, #0\n"
      "\tmcr p15, 0, r1, c2, c0, 2\n"
      :
      : "r" (ttb)
      : "r1", "memory"
    );
}
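
/* Initialization sketch (hypothetical): a typical bring-up sequence programs
 * the domain access rights and the translation table base, then invalidates
 * the TLBs before the MMU is enabled.  The DACR value 0x55555555 ("client"
 * access in every domain) and the pgtable_paddr parameter are illustrative
 * assumptions, not requirements of this header.
 */

static inline void mmu_example_init(uint32_t pgtable_paddr)
{
  cp15_wrdacr(0x55555555);      /* All 16 domains: client access */
  cp14_wrttb(pgtable_paddr);    /* TTBR0 = physical address of the L1 table */
  cp15_invalidate_tlbs();       /* Discard any stale translations */
}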

/*************************************************************************************
 * Name: mmu_l1_getentry
 *
 * Description:
 *   Given a virtual address, return the value of the corresponding L1 table entry.
 *
 * Input Parameters:
 *   vaddr - The virtual address to be mapped.
 *
 ************************************************************************************/

#ifndef CONFIG_ARCH_ROMPGTABLE
static inline uint32_t mmu_l1_getentry(uint32_t vaddr)
{
  uint32_t *l1table = (uint32_t*)PGTABLE_BASE_VADDR;
  uint32_t  index   = vaddr >> 20;

  /* Return the value of the page table entry */

  return l1table[index];
}
#endif
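
#ifndef CONFIG_ARCH_ROMPGTABLE
/* Example sketch (hypothetical): bits[1:0] of an L1 descriptor encode its
 * type, and a value of zero is a fault (unmapped) entry.  A quick check for
 * "is this virtual address covered by an L1 mapping?" is therefore:
 */

static inline int mmu_example_l1_ismapped(uint32_t vaddr)
{
  return (mmu_l1_getentry(vaddr) & 3) != 0;  /* Nonzero type => mapped */
}
#endif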

/*************************************************************************************
 * Name: mmu_l2_getentry
 *
 * Description:
 *   Given the address of the beginning of an L2 page table and a virtual address,
 *   return the value of the corresponding L2 page table entry.
 *
 * Input Parameters:
 *   l2vaddr - The virtual address of the beginning of the L2 page table
 *   vaddr - The virtual address to be mapped.
 *
 ************************************************************************************/

#ifndef CONFIG_ARCH_ROMPGTABLE
static inline uint32_t mmu_l2_getentry(uint32_t l2vaddr, uint32_t vaddr)
{
  uint32_t *l2table  = (uint32_t*)l2vaddr;
  uint32_t  index;

  /* The table divides a 1MB address space up into 256 entries, each
   * corresponding to 4KB of address space.  The page table index is
   * derived from the offset from the beginning of the 1MB region.
   */

  index = (vaddr & 0x000ff000) >> 12;

  /* Return the value of the page table entry */

  return l2table[index];
}
#endif
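
/* Worked example (hypothetical values): for vaddr = 0x10123456 the offset
 * into its 1MB section is 0x123456, so the index computed above is
 * (0x10123456 & 0x000ff000) >> 12 = 0x23 (decimal 35) and the function
 * returns l2table[35].
 */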

#endif /* __ASSEMBLY__ */

/************************************************************************************
 * Public Variables
 ************************************************************************************/

/************************************************************************************
 * Public Function Prototypes
 ************************************************************************************/

#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C" {
#else
#define EXTERN extern
#endif

/************************************************************************************
 * Name: mmu_l1_setentry
 *
 * Description:
 *   Set one level 1 translation table entry.  Only a single L1 page table is
 *   supported.
 *
 * Input Parameters:
 *   paddr - The physical address to be mapped.  Must be aligned to a 1MB address
 *     boundary
 *   vaddr - The virtual address to be mapped.  Must be aligned to a 1MB address
 *     boundary
 *   mmuflags - The MMU flags to use in the mapping.
 *
 ************************************************************************************/

#ifndef CONFIG_ARCH_ROMPGTABLE
void mmu_l1_setentry(uint32_t paddr, uint32_t vaddr, uint32_t mmuflags);
#endif
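
/* Usage sketch (hypothetical addresses and flags): map the 1MB section at
 * physical address 0x50000000 to virtual address 0xe0000000, where
 * MMU_MEMFLAGS stands for one of the MMU_* flag combinations defined earlier
 * in this header:
 *
 *   mmu_l1_setentry(0x50000000, 0xe0000000, MMU_MEMFLAGS);
 */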

/****************************************************************************
 * Name: mmu_l1_restore
 *
 * Description:
 *   Restore one L1 table entry previously returned by mmu_l1_getentry() (or
 *   any other encoded L1 page table value).
 *
 * Input Parameters:
 *   vaddr - A virtual address within the L1 address region whose entry is to
 *     be restored
 *   l1entry - The value to write into the page table entry
 *
 ****************************************************************************/

#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
void mmu_l1_restore(uintptr_t vaddr, uint32_t l1entry);
#endif
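
/* Usage sketch (hypothetical): the expected pattern is to save an entry with
 * mmu_l1_getentry(), install a temporary mapping, and later put the original
 * entry back:
 *
 *   uint32_t saved = mmu_l1_getentry(vaddr);
 *   ... temporarily remap vaddr ...
 *   mmu_l1_restore(vaddr, saved);
 */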

/************************************************************************************
 * Name: mmu_l1_clrentry
 *
 * Description:
 *   Unmap one L1 region by writing zero into the L1 page table entry and by
 *   flushing caches and TLBs appropriately.
 *
 * Input Parameters:
 *   vaddr - A virtual address within the L1 address region to be unmapped.
 *
 ************************************************************************************/

#if !defined(CONFIG_ARCH_ROMPGTABLE) && defined(CONFIG_ARCH_ADDRENV)
#  define mmu_l1_clrentry(v) mmu_l1_restore(v,0)
#endif

/************************************************************************************
 * Name: mmu_l1_map_region
 *
 * Description:
 *   Set multiple level 1 translation table entries in order to map a region of
 *   memory.
 *
 * Input Parameters:
 *   mapping - Describes the mapping to be performed.
 *
 ************************************************************************************/

#ifndef CONFIG_ARCH_ROMPGTABLE
void mmu_l1_map_region(const struct section_mapping_s *mapping);
#endif
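
/* Usage sketch (hypothetical values, assuming the section_mapping_s fields
 * shown earlier in this header: physbase, virtbase, mmuflags, nsections).
 * Sixteen 1MB sections map 16MB of SDRAM in a single call:
 *
 *   static const struct section_mapping_s g_sdram_mapping =
 *   {
 *     0x20000000, 0xc0000000, MMU_MEMFLAGS, 16
 *   };
 *
 *   mmu_l1_map_region(&g_sdram_mapping);
 */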

/****************************************************************************
 * Name: mmu_invalidate_region
 *
 * Description:
 *   Invalidate TLBs for a range of addresses (all 4KB aligned).
 *
 * Input Parameters:
 *   vstart - The beginning of the virtual address region to invalidate.
 *   size  - The size of the region in bytes to be invalidated.
 *
 ****************************************************************************/

#ifndef CONFIG_ARCH_ROMPGTABLE
void mmu_invalidate_region(uint32_t vstart, size_t size);
#endif
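
/* Usage sketch (hypothetical): drop any cached translations for the same
 * 16MB region after its mappings are changed or removed:
 *
 *   mmu_invalidate_region(0xc0000000, 16 * 1024 * 1024);
 */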

#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */

#endif  /* __ARCH_ARM_SRC_ARMV7_A_MMU_H */