proc_reg.h
1 /* 2 * Copyright (c) 2007-2018 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 /* 29 * @OSF_COPYRIGHT@ 30 */ 31 /* CMU_ENDHIST */ 32 /* 33 * Mach Operating System 34 * Copyright (c) 1991,1990 Carnegie Mellon University 35 * All Rights Reserved. 36 * 37 * Permission to use, copy, modify and distribute this software and its 38 * documentation is hereby granted, provided that both the copyright 39 * notice and this permission notice appear in all copies of the 40 * software, derivative works or modified versions, and any portions 41 * thereof, and that both notices appear in supporting documentation. 42 * 43 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 44 * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 45 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 46 * 47 * Carnegie Mellon requests users of this software to return to 48 * 49 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 50 * School of Computer Science 51 * Carnegie Mellon University 52 * Pittsburgh PA 15213-3890 53 * 54 * any improvements or extensions that they make and grant Carnegie Mellon 55 * the rights to redistribute these changes. 56 */ 57 58 /* 59 */ 60 61 /* 62 * Processor registers for ARM 63 */ 64 #ifndef _ARM_PROC_REG_H_ 65 #define _ARM_PROC_REG_H_ 66 67 #if defined (__arm64__) 68 #include <pexpert/arm64/board_config.h> 69 #elif defined (__arm__) 70 #include <pexpert/arm/board_config.h> 71 #endif 72 73 #if defined (ARMA7) 74 #define __ARM_ARCH__ 7 75 #define __ARM_SUB_ARCH__ CPU_ARCH_ARMv7k 76 #define __ARM_VMSA__ 7 77 #define __ARM_VFP__ 3 78 79 /* Force physical aperture to be mapped at PTE level so that its mappings 80 * can be updated to reflect cache attribute changes on alias mappings. This prevents 81 * prefetched physical aperture cachelines from becoming dirty in L1 due to a write to 82 * an uncached alias mapping on the same core. Subsequent uncached writes from another 83 * core may not snoop this line, and the dirty line may end up being evicted later to 84 * effectively overwrite the uncached writes from other cores. */ 85 #define __ARM_PTE_PHYSMAP__ 1 86 /* __ARMA7_SMP__ controls whether we are consistent with the A7 MP_CORE spec; needed because entities other than 87 * the xnu-managed processors may need to snoop our cache operations. 88 */ 89 #define __ARMA7_SMP__ 1 90 #define __ARM_COHERENT_CACHE__ 1 91 #define __ARM_DEBUG__ 7 92 #define __ARM_USER_PROTECT__ 1 93 #define __ARM_TIME_TIMEBASE_ONLY__ 1 94 95 #endif 96 97 #if __ARM_42BIT_PA_SPACE__ 98 /* For now, force the issue! 
*/ 99 /* We need more VA space for the identity map to bootstrap the MMU */ 100 #undef __ARM64_PMAP_SUBPAGE_L1__ 101 #endif /* __ARM_42BIT_PA_SPACE__ */ 102 103 #if __ARM_KERNEL_PROTECT__ 104 /* 105 * This feature is not currently implemented for 32-bit ARM CPU architectures. 106 * A discussion of this feature for 64-bit ARM CPU architectures can be found 107 * in the ARM64 version of this file. 108 */ 109 #if __arm__ 110 #error __ARM_KERNEL_PROTECT__ is not supported on ARM32 111 #endif /* __arm__ */ 112 #endif /* __ARM_KERNEL_PROTECT__ */ 113 114 #if defined(ARM_BOARD_WFE_TIMEOUT_NS) 115 #define __ARM_ENABLE_WFE_ 1 116 #else /* defined(ARM_BOARD_WFE_TIMEOUT_NS) */ 117 #define __ARM_ENABLE_WFE_ 0 118 #endif /* defined(ARM_BOARD_WFE_TIMEOUT_NS) */ 119 120 /* 121 * MAX_PSETS allows the scheduler to create statically sized 122 * scheduling data structures (such as an array of processor sets, clutch 123 * buckets in Edge scheduler etc.). All current AMP platforms are dual 124 * pset and all non-AMP platforms are single pset architectures. This 125 * define might need to be conditionalized better (or moved to a better 126 * header) in the future. 127 * 128 * <Edge Multi-cluster Support Needed> 129 */ 130 #if __ARM_AMP__ 131 #define MAX_PSETS 2 132 #else /*__ARM_AMP__ */ 133 #define MAX_PSETS 1 134 #endif /* __ARM_AMP__ */ 135 136 /* 137 * The clutch scheduler is enabled only on non-AMP platforms for now. 
138 */ 139 #if CONFIG_CLUTCH 140 141 #if __ARM_AMP__ 142 143 /* Enable the Edge scheduler for all J129 platforms */ 144 #if XNU_TARGET_OS_OSX 145 #define CONFIG_SCHED_CLUTCH 1 146 #define CONFIG_SCHED_EDGE 1 147 #endif /* XNU_TARGET_OS_OSX */ 148 149 #else /* __ARM_AMP__ */ 150 #define CONFIG_SCHED_CLUTCH 1 151 #endif /* __ARM_AMP__ */ 152 153 #endif /* CONFIG_CLUTCH */ 154 155 /* Thread groups are enabled on all ARM platforms (irrespective of scheduler) */ 156 #define CONFIG_THREAD_GROUPS 1 157 158 #ifdef XNU_KERNEL_PRIVATE 159 160 #if __ARM_VFP__ 161 #define ARM_VFP_DEBUG 0 162 #endif /* __ARM_VFP__ */ 163 164 #endif /* XNU_KERNEL_PRIVATE */ 165 166 167 168 /* 169 * FSR registers 170 * 171 * CPSR: Current Program Status Register 172 * SPSR: Saved Program Status Registers 173 * 174 * 31 30 29 28 27 24 19 16 9 8 7 6 5 4 0 175 * +-----------------------------------------------------------+ 176 * | N| Z| C| V| Q|...| J|...|GE[3:0]|...| E| A| I| F| T| MODE | 177 * +-----------------------------------------------------------+ 178 */ 179 180 /* 181 * Flags 182 */ 183 #define PSR_NF 0x80000000 /* Negative/Less than */ 184 #define PSR_ZF 0x40000000 /* Zero */ 185 #define PSR_CF 0x20000000 /* Carry/Borrow/Extend */ 186 #define PSR_VF 0x10000000 /* Overflow */ 187 #define PSR_QF 0x08000000 /* saturation flag (QADD ARMv5) */ 188 189 /* 190 * Modified execution mode flags 191 */ 192 #define PSR_JF 0x01000000 /* Jazelle flag (BXJ ARMv5) */ 193 #define PSR_EF 0x00000200 /* mixed-endian flag (SETEND ARMv6) */ 194 #define PSR_AF 0x00000100 /* precise abort flag (ARMv6) */ 195 #define PSR_TF 0x00000020 /* thumb flag (BX ARMv4T) */ 196 #define PSR_TFb 5 /* thumb flag (BX ARMv4T) */ 197 198 /* 199 * Interrupts 200 */ 201 #define PSR_IRQFb 7 /* IRQ : 0 = IRQ enable */ 202 #define PSR_IRQF 0x00000080 /* IRQ : 0 = IRQ enable */ 203 #define PSR_FIQF 0x00000040 /* FIQ : 0 = FIQ enable */ 204 205 /* 206 * CPU mode 207 */ 208 #define PSR_USER_MODE 0x00000010 /* User mode */ 209 #define 
PSR_FIQ_MODE 0x00000011 /* FIQ mode */ 210 #define PSR_IRQ_MODE 0x00000012 /* IRQ mode */ 211 #define PSR_SVC_MODE 0x00000013 /* Supervisor mode */ 212 #define PSR_ABT_MODE 0x00000017 /* Abort mode */ 213 #define PSR_UND_MODE 0x0000001B /* Undefined mode */ 214 215 #define PSR_MODE_MASK 0x0000001F 216 #define PSR_IS_KERNEL(psr) (((psr) & PSR_MODE_MASK) != PSR_USER_MODE) 217 #define PSR_IS_USER(psr) (((psr) & PSR_MODE_MASK) == PSR_USER_MODE) 218 219 #define PSR_USERDFLT PSR_USER_MODE 220 #define PSR_USER_MASK (PSR_AF | PSR_IRQF | PSR_FIQF | PSR_MODE_MASK) 221 #define PSR_USER_SET PSR_USER_MODE 222 223 #define PSR_INTMASK PSR_IRQF /* Interrupt disable */ 224 225 /* 226 * FPEXC: Floating-Point Exception Register 227 */ 228 229 #define FPEXC_EX 0x80000000 /* Exception status */ 230 #define FPEXC_EX_BIT 31 231 #define FPEXC_EN 0x40000000 /* VFP : 1 = EN enable */ 232 #define FPEXC_EN_BIT 30 233 234 235 /* 236 * FPSCR: Floating-point Status and Control Register 237 */ 238 239 #define FPSCR_DN 0x02000000 /* Default NaN */ 240 #define FPSCR_FZ 0x01000000 /* Flush to zero */ 241 242 #define FPSCR_DEFAULT FPSCR_DN | FPSCR_FZ 243 244 245 /* 246 * FSR registers 247 * 248 * IFSR: Instruction Fault Status Register 249 * DFSR: Data Fault Status Register 250 */ 251 #define FSR_ALIGN 0x00000001 /* Alignment */ 252 #define FSR_DEBUG 0x00000002 /* Debug (watch/break) */ 253 #define FSR_ICFAULT 0x00000004 /* Fault on instruction cache maintenance */ 254 #define FSR_SFAULT 0x00000005 /* Translation Section */ 255 #define FSR_PFAULT 0x00000007 /* Translation Page */ 256 #define FSR_SACCESS 0x00000003 /* Section access */ 257 #define FSR_PACCESS 0x00000006 /* Page Access */ 258 #define FSR_SDOM 0x00000009 /* Domain Section */ 259 #define FSR_PDOM 0x0000000B /* Domain Page */ 260 #define FSR_SPERM 0x0000000D /* Permission Section */ 261 #define FSR_PPERM 0x0000000F /* Permission Page */ 262 #define FSR_EXT 0x00001000 /* External (Implementation Defined Classification) */ 263 264 #define 
FSR_MASK 0x0000040F /* Valid bits */ 265 #define FSR_ALIGN_MASK 0x0000040D /* Valid bits to check align */ 266 267 #define DFSR_WRITE 0x00000800 /* write data abort fault */ 268 269 #if defined (ARMA7) || defined (APPLE_ARM64_ARCH_FAMILY) || defined (BCM2837) 270 271 #define TEST_FSR_VMFAULT(status) \ 272 (((status) == FSR_PFAULT) \ 273 || ((status) == FSR_PPERM) \ 274 || ((status) == FSR_SFAULT) \ 275 || ((status) == FSR_SPERM) \ 276 || ((status) == FSR_ICFAULT) \ 277 || ((status) == FSR_SACCESS) \ 278 || ((status) == FSR_PACCESS)) 279 280 #define TEST_FSR_TRANSLATION_FAULT(status) \ 281 (((status) == FSR_SFAULT) \ 282 || ((status) == FSR_PFAULT)) 283 284 #else 285 286 #error Incompatible CPU type configured 287 288 #endif 289 290 /* 291 * Cache configuration 292 */ 293 294 #if defined (ARMA7) 295 296 /* I-Cache */ 297 #define MMU_I_CLINE 5 /* cache line size as 1<<MMU_I_CLINE (32) */ 298 299 /* D-Cache */ 300 #define MMU_CSIZE 15 /* cache size as 1<<MMU_CSIZE (32K) */ 301 #define MMU_CLINE 6 /* cache line size as 1<<MMU_CLINE (64) */ 302 #define MMU_NWAY 2 /* set associativity 1<<MMU_NWAY (4) */ 303 #define MMU_I7SET 6 /* cp15 c7 set incrementer 1<<MMU_I7SET */ 304 #define MMU_I7WAY 30 /* cp15 c7 way incrementer 1<<MMU_I7WAY */ 305 306 #define MMU_SWAY (MMU_CSIZE - MMU_NWAY) /* set size 1<<MMU_SWAY */ 307 #define MMU_NSET (MMU_SWAY - MMU_CLINE) /* lines per way 1<<MMU_NSET */ 308 309 #define __ARM_L2CACHE__ 1 310 311 #define L2_CSIZE 20 /* cache size as 1<<MMU_CSIZE */ 312 #define L2_CLINE 6 /* cache line size as 1<<MMU_CLINE (64) */ 313 #define L2_NWAY 3 /* set associativity 1<<MMU_NWAY (8) */ 314 #define L2_I7SET 6 /* cp15 c7 set incrementer 1<<MMU_I7SET */ 315 #define L2_I7WAY 29 /* cp15 c7 way incrementer 1<<MMU_I7WAY */ 316 #define L2_I9WAY 29 /* cp15 c9 way incrementer 1<<MMU_I9WAY */ 317 318 #define L2_SWAY (L2_CSIZE - L2_NWAY) /* set size 1<<MMU_SWAY */ 319 #define L2_NSET (L2_SWAY - L2_CLINE) /* lines per way 1<<MMU_NSET */ 320 321 #elif defined 
(APPLETYPHOON) 322 323 /* I-Cache */ 324 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 325 326 /* D-Cache */ 327 #define MMU_CLINE 6 /* cache line size as 1<<MMU_CLINE (64) */ 328 329 #elif defined (APPLETWISTER) 330 331 /* I-Cache */ 332 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 333 334 /* D-Cache */ 335 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 336 337 #elif defined (APPLEHURRICANE) 338 339 /* I-Cache */ 340 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 341 342 /* D-Cache */ 343 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 344 345 #elif defined (APPLEMONSOON) 346 347 /* I-Cache, 96KB for Monsoon, 48KB for Mistral, 6-way. */ 348 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 349 350 /* D-Cache, 64KB for Monsoon, 32KB for Mistral, 4-way. */ 351 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 352 353 #elif defined (APPLEVORTEX) 354 355 /* I-Cache, 128KB 8-way for Vortex, 48KB 6-way for Tempest. */ 356 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 357 358 /* D-Cache, 128KB 8-way for Vortex, 32KB 4-way for Tempest. */ 359 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 360 361 #elif defined (APPLELIGHTNING) 362 363 /* I-Cache, 192KB for Lightning, 96KB for Thunder, 6-way. */ 364 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 365 366 /* D-Cache, 128KB for Lightning, 8-way. 48KB for Thunder, 6-way. */ 367 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 368 369 #elif defined (APPLEFIRESTORM) 370 371 /* I-Cache, 256KB for Firestorm, 128KB for Icestorm, 6-way. */ 372 #define MMU_I_CLINE 6 /* cache line size as 1<<MMU_I_CLINE (64) */ 373 374 /* D-Cache, 160KB for Firestorm, 8-way. 64KB for Icestorm, 6-way. */ 375 #define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */ 376 377 #elif defined (BCM2837) /* Raspberry Pi 3 */ 378 379 /* I-Cache. 
We don't have detailed spec so we just follow the ARM technical reference. */ 380 #define MMU_I_CLINE 6 381 382 /* D-Cache. */ 383 #define MMU_CLINE 6 384 385 #elif defined (VMAPPLE) 386 387 /* I-Cache. */ 388 #define MMU_I_CLINE 6 389 390 /* D-Cache. */ 391 #define MMU_CLINE 6 392 393 #else 394 #error processor not supported 395 #endif 396 397 398 #if (__ARM_VMSA__ <= 7) 399 400 /* 401 * SCTLR: System Control Register 402 */ 403 /* 404 * System Control Register (SCTLR) 405 * 406 * 31 30 29 28 27 25 24 22 21 20 19 17 15 14 13 12 11 10 5 2 1 0 407 * +-+--+---+---+----+-+--+--+--+--+----+---+-+--+-+-+--+--+--+--+--+---+-+------+--+-+-+-+ 408 * |0|TE|AFE|TRE|NMFI|0|EE|VE|11|FI|UWXN|WXN|1|HA|1|0|RR| V| I| Z|SW|000|1|C15BEN|11|C|A|M| 409 * +-+--+---+---+----+-+--+--+--+--+----+---+-+--+-+-+--+--+--+--+--+---+-+------+--+-+-+-+ 410 * 411 * Where: 412 * TE: Thumb Exception enable 413 * AFE: Access flag enable 414 * TRE: TEX remap enable 415 * NMFI: Non-maskable FIQ (NMFI) support 416 * EE: Exception Endianness 417 * VE: Interrupt Vectors Enable 418 * FI: Fast interrupts configuration enable 419 * ITD: IT Disable 420 * UWXN: Unprivileged write permission implies PL1 XN 421 * WXN: Write permission implies XN 422 * HA: Hardware Access flag enable 423 * RR: Round Robin select 424 * V: High exception vectors 425 * I: Instruction cache enable 426 * Z: Branch prediction enable 427 * SW: SWP/SWPB enable 428 * C15BEN: CP15 barrier enable 429 * C: Cache enable 430 * A: Alignment check enable 431 * M: MMU enable 432 */ 433 434 #define SCTLR_RESERVED 0x82DD8394 435 436 #define SCTLR_ENABLE 0x00000001 /* MMU enable */ 437 #define SCTLR_ALIGN 0x00000002 /* Alignment check enable */ 438 #define SCTLR_DCACHE 0x00000004 /* Data or Unified Cache enable */ 439 #define SCTLR_BEN 0x00000040 /* CP15 barrier enable */ 440 #define SCTLR_SW 0x00000400 /* SWP/SWPB Enable */ 441 #define SCTLR_PREDIC 0x00000800 /* Branch prediction enable */ 442 #define SCTLR_ICACHE 0x00001000 /* Instruction cache 
enabled. */ 443 #define SCTLR_HIGHVEC 0x00002000 /* Vector table at 0xffff0000 */ 444 #define SCTLR_RROBIN 0x00004000 /* Round Robin replacement */ 445 #define SCTLR_HA 0x00020000 /* Hardware Access flag enable */ 446 #define SCTLR_NMFI 0x08000000 /* Non-maskable FIQ */ 447 #define SCTLR_TRE 0x10000000 /* TEX remap enable */ 448 #define SCTLR_AFE 0x20000000 /* Access flag enable */ 449 #define SCTLR_TE 0x40000000 /* Thumb Exception enable */ 450 451 #define SCTLR_DEFAULT \ 452 (SCTLR_AFE|SCTLR_TRE|SCTLR_HIGHVEC|SCTLR_ICACHE|SCTLR_PREDIC|SCTLR_DCACHE|SCTLR_ENABLE) 453 454 455 /* 456 * PRRR: Primary Region Remap Register 457 * 458 * 31 24 20 19 18 17 16 0 459 * +---------------------------------------------------------------+ 460 * | NOSn | Res |NS1|NS0|DS1|DS0| TRn | 461 * +---------------------------------------------------------------+ 462 */ 463 464 #define PRRR_NS1 0x00080000 465 #define PRRR_NS0 0x00040000 466 #define PRRR_DS1 0x00020000 467 #define PRRR_DS0 0x00010000 468 469 #define PRRR_NOSn_ISH(region) (0x1<<((region)+24)) 470 471 #if defined (ARMA7) 472 #define PRRR_SETUP (0x1F08022A) 473 #else 474 #error processor not supported 475 #endif 476 477 /* 478 * NMRR, Normal Memory Remap Register 479 * 480 * 30 28 26 24 22 20 18 16 14 12 10 8 6 4 2 0 481 * +---------------------------------------------------------------+ 482 * |OR7|OR6|OR5|OR4|OR3|OR2|OR1|OR0|IR7|IR6|IR5|IR4|IR3|IR2|IR1|IR0| 483 * +---------------------------------------------------------------+ 484 */ 485 486 #define NMRR_DISABLED 0x0 /* Non-cacheable */ 487 #define NMRR_WRITEBACK 0x1 /* Write-Back, Write-Allocate */ 488 #define NMRR_WRITETHRU 0x2 /* Write-Through, no Write-Allocate */ 489 #define NMRR_WRITEBACKNO 0x3 /* Write-Back, no Write-Allocate */ 490 491 #if defined (ARMA7) 492 #define NMRR_SETUP (0x01210121) 493 #else 494 #error processor not supported 495 #endif 496 497 /* 498 * TTBR: Translation Table Base Register 499 * 500 */ 501 502 #define TTBR_IRGN_DISBALED 0x00000000 /* inner 
non-cacheable */ 503 #define TTBR_IRGN_WRITEBACK 0x00000040 /* inner write back and allocate */ 504 #define TTBR_IRGN_WRITETHRU 0x00000001 /* inner write thru */ 505 #define TTBR_IRGN_WRITEBACKNO 0x00000041 /* inner write back no allocate */ 506 507 #define TTBR_RGN_DISBALED 0x00000000 /* outer non-cacheable */ 508 #define TTBR_RGN_WRITEBACK 0x00000008 /* outer write back and allocate */ 509 #define TTBR_RGN_WRITETHRU 0x00000010 /* outer write thru outer cache */ 510 #define TTBR_RGN_WRITEBACKNO 0x00000018 /* outer write back no allocate */ 511 512 #define TTBR_SHARED 0x00000002 /* Shareable memory atribute */ 513 #define TTBR_SHARED_NOTOUTER 0x00000020 /* Outer not shareable memory atribute */ 514 515 #if defined (ARMA7) 516 #define TTBR_SETUP (TTBR_RGN_WRITEBACK|TTBR_IRGN_WRITEBACK|TTBR_SHARED) 517 #else 518 #error processor not supported 519 #endif 520 521 /* 522 * TTBCR: Translation Table Base Control register 523 * 524 * 31 3 2 0 525 * +----------+ 526 * | zero | N | 527 * +----------+ 528 * 529 * If N=0, always use translation table base register 0. Otherwise, if 530 * bits [31:32-N] of the address are all zero use base register 0. Otherwise, 531 * use base register 1. 532 * 533 * Reading from this register also returns the page table boundary for TTB0. 534 * Writing to it updates the boundary for TTB0. (0=16KB, 1=8KB, 2=4KB, etc...) 
535 */ 536 537 #define TTBCR_N_1GB_TTB0 0x2 /* 1 GB TTB0, 3GB TTB1 */ 538 #define TTBCR_N_2GB_TTB0 0x1 /* 2 GB TTB0, 2GB TTB1 */ 539 #define TTBCR_N_4GB_TTB0 0x0 /* 4 GB TTB0 */ 540 #define TTBCR_N_MASK 0x3 541 542 #define TTBCR_N_SETUP (TTBCR_N_2GB_TTB0) 543 544 545 546 /* 547 * ARM Page Granule 548 */ 549 #define ARM_PGSHIFT 12 550 #define ARM_PGBYTES (1 << ARM_PGSHIFT) 551 #define ARM_PGMASK (ARM_PGBYTES-1) 552 553 /* 554 * DACR: Domain Access Control register 555 */ 556 557 #define DAC_FAULT 0x0 /* invalid domain - everyone loses */ 558 #define DAC_CLIENT 0x1 /* client domain - use AP bits */ 559 #define DAC_RESERVE 0x2 /* reserved domain - undefined */ 560 #define DAC_MANAGER 0x3 /* manager domain - all access */ 561 562 #define DACR_SET(dom, x) ((x)<<((dom)<<1)) 563 564 565 #define ARM_DOM_DEFAULT 0 /* domain that forces AP use */ 566 #define ARM_DAC_SETUP 0x1 567 568 /* 569 * ARM 2-level Page Table support 570 */ 571 572 /* 573 * Memory Attribute Index 574 */ 575 #define CACHE_ATTRINDX_WRITEBACK 0x0 /* cache enabled, buffer enabled */ 576 #define CACHE_ATTRINDX_WRITECOMB 0x1 /* no cache, buffered writes */ 577 #define CACHE_ATTRINDX_WRITETHRU 0x2 /* cache enabled, buffer disabled */ 578 #define CACHE_ATTRINDX_DISABLE 0x3 /* no cache, no buffer */ 579 #define CACHE_ATTRINDX_INNERWRITEBACK 0x4 /* inner cache enabled, buffer enabled, write allocate */ 580 #define CACHE_ATTRINDX_POSTED CACHE_ATTRINDX_DISABLE 581 #define CACHE_ATTRINDX_POSTED_REORDERED CACHE_ATTRINDX_DISABLE 582 #define CACHE_ATTRINDX_POSTED_COMBINED_REORDERED CACHE_ATTRINDX_DISABLE 583 #define CACHE_ATTRINDX_DEFAULT CACHE_ATTRINDX_WRITEBACK 584 585 586 /* 587 * Access protection bit values 588 */ 589 #define AP_RWNA 0x0 /* priv=read-write, user=no-access */ 590 #define AP_RWRW 0x1 /* priv=read-write, user=read-write */ 591 #define AP_RONA 0x2 /* priv=read-only , user=no-access */ 592 #define AP_RORO 0x3 /* priv=read-only , user=read-only */ 593 594 /* 595 * L1 Translation table 596 * 597 * Each 
translation table is up to 16KB 598 * 4096 32-bit entries of 1MB of address space. 599 */ 600 601 #define ARM_TT_L1_SIZE 0x00100000 /* size of area covered by a tte */ 602 #define ARM_TT_L1_OFFMASK 0x000FFFFF /* offset within an L1 entry */ 603 #define ARM_TT_L1_TABLE_OFFMASK 0x000FFFFF /* offset within an L1 entry */ 604 #define ARM_TT_L1_BLOCK_OFFMASK 0x000FFFFF /* offset within an L1 entry */ 605 #define ARM_TT_L1_SUPER_OFFMASK 0x00FFFFFF /* offset within an L1 entry */ 606 #define ARM_TT_L1_SHIFT 20 /* page descriptor shift */ 607 #define ARM_TT_L1_INDEX_MASK 0xfff00000 /* mask for getting index in L1 table from virtual address */ 608 609 #define ARM_TT_L1_PT_SIZE (4 * ARM_TT_L1_SIZE) /* 4 L1 table entries required to consume 1 L2 pagetable page */ 610 #define ARM_TT_L1_PT_OFFMASK (ARM_TT_L1_PT_SIZE - 1) 611 612 /* 613 * L2 Translation table 614 * 615 * Each translation table is up to 1KB 616 * 4096 32-bit entries of 1MB (2^30) of address space. 617 */ 618 619 #define ARM_TT_L2_SIZE 0x00001000 /* size of area covered by a tte */ 620 #define ARM_TT_L2_OFFMASK 0x00000FFF /* offset within an L2 entry */ 621 #define ARM_TT_L2_SHIFT 12 /* page descriptor shift */ 622 #define ARM_TT_L2_INDEX_MASK 0x000ff000 /* mask for getting index in L2 table from virtual address */ 623 624 /* 625 * Convenience definitions for: 626 * ARM_TT_LEAF: The last level of the configured page table format. 627 * ARM_TT_TWIG: The second to last level of the configured page table format. 628 * ARM_TT_ROOT: The first level of the configured page table format. 629 * 630 * My apologies to any botanists who may be reading this. 
631 */ 632 #define ARM_TT_LEAF_SIZE ARM_TT_L2_SIZE 633 #define ARM_TT_LEAF_OFFMASK ARM_TT_L2_OFFMASK 634 #define ARM_TT_LEAF_SHIFT ARM_TT_L2_SHIFT 635 #define ARM_TT_LEAF_INDEX_MASK ARM_TT_L2_INDEX_MASK 636 637 #define ARM_TT_TWIG_SIZE ARM_TT_L1_SIZE 638 #define ARM_TT_TWIG_OFFMASK ARM_TT_L1_OFFMASK 639 #define ARM_TT_TWIG_SHIFT ARM_TT_L1_SHIFT 640 #define ARM_TT_TWIG_INDEX_MASK ARM_TT_L1_INDEX_MASK 641 642 #define ARM_TT_ROOT_SIZE ARM_TT_L1_SIZE 643 #define ARM_TT_ROOT_OFFMASK ARM_TT_L1_OFFMASK 644 #define ARM_TT_ROOT_SHIFT ARM_TT_L1_SHIFT 645 #define ARM_TT_ROOT_INDEX_MASK ARM_TT_L1_INDEX_MASK 646 647 /* 648 * Level 1 Translation Table Entry 649 * 650 * page table entry 651 * 652 * 31 10 9 8 5 4 2 0 653 * +----------------------+-+----+--+--+--+ 654 * | page table base addr | |dom |XN|00|01| 655 * +----------------------+-+----+--+--+--+ 656 * 657 * direct (1MB) section entry 658 * 659 * 31 20 18 15 12 10 9 8 5 4 2 0 660 * +------------+--+-+-+-+---+--+-+----+--+--+--+ 661 * | base addr |00|G|S|A|TEX|AP| |dom |XN|CB|10| 662 * +------------+--+-+-+-+---+--+-+----+--+--+--+ 663 * 664 * super (16MB) section entry 665 * 666 * 31 24 23 18 15 12 10 9 8 5 4 2 0 667 * +---------+------+-+-+-+---+--+-+----+--+--+--+ 668 * |base addr|000001|G|S|A|TEX|AP| |dom |XN|CB|10| 669 * +---------+------+-+-+-+---+--+-+----+--+--+--+ 670 * 671 * where: 672 * 'G' is the notGlobal bit 673 * 'S' is the shared bit 674 * 'A' in the access permission extension (APX) bit 675 * 'TEX' remap register control bits 676 * 'AP' is the access protection 677 * 'dom' is the domain for the translation 678 * 'XN' is the eXecute Never bit 679 * 'CB' is the cache/buffer attribute 680 */ 681 682 #define ARM_TTE_EMPTY 0x00000000 /* unasigned entry */ 683 684 #define ARM_TTE_TYPE_FAULT 0x00000000 /* fault entry type */ 685 #define ARM_TTE_TYPE_TABLE 0x00000001 /* page table type */ 686 #define ARM_TTE_TYPE_BLOCK 0x00000002 /* section entry type */ 687 #define ARM_TTE_TYPE_MASK 0x00000003 /* mask for 
extracting the type */ 688 689 #define ARM_TTE_BLOCK_NGSHIFT 17 690 #define ARM_TTE_BLOCK_NG_MASK 0x00020000 /* mask to determine notGlobal bit */ 691 #define ARM_TTE_BLOCK_NG 0x00020000 /* value for a per-process mapping */ 692 693 #define ARM_TTE_BLOCK_SHSHIFT 16 694 #define ARM_TTE_BLOCK_SH_MASK 0x00010000 /* shared (SMP) mapping mask */ 695 #define ARM_TTE_BLOCK_SH 0x00010000 /* shared (SMP) mapping */ 696 697 #define ARM_TTE_BLOCK_CBSHIFT 2 698 #define ARM_TTE_BLOCK_CB(x) ((x) << ARM_TTE_BLOCK_CBSHIFT) 699 #define ARM_TTE_BLOCK_CB_MASK (3<< ARM_TTE_BLOCK_CBSHIFT) 700 701 #define ARM_TTE_BLOCK_AP0SHIFT 10 702 #define ARM_TTE_BLOCK_AP0 (1<<ARM_TTE_BLOCK_AP0SHIFT) 703 #define ARM_TTE_BLOCK_AP0_MASK (1<<ARM_TTE_BLOCK_AP0SHIFT) 704 705 #define ARM_TTE_BLOCK_AP1SHIFT 11 706 #define ARM_TTE_BLOCK_AP1 (1<<ARM_TTE_BLOCK_AP1SHIFT) 707 #define ARM_TTE_BLOCK_AP1_MASK (1<<ARM_TTE_BLOCK_AP1SHIFT) 708 709 #define ARM_TTE_BLOCK_AP2SHIFT 15 710 #define ARM_TTE_BLOCK_AP2 (1<<ARM_TTE_BLOCK_AP2SHIFT) 711 #define ARM_TTE_BLOCK_AP2_MASK (1<<ARM_TTE_BLOCK_AP2SHIFT) 712 713 /* access protections */ 714 #define ARM_TTE_BLOCK_AP(ap) \ 715 ((((ap)&0x1)<<ARM_TTE_BLOCK_AP1SHIFT) | \ 716 ((((ap)>>1)&0x1)<<ARM_TTE_BLOCK_AP2SHIFT)) 717 718 /* mask access protections */ 719 #define ARM_TTE_BLOCK_APMASK \ 720 (ARM_TTE_BLOCK_AP1_MASK | ARM_TTE_BLOCK_AP2_MASK) 721 722 #define ARM_TTE_BLOCK_AF ARM_TTE_BLOCK_AP0 /* value for access */ 723 #define ARM_TTE_BLOCK_AFMASK ARM_TTE_BLOCK_AP0_MASK /* access mask */ 724 725 #define ARM_TTE_TABLE_MASK 0xFFFFFC00 /* mask for a L2 page table entry */ 726 #define ARM_TTE_TABLE_SHIFT 10 /* shift for L2 page table phys address */ 727 728 #define ARM_TTE_BLOCK_L1_MASK 0xFFF00000 /* mask to extract phys address from L1 section entry */ 729 #define ARM_TTE_BLOCK_L1_SHIFT 20 /* shift for 1MB section phys address */ 730 731 #define ARM_TTE_SUPER_L1_MASK 0xFF000000 /* mask to extract phys address from L1 super entry */ 732 #define ARM_TTE_SUPER_L1_SHIFT 24 /* shift 
for 16MB section phys address */ 733 734 #define ARM_TTE_BLOCK_SUPER 0x00040000 /* make section a 16MB section */ 735 #define ARM_TTE_BLOCK_SUPER_MASK 0x00F40000 /* make section a 16MB section */ 736 737 #define ARM_TTE_BLOCK_NXSHIFT 4 738 #define ARM_TTE_BLOCK_NX 0x00000010 /* section is no execute */ 739 #define ARM_TTE_BLOCK_NX_MASK 0x00000010 /* mask for extracting no execute bit */ 740 #define ARM_TTE_BLOCK_PNX ARM_TTE_BLOCK_NX 741 742 #define ARM_TTE_BLOCK_TEX0SHIFT 12 743 #define ARM_TTE_BLOCK_TEX0 (1<<ARM_TTE_BLOCK_TEX0SHIFT) 744 #define ARM_TTE_BLOCK_TEX0_MASK (1<<ARM_TTE_BLOCK_TEX0SHIFT) 745 746 #define ARM_TTE_BLOCK_TEX1SHIFT 13 747 #define ARM_TTE_BLOCK_TEX1 (1<<ARM_TTE_BLOCK_TEX1SHIFT) 748 #define ARM_TTE_BLOCK_TEX1_MASK (1<<ARM_TTE_BLOCK_TEX1SHIFT) 749 750 #define ARM_TTE_BLOCK_TEX2SHIFT 14 751 #define ARM_TTE_BLOCK_TEX2 (1<<ARM_TTE_BLOCK_TEX2SHIFT) 752 #define ARM_TTE_BLOCK_TEX2_MASK (1<<ARM_TTE_BLOCK_TEX2SHIFT) 753 754 755 /* mask memory attributes index */ 756 #define ARM_TTE_BLOCK_ATTRINDX(i) \ 757 ((((i)&0x3)<<ARM_TTE_BLOCK_CBSHIFT) | \ 758 ((((i)>>2)&0x1)<<ARM_TTE_BLOCK_TEX0SHIFT)) 759 760 /* mask memory attributes index */ 761 #define ARM_TTE_BLOCK_ATTRINDXMASK \ 762 (ARM_TTE_BLOCK_CB_MASK | ARM_TTE_BLOCK_TEX0_MASK) 763 764 765 /* 766 * Level 2 Page table entries 767 * 768 * The following page table entry types are possible: 769 * 770 * fault page entry 771 * 31 2 0 772 * +----------------------------------------+--+ 773 * | ignored |00| 774 * +----------------------------------------+--+ 775 * 776 * large (64KB) page entry 777 * 31 16 15 12 9 6 4 3 2 0 778 * +----------------+--+---+-+-+-+---+--+-+-+--+ 779 * | base phys addr |XN|TEX|G|S|A|000|AP|C|B|01| 780 * +----------------+--+---+-+-+-+---+--+-+-+--+ 781 * 782 * small (4KB) page entry 783 * 31 12 9 6 4 3 2 1 0 784 * +-----------------------+-+-+-+---+--+-+-+-+--+ 785 * | base phys addr |G|S|A|TEX|AP|C|B|1|XN| 786 * +-----------------------+-+-+-+---+--+-+-+-+--+ 787 * 788 * also where: 
789 * 'XN' is the eXecute Never bit 790 * 'G' is the notGlobal (process-specific) bit 791 * 'S' is the shared bit 792 * 'A' in the access permission extension (ATX) bit 793 * 'TEX' remap register control bits 794 * 'AP' is the access protection 795 * 'dom' is the domain for the translation 796 * 'C' is the cache attribute 797 * 'B' is the write buffer attribute 798 */ 799 800 /* markers for (invalid) PTE for a page sent to compressor */ 801 #define ARM_PTE_COMPRESSED ARM_PTE_TEX1 /* compressed... */ 802 #define ARM_PTE_COMPRESSED_ALT ARM_PTE_TEX2 /* ... and was "alt_acct" */ 803 #define ARM_PTE_COMPRESSED_MASK (ARM_PTE_COMPRESSED | ARM_PTE_COMPRESSED_ALT) 804 #define ARM_PTE_IS_COMPRESSED(x, p) \ 805 ((((x) & 0x3) == 0) && /* PTE is not valid... */ \ 806 ((x) & ARM_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ 807 ((!((x) & ~ARM_PTE_COMPRESSED_MASK)) || /* ...no other bits */ \ 808 (panic("compressed PTE %p 0x%x has extra bits 0x%x: corrupted?", \ 809 (p), (x), (x) & ~ARM_PTE_COMPRESSED_MASK), FALSE))) 810 811 #define PTE_SHIFT 2 /* shift width of a pte (sizeof(pte) == (1 << PTE_SHIFT)) */ 812 #define PTE_PGENTRIES (1024 >> PTE_SHIFT) /* number of ptes per page */ 813 814 #define ARM_PTE_EMPTY 0x00000000 /* unasigned - invalid entry */ 815 816 #define ARM_PTE_TYPE_FAULT 0x00000000 /* fault entry type */ 817 #define ARM_PTE_TYPE_VALID 0x00000002 /* valid L2 entry */ 818 #define ARM_PTE_TYPE 0x00000002 /* small page entry type */ 819 #define ARM_PTE_TYPE_MASK 0x00000002 /* mask to get pte type */ 820 821 #define ARM_PTE_NG_MASK 0x00000800 /* mask to determine notGlobal bit */ 822 #define ARM_PTE_NG 0x00000800 /* value for a per-process mapping */ 823 824 #define ARM_PTE_SHSHIFT 10 825 #define ARM_PTE_SHMASK 0x00000400 /* shared (SMP) mapping mask */ 826 #define ARM_PTE_SH 0x00000400 /* shared (SMP) mapping */ 827 828 #define ARM_PTE_CBSHIFT 2 829 #define ARM_PTE_CB(x) ((x)<<ARM_PTE_CBSHIFT) 830 #define ARM_PTE_CB_MASK (0x3<<ARM_PTE_CBSHIFT) 831 832 #define 
ARM_PTE_AP0SHIFT 4 833 #define ARM_PTE_AP0 (1<<ARM_PTE_AP0SHIFT) 834 #define ARM_PTE_AP0_MASK (1<<ARM_PTE_AP0SHIFT) 835 836 #define ARM_PTE_AP1SHIFT 5 837 #define ARM_PTE_AP1 (1<<ARM_PTE_AP1SHIFT) 838 #define ARM_PTE_AP1_MASK (1<<ARM_PTE_AP1SHIFT) 839 840 #define ARM_PTE_AP2SHIFT 9 841 #define ARM_PTE_AP2 (1<<ARM_PTE_AP2SHIFT) 842 #define ARM_PTE_AP2_MASK (1<<ARM_PTE_AP2SHIFT) 843 844 /* access protections */ 845 #define ARM_PTE_AP(ap) \ 846 ((((ap)&0x1)<<ARM_PTE_AP1SHIFT) | \ 847 ((((ap)>>1)&0x1)<<ARM_PTE_AP2SHIFT)) 848 849 /* mask access protections */ 850 #define ARM_PTE_APMASK \ 851 (ARM_PTE_AP1_MASK | ARM_PTE_AP2_MASK) 852 853 #define ARM_PTE_AF ARM_PTE_AP0 /* value for access */ 854 #define ARM_PTE_AFMASK ARM_PTE_AP0_MASK /* access mask */ 855 856 #define ARM_PTE_PAGE_MASK 0xFFFFF000 /* mask for a small page */ 857 #define ARM_PTE_PAGE_SHIFT 12 /* page shift for 4KB page */ 858 859 #define ARM_PTE_NXSHIFT 0 860 #define ARM_PTE_NX 0x00000001 /* small page no execute */ 861 #define ARM_PTE_NX_MASK (1<<ARM_PTE_NXSHIFT) 862 863 #define ARM_PTE_PNXSHIFT 0 864 #define ARM_PTE_PNX 0x00000000 /* no privilege execute. 
not impl */ 865 #define ARM_PTE_PNX_MASK (0<<ARM_PTE_NXSHIFT) 866 867 #define ARM_PTE_XMASK (ARM_PTE_PNX_MASK | ARM_PTE_NX_MASK) 868 869 #define ARM_PTE_TEX0SHIFT 6 870 #define ARM_PTE_TEX0 (1<<ARM_PTE_TEX0SHIFT) 871 #define ARM_PTE_TEX0_MASK (1<<ARM_PTE_TEX0SHIFT) 872 873 #define ARM_PTE_TEX1SHIFT 7 874 #define ARM_PTE_TEX1 (1<<ARM_PTE_TEX1SHIFT) 875 #define ARM_PTE_TEX1_MASK (1<<ARM_PTE_TEX1SHIFT) 876 877 #define ARM_PTE_WRITEABLESHIFT ARM_PTE_TEX1SHIFT 878 #define ARM_PTE_WRITEABLE ARM_PTE_TEX1 879 #define ARM_PTE_WRITEABLE_MASK ARM_PTE_TEX1_MASK 880 881 #define ARM_PTE_TEX2SHIFT 8 882 #define ARM_PTE_TEX2 (1<<ARM_PTE_TEX2SHIFT) 883 #define ARM_PTE_TEX2_MASK (1<<ARM_PTE_TEX2SHIFT) 884 885 #define ARM_PTE_WIREDSHIFT ARM_PTE_TEX2SHIFT 886 #define ARM_PTE_WIRED ARM_PTE_TEX2 887 #define ARM_PTE_WIRED_MASK ARM_PTE_TEX2_MASK 888 889 /* mask memory attributes index */ 890 #define ARM_PTE_ATTRINDX(indx) \ 891 ((((indx)&0x3)<<ARM_PTE_CBSHIFT) | \ 892 ((((indx)>>2)&0x1)<<ARM_PTE_TEX0SHIFT)) 893 894 /* mask memory attributes index */ 895 #define ARM_PTE_ATTRINDXMASK \ 896 (ARM_PTE_CB_MASK | ARM_PTE_TEX0_MASK) 897 898 #define ARM_SMALL_PAGE_SIZE (4096) /* 4KB */ 899 #define ARM_LARGE_PAGE_SIZE (64*1024) /* 64KB */ 900 #define ARM_SECTION_SIZE (1024*1024) /* 1MB */ 901 #define ARM_SUPERSECTION_SIZE (16*1024*1024) /* 16MB */ 902 903 #define TLBI_ADDR_SHIFT (12) 904 #define TLBI_ADDR_SIZE (20) 905 #define TLBI_ADDR_MASK (((1ULL << TLBI_ADDR_SIZE) - 1)) 906 #define TLBI_ASID_SHIFT (0) 907 #define TLBI_ASID_SIZE (8) 908 #define TLBI_ASID_MASK (((1ULL << TLBI_ASID_SIZE) - 1)) 909 #endif 910 911 /* 912 * Format of the Debug Status and Control Register (DBGDSCR) 913 */ 914 #define ARM_DBGDSCR_RXFULL (1 << 30) 915 #define ARM_DBGDSCR_TXFULL (1 << 29) 916 #define ARM_DBGDSCR_RXFULL_1 (1 << 27) 917 #define ARM_DBGDSCR_TXFULL_1 (1 << 26) 918 #define ARM_DBGDSCR_PIPEADV (1 << 25) 919 #define ARM_DBGDSCR_INSTRCOMPL_1 (1 << 24) 920 #define ARM_DBGDSCR_EXTDCCMODE_MASK (3 << 20) 921 #define 
ARM_DBGDSCR_EXTDCCMODE_NONBLOCKING (0 << 20) 922 #define ARM_DBGDSCR_EXTDCCMODE_STALL (1 << 20) 923 #define ARM_DBGDSCR_EXTDCCMODE_FAST (1 << 20) 924 #define ARM_DBGDSCR_ADADISCARD (1 << 19) 925 #define ARM_DBGDSCR_NS (1 << 18) 926 #define ARM_DBGDSCR_SPNIDDIS (1 << 17) 927 #define ARM_DBGDSCR_SPIDDIS (1 << 16) 928 #define ARM_DBGDSCR_MDBGEN (1 << 15) 929 #define ARM_DBGDSCR_HDBGEN (1 << 14) 930 #define ARM_DBGDSCR_ITREN (1 << 13) 931 #define ARM_DBGDSCR_UDCCDIS (1 << 12) 932 #define ARM_DBGDSCR_INTDIS (1 << 11) 933 #define ARM_DBGDSCR_DBGACK (1 << 10) 934 #define ARM_DBGDSCR_DBGNOPWRDWN (1 << 9) 935 #define ARM_DBGDSCR_UND_1 (1 << 8) 936 #define ARM_DBGDSCR_ADABORT_1 (1 << 7) 937 #define ARM_DBGDSCR_SDABORT_1 (1 << 6) 938 #define ARM_DBGDSCR_MOE_MASK (15 << 2) 939 #define ARM_DBGDSCR_MOE_HALT_REQUEST (0 << 2) 940 #define ARM_DBGDSCR_MOE_BREAKPOINT (1 << 2) 941 #define ARM_DBGDSCR_MOE_ASYNC_WATCHPOINT (2 << 2) 942 #define ARM_DBGDSCR_MOE_BKPT_INSTRUCTION (3 << 2) 943 #define ARM_DBGDSCR_MOE_EXT_DEBUG_REQ (4 << 2) 944 #define ARM_DBGDSCR_MOE_VECTOR_CATCH (5 << 2) 945 #define ARM_DBGDSCR_MOE_DSIDE_ABORT (6 << 2) 946 #define ARM_DBGDSCR_MOE_ISIDE_ABORT (7 << 2) 947 #define ARM_DBGDSCR_MOE_OS_UNLOCK_CATCH (8 << 2) 948 #define ARM_DBGDSCR_MOE_SYNC_WATCHPOINT (10 << 2) 949 950 #define ARM_DBGDSCR_RESTARTED (1 << 1) 951 #define ARM_DBGDSCR_HALTED (1 << 0) 952 953 /* 954 * Format of the Debug & Watchpoint Breakpoint Value and Control Registers 955 * Using ARMv7 names; ARMv6 and ARMv6.1 are bit-compatible 956 */ 957 #define ARM_DBG_VR_ADDRESS_MASK 0xFFFFFFFC /* BVR & WVR */ 958 #define ARM_DBGBVR_CONTEXTID_MASK 0xFFFFFFFF /* BVR only */ 959 960 #define ARM_DBG_CR_ADDRESS_MASK_MASK 0x1F000000 /* BCR & WCR */ 961 #define ARM_DBGBCR_MATCH_MASK (1 << 22) /* BCR only */ 962 #define ARM_DBGBCR_MATCH_MATCH (0 << 22) 963 #define ARM_DBGBCR_MATCH_MISMATCH (1 << 22) 964 #define ARM_DBGBCR_TYPE_MASK (1 << 21) /* BCR only */ 965 #define ARM_DBGBCR_TYPE_IVA (0 << 21) 966 #define 
#define ARM_DBGBCR_TYPE_CONTEXTID           (1 << 21)   /* match on context ID */
#define ARM_DBG_CR_LINKED_MASK              (1 << 20)   /* BCR & WCR */
#define ARM_DBG_CR_LINKED_LINKED            (1 << 20)
#define ARM_DBG_CR_LINKED_UNLINKED          (0 << 20)
#define ARM_DBG_CR_LINKED_BRP_MASK          0x000F0000  /* BCR & WCR: linked breakpoint number */
/* Security state(s) in which the break/watchpoint fires */
#define ARM_DBG_CR_SECURITY_STATE_MASK      (3 << 14)   /* BCR & WCR */
#define ARM_DBG_CR_SECURITY_STATE_BOTH      (0 << 14)
#define ARM_DBG_CR_SECURITY_STATE_NONSECURE (1 << 14)
#define ARM_DBG_CR_SECURITY_STATE_SECURE    (2 << 14)
#define ARM_DBG_CR_HIGHER_MODE_MASK         (1 << 13)   /* BCR & WCR */
#define ARM_DBG_CR_HIGHER_MODE_ENABLE       (1 << 13)
#define ARM_DBG_CR_HIGHER_MODE_DISABLE      (0 << 13)
#define ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK 0x00001FE0  /* WCR only */
#define ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK 0x000001E0  /* BCR & WCR */
#define ARM_DBGWCR_ACCESS_CONTROL_MASK      (3 << 3)    /* WCR only */
/*
 * NOTE: the ARM_DBCWCR_* spellings below are historical typos of the
 * ARM_DBGWCR_* prefix used by ARM_DBGWCR_ACCESS_CONTROL_MASK above.
 * They are kept for source compatibility; correctly spelled aliases
 * are provided alongside and should be preferred in new code.
 */
#define ARM_DBCWCR_ACCESS_CONTROL_LOAD      (1 << 3)    /* watch loads */
#define ARM_DBCWCR_ACCESS_CONTROL_STORE     (2 << 3)    /* watch stores */
#define ARM_DBCWCR_ACCESS_CONTROL_ANY       (3 << 3)    /* watch any access */
#define ARM_DBGWCR_ACCESS_CONTROL_LOAD      ARM_DBCWCR_ACCESS_CONTROL_LOAD
#define ARM_DBGWCR_ACCESS_CONTROL_STORE     ARM_DBCWCR_ACCESS_CONTROL_STORE
#define ARM_DBGWCR_ACCESS_CONTROL_ANY       ARM_DBCWCR_ACCESS_CONTROL_ANY
/* Privilege level(s) at which the break/watchpoint fires */
#define ARM_DBG_CR_MODE_CONTROL_MASK        (3 << 1)    /* BCR & WCR */
#define ARM_DBG_CR_MODE_CONTROL_U_S_S       (0 << 1)    /* BCR only */
#define ARM_DBG_CR_MODE_CONTROL_PRIVILEGED  (1 << 1)    /* BCR & WCR */
#define ARM_DBG_CR_MODE_CONTROL_USER        (2 << 1)    /* BCR & WCR */
#define ARM_DBG_CR_MODE_CONTROL_ANY         (3 << 1)    /* BCR & WCR */
#define ARM_DBG_CR_ENABLE_MASK              (1 << 0)    /* BCR & WCR */
#define ARM_DBG_CR_ENABLE_ENABLE            (1 << 0)
#define ARM_DBG_CR_ENABLE_DISABLE           (0 << 0)

/*
 * Format of the Device Power-down and Reset Status Register (DBGPRSR)
 */
#define ARM_DBGPRSR_STICKY_RESET_STATUS     (1 << 3)
#define ARM_DBGPRSR_RESET_STATUS            (1 << 2)
#define ARM_DBGPRSR_STICKY_POWERDOWN_STATUS (1 << 1)
#define ARM_DBGPRSR_POWERUP_STATUS          (1 << 0)

/*
 * Format of the OS Lock Access (DBGOSLAR) and Lock Access Registers (DBGLAR)
 * Writing this key value unlocks the corresponding lock register.
 */
#define ARM_DBG_LOCK_ACCESS_KEY             0xC5ACCE55
/* ARMv7 Debug register map: byte offsets of each register within the
 * memory-mapped debug register file. */
#define ARM_DEBUG_OFFSET_DBGDIDR        (0x000)
#define ARM_DEBUG_OFFSET_DBGWFAR        (0x018)
#define ARM_DEBUG_OFFSET_DBGVCR         (0x01C)
#define ARM_DEBUG_OFFSET_DBGECR         (0x024)
#define ARM_DEBUG_OFFSET_DBGDSCCR       (0x028)
#define ARM_DEBUG_OFFSET_DBGDSMCR       (0x02C)
#define ARM_DEBUG_OFFSET_DBGDTRRX       (0x080)
#define ARM_DEBUG_OFFSET_DBGITR         (0x084)  /* Write-only */
#define ARM_DEBUG_OFFSET_DBGPCSR        (0x084)  /* Read-only (shares the DBGITR offset) */
#define ARM_DEBUG_OFFSET_DBGDSCR        (0x088)
#define ARM_DEBUG_OFFSET_DBGDTRTX       (0x08C)
#define ARM_DEBUG_OFFSET_DBGDRCR        (0x090)
/* Breakpoint/watchpoint value and control registers: arrays of up to 16
 * word-sized entries each. */
#define ARM_DEBUG_OFFSET_DBGBVR         (0x100)  /* 0x100 - 0x13C */
#define ARM_DEBUG_OFFSET_DBGBCR         (0x140)  /* 0x140 - 0x17C */
#define ARM_DEBUG_OFFSET_DBGWVR         (0x180)  /* 0x180 - 0x1BC */
#define ARM_DEBUG_OFFSET_DBGWCR         (0x1C0)  /* 0x1C0 - 0x1FC */
#define ARM_DEBUG_OFFSET_DBGOSLAR       (0x300)
#define ARM_DEBUG_OFFSET_DBGOSLSR       (0x304)
#define ARM_DEBUG_OFFSET_DBGOSSRR       (0x308)
#define ARM_DEBUG_OFFSET_DBGPRCR        (0x310)
#define ARM_DEBUG_OFFSET_DBGPRSR        (0x314)
#define ARM_DEBUG_OFFSET_DBGITCTRL      (0xF00)
#define ARM_DEBUG_OFFSET_DBGCLAIMSET    (0xFA0)
#define ARM_DEBUG_OFFSET_DBGCLAIMCLR    (0xFA4)
#define ARM_DEBUG_OFFSET_DBGLAR         (0xFB0)
#define ARM_DEBUG_OFFSET_DBGLSR         (0xFB4)
#define ARM_DEBUG_OFFSET_DBGAUTHSTATUS  (0xFB8)
#define ARM_DEBUG_OFFSET_DBGDEVID       (0xFC8)
#define ARM_DEBUG_OFFSET_DBGDEVTYPE     (0xFCC)
#define ARM_DEBUG_OFFSET_DBGPID0        (0xFD0)
#define ARM_DEBUG_OFFSET_DBGPID1        (0xFD4)
#define ARM_DEBUG_OFFSET_DBGPID2        (0xFD8)
/* NOTE(review): 0xFDA is not word-aligned, unlike every other offset in
 * this map (4-byte stride would give 0xFDC here) — possible typo; verify
 * against the part's debug memory map before relying on it. */
#define ARM_DEBUG_OFFSET_DBGPID3        (0xFDA)
#define ARM_DEBUG_OFFSET_DBGPID4        (0xFDC)
#define ARM_DEBUG_OFFSET_DBGCID0        (0xFF0)
#define ARM_DEBUG_OFFSET_DBGCID1        (0xFF4)
#define ARM_DEBUG_OFFSET_DBGCID2        (0xFF8)
/* NOTE(review): 0xFFA is not word-aligned — same concern as DBGPID3
 * above; verify before use. */
#define ARM_DEBUG_OFFSET_DBGCID3        (0xFFA)
#define ARM_DEBUG_OFFSET_DBGCID4        (0xFFC)
(MVFR1) 1049 */ 1050 #define MVFR_ASIMD_HPFP 0x00100000UL 1051 1052 /* 1053 * Main ID Register (MIDR) 1054 * 1055 * 31 24 23 20 19 16 15 4 3 0 1056 * +-----+-----+------+------+-----+ 1057 * | IMP | VAR | ARCH | PNUM | REV | 1058 * +-----+-----+------+------+-----+ 1059 * 1060 * where: 1061 * IMP: Implementor code 1062 * VAR: Variant number 1063 * ARCH: Architecture code 1064 * PNUM: Primary part number 1065 * REV: Minor revision number 1066 */ 1067 #define MIDR_REV_SHIFT 0 1068 #define MIDR_REV_MASK (0xf << MIDR_REV_SHIFT) 1069 #define MIDR_PNUM_SHIFT 4 1070 #define MIDR_PNUM_MASK (0xfff << MIDR_PNUM_SHIFT) 1071 #define MIDR_ARCH_SHIFT 16 1072 #define MIDR_ARCH_MASK (0xf << MIDR_ARCH_SHIFT) 1073 #define MIDR_VAR_SHIFT 20 1074 #define MIDR_VAR_MASK (0xf << MIDR_VAR_SHIFT) 1075 #define MIDR_IMP_SHIFT 24 1076 #define MIDR_IMP_MASK (0xff << MIDR_IMP_SHIFT) 1077 1078 #ifdef __arm__ 1079 1080 /* Macros meant to make __builtin_arm_* functions easier to use. */ 1081 #define MRC_SCTLR 15,0,1,0,0 1082 #define MCR_SCTLR(x) 15,0,(x),1,0,0 1083 1084 #define MRC_ACTLR 15,0,1,0,1 1085 #define MCR_ACTLR(x) 15,0,(x),1,0,1 1086 1087 #endif /* __arm__ */ 1088 1089 1090 #endif /* _ARM_PROC_REG_H_ */