From 3cca1ec00ea1f875d16975691deca631d9e4a575 Mon Sep 17 00:00:00 2001 From: Ahmed Ismail Date: Thu, 31 Jul 2025 16:27:38 +0100 Subject: [PATCH 1/2] arm-cortex-r82: Add MPU support This commit introduces support for the Memory Protection Unit (MPU) to the ARM Cortex-R82 port. The MPU enhances system security by allowing the definition of memory regions with specific access permissions. The following changes have been made: - Added MPU configuration functions in `port.c` to set up memory regions and their attributes. - Updated `portASM.S` to include assembly routines for MPU and context switching with MPU support. - Created `mpu_wrappers_v2_asm.c` to provide assembly wrappers for MPU operations. - Updated `portmacro.h` to include MPU-related macros and definitions. - Modified `task.h` to include MPU-related task attributes. - Updated `CMakeLists.txt` to include the new MPU source file. - Enhanced the `README.md` with instructions on MPU configuration. Signed-off-by: Ahmed Ismail --- .github/.cSpellWords.txt | 14 + include/task.h | 9 +- portable/CMakeLists.txt | 4 +- portable/GCC/ARM_CR82/README.md | 8 +- portable/GCC/ARM_CR82/mpu_wrappers_v2_asm.c | 944 ++++++++++++++ portable/GCC/ARM_CR82/port.c | 1236 ++++++++++++++++--- portable/GCC/ARM_CR82/portASM.S | 875 ++++++++++--- portable/GCC/ARM_CR82/portmacro.h | 268 +++- 8 files changed, 2973 insertions(+), 385 deletions(-) create mode 100644 portable/GCC/ARM_CR82/mpu_wrappers_v2_asm.c diff --git a/.github/.cSpellWords.txt b/.github/.cSpellWords.txt index 2fc0d1455a4..cefff52da36 100644 --- a/.github/.cSpellWords.txt +++ b/.github/.cSpellWords.txt @@ -69,6 +69,7 @@ CANTX capitalisation cbmc CBMC +cbnz cbor CBOR CCIE @@ -107,6 +108,8 @@ CLKS CLKSOURCE CLKSTA CLRB +clrex +CLREX CLRF clrm CLRPSW @@ -378,6 +381,7 @@ IFSR imajeff INACK INDF +initialisations inpw INTE INTFRCH @@ -651,6 +655,8 @@ PPUDR PPUER PPUSR ppux +Prbar +PRBAR PRCR PREA PREB @@ -658,11 +664,15 @@ PRIA Prioritised PRIS PRIVDEFENA +Prlar +PRLAR PROCDLY PRODH PRODL PROGE Prokic +Prselr +PRSELR prtmacro psha psplim @@ -705,6 +715,7 @@ REENT REGA RELD Renesas +restoreallgpregisters reta reti RETP @@ -772,6 +783,8 @@ SCBR SCDR SCER SCSR +Sctlr +SCTLR SDCC SECU SENDA @@ -929,6 +942,7 @@ UNSUB UNSUBACK unsubscriptions unsuspended +unupdated UPAC URAD URAT diff --git a/include/task.h b/include/task.h index a25740e3bbd..7d0e1bae406 100644 --- a/include/task.h +++ b/include/task.h @@ -68,18 +68,21 @@ #if defined( portARMV8M_MINOR_VERSION ) && ( portARMV8M_MINOR_VERSION >= 1 ) #define tskMPU_REGION_PRIVILEGED_EXECUTE_NEVER ( 1U << 5U ) #endif /* portARMV8M_MINOR_VERSION >= 1 */ +#define tskMPU_REGION_NON_SHAREABLE ( 1U << 6U ) +#define tskMPU_REGION_OUTER_SHAREABLE ( 1U << 7U ) +#define tskMPU_REGION_INNER_SHAREABLE ( 1U << 8U ) /* MPU region permissions stored in MPU settings to * authorize access requests. */ -#define tskMPU_READ_PERMISSION ( 1U << 0U ) -#define tskMPU_WRITE_PERMISSION ( 1U << 1U ) +#define tskMPU_READ_PERMISSION ( 1U << 0U ) +#define tskMPU_WRITE_PERMISSION ( 1U << 1U ) /* The direct to task notification feature used to have only a single notification * per task. Now there is an array of notifications per task that is dimensioned by * configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the * original direct to task notification defaults to using the first index in the * array. */ -#define tskDEFAULT_INDEX_TO_NOTIFY ( 0 ) +#define tskDEFAULT_INDEX_TO_NOTIFY ( 0 ) /** * task. 
h diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 870d399fefa..9b891f03113 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -243,7 +243,8 @@ add_library(freertos_kernel_port OBJECT # ARMv8-R ports for GCC $<$: GCC/ARM_CR82/port.c - GCC/ARM_CR82/portASM.S> + GCC/ARM_CR82/portASM.S + GCC/ARM_CR82/mpu_wrappers_v2_asm.c> # ARMv4T ARM7TDMI ports for GCC $<$: @@ -822,6 +823,7 @@ if( FREERTOS_PORT MATCHES "GCC_ARM_CM(3|4)_MPU" OR FREERTOS_PORT MATCHES "GCC_ARM_CM(23|33|52|55|85)_NTZ_NONSECURE" OR FREERTOS_PORT MATCHES "GCC_ARM_CM(23|33|52|55|85)_NONSECURE" OR FREERTOS_PORT MATCHES "GCC_ARM_CM(33|52|55|85)_TFM" OR + FREERTOS_PORT MATCHES "GCC_ARM_CR82" OR FREERTOS_PORT MATCHES "IAR_ARM_CM(23|33|52|55|85)_NTZ_NONSECURE" OR FREERTOS_PORT MATCHES "IAR_ARM_CM(23|33|52|55|85)_NONSECURE" OR FREERTOS_PORT MATCHES "IAR_ARM_CM(33|52|55|85)_TFM" diff --git a/portable/GCC/ARM_CR82/README.md b/portable/GCC/ARM_CR82/README.md index 6a300391859..4b63bf90413 100644 --- a/portable/GCC/ARM_CR82/README.md +++ b/portable/GCC/ARM_CR82/README.md @@ -18,6 +18,12 @@ The port is supported and tested on the following toolchains: - The port does not perform cache maintenance for shared buffers. - If your hardware or model doesn't support full cache coherency, you must handle cache clean/invalidate operations, memory attributes, and any additional barriers in your BSP/application (especially around shared-memory regions). +# MPU Support + +- This port supports the FreeRTOS MPU on both single-core and SMP (multi-core) configurations. Enable via `configENABLE_MPU = 1`; the port programs MPU regions per task on each active core. + +- Minimum MPU granularity and alignment: 64 bytes. Ensure any user‑defined region base and size are 64‑byte aligned. + # SMP Multicore Bring-up For SMP systems using this port, the application only needs to start the scheduler on the primary core and issue an SVC from each secondary core once they are online. The kernel coordinates the rest and ensures all cores are properly managed. @@ -39,4 +45,4 @@ Secondary core flow (to be done in each core’s reset handler): 2. Wait for the primary core's signal that shared initialization is complete (i.e., `ucPrimaryCoreInitDoneFlag` set to 1). 3. Update `VBAR_EL1` from the boot vector table to the FreeRTOS vector table. 4. Initialize the GIC redistributor and enable SGIs so interrupts from the primary core are receivable; signal the primary that this secondary is online and ready by setting the its flag in the `ucSecondaryCoresReadyFlags` array. -5. Issue an SVC with immediate value `106` to enter `FreeRTOS_SWI_Handler`, which will call `vPortRestoreContext()` based on the SVC number to start scheduling on this core. +5. Issue an SVC with immediate value `106` (i.e., `portSVC_START_FIRST_TASK`) to enter `FreeRTOS_SWI_Handler`, which will call `vPortRestoreContext()` based on the SVC number to start scheduling on this core. diff --git a/portable/GCC/ARM_CR82/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CR82/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..2d08949d2f5 --- /dev/null +++ b/portable/GCC/ARM_CR82/mpu_wrappers_v2_asm.c @@ -0,0 +1,944 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright 2025 Arm Limited and/or its affiliates + * + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" +#include "mpu_prototypes.h" +#include "mpu_syscall_numbers.h" +#include "portmacro.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#if ( configENABLE_MPU == 1 ) + + /* + * Common single-SVC dispatch: wrappers do only one SVC. + * The SVC handler decides whether to tail-call the implementation directly + * (privileged) or set up full system-call state (unprivileged). 
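+ *
+ * Because the wrappers are naked functions, each one compiles down to the
+ * single SVC instruction below, with the system call number encoded as the
+ * SVC immediate; the AAPCS64 argument registers (x0-x7) still hold the
+ * caller's arguments, so the handler can forward them to the kernel
+ * implementation unchanged. Note that naked functions may contain only
+ * asm statements, which is why each definition body is just the dispatch
+ * macro and the attributes are recorded in a comment on the definition.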
+ */ + #define FREERTOS_MPU_SVC_DISPATCH( xSystemCallNumber ) \ + __asm volatile ( \ + "svc %0 \n" \ + : \ + : "i" ( xSystemCallNumber ) \ + : "memory" \ + ); + + #if ( INCLUDE_xTaskDelayUntil == 1 ) + + BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskDelayUntil ); + } + + #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskAbortDelay ); + } + + #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskDelay == 1 ) + + void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskDelay ); + } + + #endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTaskPriorityGet ); + } + + #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_eTaskGetState == 1 ) + + eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_eTaskGetState ); + } + + #endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskGetInfo ); + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGetIdleTaskHandle ); + } + + #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( 
INCLUDE_vTaskSuspend == 1 ) + + void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskSuspend ); + } + +/*-----------------------------------------------------------*/ + + void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskResume ); + } + + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + + TickType_t MPU_xTaskGetTickCount( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGetTickCount ); + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTaskGetNumberOfTasks ); + } +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGetRunTimeCounter ); + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGetRunTimePercent ); + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGetIdleRunTimePercent ); + } + +/*-----------------------------------------------------------*/ + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGetIdleRunTimeCounter ); + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void 
MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskSetApplicationTaskTag ); + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGetApplicationTaskTag ); + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskSetThreadLocalStoragePointer ); + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_pvTaskGetThreadLocalStoragePointer ); + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTaskGetSystemState ); + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTaskGetStackHighWaterMark ); + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; 
+ + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTaskGetStackHighWaterMark2 ); + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGetCurrentTaskHandle ); + } + + #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetSchedulerState == 1 ) + + BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGetSchedulerState ); + } + + #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + + void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTaskSetTimeOutState ); + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskCheckForTimeOut ); + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskGenericNotifyEntry( const xTaskGenericNotifyParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGenericNotify ); + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskGenericNotifyWaitEntry( const xTaskGenericNotifyWaitParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGenericNotifyWait ); + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + 
TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGenericNotifyTake ); + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTaskGenericNotifyStateClear ); + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_ulTaskGenericNotifyValueClear ); + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueGenericSend ); + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxQueueMessagesWaiting ); + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxQueueSpacesAvailable ); + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueReceive ); + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + 
FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueuePeek ); + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueSemaphoreTake ); + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueGetMutexHolder ); + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueTakeMutexRecursive ); + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueGiveMutexRecursive ); + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueSelectFromSet ); + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xQueueAddToSet ); + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( 
SYSTEM_CALL_vQueueAddToRegistry ); + } + + #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + + void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vQueueUnregisterQueue ); + } + + #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + + const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_pcQueueGetName ); + } + + #endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_pvTimerGetTimerID ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTimerSetTimerID ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerIsTimerActive ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerGetTimerDaemonTaskHandle ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGenericCommandFromTaskEntry( const xTimerGenericCommandFromTaskParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTimerGenericCommandFromTaskEntry( const xTimerGenericCommandFromTaskParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerGenericCommandFromTask ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + 
+ const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_pcTimerGetName ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t xAutoReload ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t xAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vTimerSetReloadMode ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerGetReloadMode ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxTimerGetReloadMode ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerGetPeriod ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xTimerGetExpiryTime ); + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_EVENT_GROUPS == 1 ) + + EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + EventBits_t MPU_xEventGroupWaitBitsEntry( const xEventGroupWaitBitsParams_t * pxParams ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xEventGroupWaitBits ); + } + + #endif /* #if ( configUSE_EVENT_GROUPS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_EVENT_GROUPS == 1 ) + + EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xEventGroupClearBits ); + } + + #endif /* #if ( configUSE_EVENT_GROUPS 
== 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_EVENT_GROUPS == 1 ) + + EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xEventGroupSetBits ); + } + + #endif /* #if ( configUSE_EVENT_GROUPS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_EVENT_GROUPS == 1 ) + + EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xEventGroupSync ); + } + + #endif /* #if ( configUSE_EVENT_GROUPS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_EVENT_GROUPS == 1 ) && ( configUSE_TRACE_FACILITY == 1 ) ) + + UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_uxEventGroupGetNumber ); + } + + #endif /* #if ( ( configUSE_EVENT_GROUPS == 1 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_EVENT_GROUPS == 1 ) && ( configUSE_TRACE_FACILITY == 1 ) ) + + void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_vEventGroupSetNumber ); + } + + #endif /* #if ( ( configUSE_EVENT_GROUPS == 1 ) && ( configUSE_TRACE_FACILITY == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferSend ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( 
SYSTEM_CALL_xStreamBufferReceive ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferIsFull ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferIsEmpty ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferSpacesAvailable ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferBytesAvailable ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferSetTriggerLevel ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_STREAM_BUFFERS == 1 ) + + size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__( ( naked ) ) FREERTOS_SYSTEM_CALL; + + size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ + { + FREERTOS_MPU_SVC_DISPATCH( SYSTEM_CALL_xStreamBufferNextMessageLengthBytes ); + } + + #endif /* #if ( configUSE_STREAM_BUFFERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) */ diff --git a/portable/GCC/ARM_CR82/port.c b/portable/GCC/ARM_CR82/port.c index fa150b37507..cc08c9f240d 100644 --- a/portable/GCC/ARM_CR82/port.c +++ b/portable/GCC/ARM_CR82/port.c @@ -32,10 +32,21 @@ #include #include +/* Defining 
MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + /* Scheduler includes. */ #include "FreeRTOS.h" #include "task.h" +/* MPU includes. */ +#include "mpu_wrappers.h" +#include "mpu_syscall_numbers.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + #ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS #error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */ #endif @@ -84,6 +95,14 @@ #error configNUMBER_OF_CORES must be set to 1 or greater. If the application is not using multiple cores then set configNUMBER_OF_CORES to 1. #endif /* configNUMBER_OF_CORES < 1 */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* #if ( configENABLE_MPU == 1 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 != 0) ) + #error Arm Cortex-R82 port supports only MPU Wrapper V2. +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 != 0) */ + /* A critical section is exited when the critical section nesting count reaches * this value. */ #define portNO_CRITICAL_NESTING ( ( size_t ) 0 ) @@ -91,6 +110,25 @@ /* Macro to unmask all interrupt priorities. */ #define portCLEAR_INTERRUPT_PRIORITIES_MASK() __asm volatile ( "SVC %0" : : "i" ( portSVC_UNMASK_ALL_INTERRUPTS ) : "memory" ) +/* Macro to unmask all interrupt priorities from EL1. */ +#define portCLEAR_INTERRUPT_PRIORITIES_MASK_FROM_EL1() \ +{ \ + __asm volatile ( \ + " MSR DAIFSET, # 2 \n" \ + " DSB SY \n" \ + " ISB SY \n" \ + " MOV X0, %0 \n" \ + " MSR ICC_PMR_EL1, X0 \n" \ + " DSB SY \n" \ + " ISB SY \n" \ + " MSR DAIFCLR, # 2 \n" \ + " DSB SY \n" \ + " ISB SY \n" \ + : \ + : "i" ( portUNMASK_VALUE ) \ + ); \ +} + /* Tasks are not created with a floating point context, but can be given a * floating point context after they have been created. A variable is stored as * part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task @@ -104,7 +142,8 @@ #define portEL1 ( ( StackType_t ) 0x04 ) #define portEL0 ( ( StackType_t ) 0x00 ) -#define portINITIAL_PSTATE ( portEL0 | portSP_EL0 ) +#define portINITIAL_PSTATE_EL0 ( portEL0 | portSP_EL0 ) +#define portINITIAL_PSTATE_EL1 ( portEL1 | portSP_EL0 ) /* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary * point is zero. */ @@ -125,6 +164,66 @@ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Setup the Memory Protection Unit (MPU). + */ + PRIVILEGED_FUNCTION void vSetupMPU( void ); + + /** + * @brief Enable the Memory Protection Unit (MPU). + */ + PRIVILEGED_FUNCTION void vEnableMPU( void ); + + /** + * @brief Called from an ISR and returns the core ID the code is executing on. + * + * @return uint8_t The core ID. + */ + PRIVILEGED_FUNCTION uint8_t ucPortGetCoreIDFromIsr( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + PRIVILEGED_FUNCTION BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Extract MPU region's access permissions from the Protection Region Base Address Register + * (PRBAR_EL1) value. 
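+ *
+ * The returned value is expressed as a tskMPU_READ_PERMISSION /
+ * tskMPU_WRITE_PERMISSION bitmask, matching the permission bits stored
+ * in the task's MPU settings to authorise access requests.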
+ * + * @param ullPrbarEl1Value PRBAR_EL1 value for the MPU region. + * + * @return uint32_t Access permissions. + */ + PRIVILEGED_FUNCTION static uint32_t prvGetRegionAccessPermissions( uint64_t ullPrbarEl1Value ); + + /** + * @brief Does the necessary work to enter a system call. + * + * @param pullPrivilegedOnlyTaskStack The task's privileged SP when the SVC was raised. + * @param ucSystemCallNumber The system call number of the system call. + */ + PRIVILEGED_FUNCTION void vSystemCallEnter( uint64_t * pullPrivilegedOnlyTaskStack, + uint8_t ucSystemCallNumber ); + + /** + * @brief Raise SVC for exiting from a system call. + */ + PRIVILEGED_FUNCTION __attribute__( ( naked ) ) void vRequestSystemCallExit( void ); + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param ullSystemCallReturnValue The actual system call return value. + */ + PRIVILEGED_FUNCTION void vSystemCallExit( uint64_t ullSystemCallReturnValue ); + +#endif /* #if ( configENABLE_MPU == 1 ) */ + /* * Starts the first task executing. This function is necessarily written in * assembly code so is implemented in portASM.s. @@ -139,163 +238,913 @@ extern void vGIC_EnableCPUInterface( void ); /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES == 1 ) - volatile uint64_t ullCriticalNesting = 0ULL; + PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL; /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero * then floating point context must be saved and restored for the task. */ - uint64_t ullPortTaskHasFPUContext = pdFALSE; + PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE; /* Set to 1 to pend a context switch from an ISR. */ - uint64_t ullPortYieldRequired = pdFALSE; + PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE; /* Counts the interrupt nesting depth. A context switch is only performed if * if the nesting depth is 0. */ - uint64_t ullPortInterruptNesting = 0; + PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0; #else /* #if ( configNUMBER_OF_CORES == 1 ) */ - volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 }; + PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 }; + /* Flags to check if the secondary cores are ready. */ + PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 }; + PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0; /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero * then floating point context must be saved and restored for the task. */ - uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE }; - uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE }; - uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 }; + PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE }; + PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE }; + PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 }; - /* Flags to check if the secondary cores are ready. */ - volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 }; - volatile uint8_t ucPrimaryCoreInitDoneFlag = 0; #endif /* #if ( configNUMBER_OF_CORES == 1 ) */ -/* Used in the ASM code. 
*/ -__attribute__( ( used ) ) const uint64_t ullMaxAPIPriorityMask = ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ); +#if ( configENABLE_MPU == 1 ) + /* Set to pdTRUE when the scheduler is started. */ + PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE; +#endif /* ( configENABLE_MPU == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) + { + uint32_t ulIndex = 0; + + /* Layout must match portRESTORE_CONTEXT pop order (descending stack): + * 1) FPU flag, 2) Critical nesting, 3) Optional FPU save area, + * 4) ELR (PC), 5) SPSR (PSTATE), 6) GPRs in restore order pairs. + */ + + /* 1) FPU flag and 2) Critical nesting. */ + #if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) + xMPUSettings->ullContext[ ulIndex++ ] = portNO_FLOATING_POINT_CONTEXT; /* FPU flag */ + xMPUSettings->ullContext[ ulIndex++ ] = portNO_CRITICAL_NESTING; /* Critical nesting */ + #elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT ) + xMPUSettings->ullContext[ ulIndex++ ] = pdTRUE; /* FPU flag */ + xMPUSettings->ullContext[ ulIndex++ ] = portNO_CRITICAL_NESTING; /* Critical nesting */ + #if ( configNUMBER_OF_CORES == 1 ) + ullPortTaskHasFPUContext = pdTRUE; + #else + ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE; + #endif + #else + #error "Invalid configUSE_TASK_FPU_SUPPORT setting - must be 1 or 2." + #endif + + /* 3) Optional FPU save area immediately after the flag+critical pair. */ + #if ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT ) + memset( &xMPUSettings->ullContext[ ulIndex ], 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) ); + ulIndex += portFPU_REGISTER_WORDS; + #endif + + /* 4) ELR (PC) and 5) SPSR (PSTATE). */ + xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) pxCode; /* ELR */ + + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ullContext[ ulIndex++ ] = portINITIAL_PSTATE_EL1; /* SPSR */ + } + else + { + xMPUSettings->ullContext[ ulIndex++ ] = portINITIAL_PSTATE_EL0; /* SPSR */ + } + + /* 6) General-purpose registers in the order expected by restoreallgpregisters. 
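+ * As in the non-MPU stack frame, X30 is paired with a dummy XZR entry so
+ * the context holds an even number of 64-bit registers and can be
+ * restored with paired loads.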
*/ + xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) 0x00; /* X30 (LR) */ + xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) 0x00; /* XZR (dummy) */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x2828282828282828ULL; /* X28 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x2929292929292929ULL; /* X29 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x2626262626262626ULL; /* X26 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x2727272727272727ULL; /* X27 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x2424242424242424ULL; /* X24 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x2525252525252525ULL; /* X25 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x2222222222222222ULL; /* X22 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x2323232323232323ULL; /* X23 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x2020202020202020ULL; /* X20 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x2121212121212121ULL; /* X21 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x1818181818181818ULL; /* X18 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x1919191919191919ULL; /* X19 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x1616161616161616ULL; /* X16 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x1717171717171717ULL; /* X17 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x1414141414141414ULL; /* X14 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x1515151515151515ULL; /* X15 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x1212121212121212ULL; /* X12 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x1313131313131313ULL; /* X13 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x1010101010101010ULL; /* X10 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x1111111111111111ULL; /* X11 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x0808080808080808ULL; /* X8 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x0909090909090909ULL; /* X9 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x0606060606060606ULL; /* X6 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x0707070707070707ULL; /* X7 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x0404040404040404ULL; /* X4 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x0505050505050505ULL; /* X5 */ + + xMPUSettings->ullContext[ ulIndex++ ] = 0x0202020202020202ULL; /* X2 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x0303030303030303ULL; /* X3 */ + + xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) pvParameters; /* X0 */ + xMPUSettings->ullContext[ ulIndex++ ] = 0x0101010101010101ULL; /* X1 */ + + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ullTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + } + else + { + xMPUSettings->ullTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + } + + xMPUSettings->ullTaskUnprivilegedSP = ( ( uint64_t ) pxTopOfStack & portMPU_PRBAR_EL1_ADDRESS_MASK ); + + return &( xMPUSettings->ullContext[ 0 ] ); + } + +#else /* #if ( configENABLE_MPU == 1 ) */ + + /* + * See header file for description. + */ + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + TaskFunction_t pxCode, + void * pvParameters ) + { + /* Setup the initial stack of the task. The stack is set exactly as + * expected by the portRESTORE_CONTEXT() macro. */ + + /* First all the general purpose registers. 
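+ * They are pushed in the reverse of the order in which
+ * portRESTORE_CONTEXT() reloads them; pvParameters is placed in the X0
+ * slot so the task function receives its argument on first entry.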
*/ + pxTopOfStack--; + *pxTopOfStack = 0x0101010101010101ULL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = 0x0303030303030303ULL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = 0x0202020202020202ULL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = 0x0505050505050505ULL; /* R5 */ + pxTopOfStack--; + *pxTopOfStack = 0x0404040404040404ULL; /* R4 */ + pxTopOfStack--; + *pxTopOfStack = 0x0707070707070707ULL; /* R7 */ + pxTopOfStack--; + *pxTopOfStack = 0x0606060606060606ULL; /* R6 */ + pxTopOfStack--; + *pxTopOfStack = 0x0909090909090909ULL; /* R9 */ + pxTopOfStack--; + *pxTopOfStack = 0x0808080808080808ULL; /* R8 */ + pxTopOfStack--; + *pxTopOfStack = 0x1111111111111111ULL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = 0x1010101010101010ULL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = 0x1313131313131313ULL; /* R13 */ + pxTopOfStack--; + *pxTopOfStack = 0x1212121212121212ULL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = 0x1515151515151515ULL; /* R15 */ + pxTopOfStack--; + *pxTopOfStack = 0x1414141414141414ULL; /* R14 */ + pxTopOfStack--; + *pxTopOfStack = 0x1717171717171717ULL; /* R17 */ + pxTopOfStack--; + *pxTopOfStack = 0x1616161616161616ULL; /* R16 */ + pxTopOfStack--; + *pxTopOfStack = 0x1919191919191919ULL; /* R19 */ + pxTopOfStack--; + *pxTopOfStack = 0x1818181818181818ULL; /* R18 */ + pxTopOfStack--; + *pxTopOfStack = 0x2121212121212121ULL; /* R21 */ + pxTopOfStack--; + *pxTopOfStack = 0x2020202020202020ULL; /* R20 */ + pxTopOfStack--; + *pxTopOfStack = 0x2323232323232323ULL; /* R23 */ + pxTopOfStack--; + *pxTopOfStack = 0x2222222222222222ULL; /* R22 */ + pxTopOfStack--; + *pxTopOfStack = 0x2525252525252525ULL; /* R25 */ + pxTopOfStack--; + *pxTopOfStack = 0x2424242424242424ULL; /* R24 */ + pxTopOfStack--; + *pxTopOfStack = 0x2727272727272727ULL; /* R27 */ + pxTopOfStack--; + *pxTopOfStack = 0x2626262626262626ULL; /* R26 */ + pxTopOfStack--; + *pxTopOfStack = 0x2929292929292929ULL; /* R29 */ + pxTopOfStack--; + *pxTopOfStack = 0x2828282828282828ULL; /* R28 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x00; /* XZR - has no effect, used so there are an even number of registers. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x00; /* R30 - procedure call link register. */ + + pxTopOfStack--; + *pxTopOfStack = portINITIAL_PSTATE_EL0; + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* Exception return address. */ + + #if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) + { + /* The task will start with a critical nesting count of 0 as interrupts are + * enabled. */ + pxTopOfStack--; + *pxTopOfStack = portNO_CRITICAL_NESTING; + + /* The task will start without a floating point context. A task that + * uses the floating point hardware must call vPortTaskUsesFPU() before + * executing any floating point instructions. */ + pxTopOfStack--; + *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT; + } + #elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT ) + { + /* The task will start with a floating point context. Leave enough + * space for the registers - and ensure they are initialised to 0. */ + pxTopOfStack -= portFPU_REGISTER_WORDS; + memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) ); + + /* The task will start with a critical nesting count of 0 as interrupts are + * enabled. 
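+ * The nesting count and the FPU-context flag are written last so that
+ * they end up at the lowest addresses of the initial frame, i.e. the
+ * first two values popped by portRESTORE_CONTEXT().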
*/ + pxTopOfStack--; + *pxTopOfStack = portNO_CRITICAL_NESTING; + + pxTopOfStack--; + *pxTopOfStack = pdTRUE; + #if ( configNUMBER_OF_CORES == 1 ) + ullPortTaskHasFPUContext = pdTRUE; + #else + ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE; + #endif + } + #else /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */ + { + #error "Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined." + } + #endif /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */ + return pxTopOfStack; + + } + +#endif /* #if ( configENABLE_MPU == 1 ) */ /*-----------------------------------------------------------*/ -/* - * See header file for description. - */ -StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - TaskFunction_t pxCode, - void * pvParameters ) -{ - /* Setup the initial stack of the task. The stack is set exactly as - * expected by the portRESTORE_CONTEXT() macro. */ - - /* First all the general purpose registers. */ - pxTopOfStack--; - *pxTopOfStack = 0x0101010101010101ULL; /* R1 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack--; - *pxTopOfStack = 0x0303030303030303ULL; /* R3 */ - pxTopOfStack--; - *pxTopOfStack = 0x0202020202020202ULL; /* R2 */ - pxTopOfStack--; - *pxTopOfStack = 0x0505050505050505ULL; /* R5 */ - pxTopOfStack--; - *pxTopOfStack = 0x0404040404040404ULL; /* R4 */ - pxTopOfStack--; - *pxTopOfStack = 0x0707070707070707ULL; /* R7 */ - pxTopOfStack--; - *pxTopOfStack = 0x0606060606060606ULL; /* R6 */ - pxTopOfStack--; - *pxTopOfStack = 0x0909090909090909ULL; /* R9 */ - pxTopOfStack--; - *pxTopOfStack = 0x0808080808080808ULL; /* R8 */ - pxTopOfStack--; - *pxTopOfStack = 0x1111111111111111ULL; /* R11 */ - pxTopOfStack--; - *pxTopOfStack = 0x1010101010101010ULL; /* R10 */ - pxTopOfStack--; - *pxTopOfStack = 0x1313131313131313ULL; /* R13 */ - pxTopOfStack--; - *pxTopOfStack = 0x1212121212121212ULL; /* R12 */ - pxTopOfStack--; - *pxTopOfStack = 0x1515151515151515ULL; /* R15 */ - pxTopOfStack--; - *pxTopOfStack = 0x1414141414141414ULL; /* R14 */ - pxTopOfStack--; - *pxTopOfStack = 0x1717171717171717ULL; /* R17 */ - pxTopOfStack--; - *pxTopOfStack = 0x1616161616161616ULL; /* R16 */ - pxTopOfStack--; - *pxTopOfStack = 0x1919191919191919ULL; /* R19 */ - pxTopOfStack--; - *pxTopOfStack = 0x1818181818181818ULL; /* R18 */ - pxTopOfStack--; - *pxTopOfStack = 0x2121212121212121ULL; /* R21 */ - pxTopOfStack--; - *pxTopOfStack = 0x2020202020202020ULL; /* R20 */ - pxTopOfStack--; - *pxTopOfStack = 0x2323232323232323ULL; /* R23 */ - pxTopOfStack--; - *pxTopOfStack = 0x2222222222222222ULL; /* R22 */ - pxTopOfStack--; - *pxTopOfStack = 0x2525252525252525ULL; /* R25 */ - pxTopOfStack--; - *pxTopOfStack = 0x2424242424242424ULL; /* R24 */ - pxTopOfStack--; - *pxTopOfStack = 0x2727272727272727ULL; /* R27 */ - pxTopOfStack--; - *pxTopOfStack = 0x2626262626262626ULL; /* R26 */ - pxTopOfStack--; - *pxTopOfStack = 0x2929292929292929ULL; /* R29 */ - pxTopOfStack--; - *pxTopOfStack = 0x2828282828282828ULL; /* R28 */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x00; /* XZR - has no effect, used so there are an even number of registers. */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x00; /* R30 - procedure call link register. */ - - pxTopOfStack--; - *pxTopOfStack = portINITIAL_PSTATE; - - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* Exception return address. 
*/ - - #if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) - { - /* The task will start with a critical nesting count of 0 as interrupts are - * enabled. */ - pxTopOfStack--; - *pxTopOfStack = portNO_CRITICAL_NESTING; - - /* The task will start without a floating point context. A task that - * uses the floating point hardware must call vPortTaskUsesFPU() before - * executing any floating point instructions. */ - pxTopOfStack--; - *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT; - } - #elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT ) - { - /* The task will start with a floating point context. Leave enough - * space for the registers - and ensure they are initialised to 0. */ - pxTopOfStack -= portFPU_REGISTER_WORDS; - memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) ); - - /* The task will start with a critical nesting count of 0 as interrupts are - * enabled. */ - pxTopOfStack--; - *pxTopOfStack = portNO_CRITICAL_NESTING; - - pxTopOfStack--; - *pxTopOfStack = pdTRUE; +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Store a task's MPU settings in its TCB. + * + * @ingroup Task Context + * @ingroup MPU Control + * + * @param xMPUSettings The MPU settings in TCB. + * @param xRegions The MPU regions requested by the task. + * @param pxBottomOfStack The base address of the task's Stack. + * @param xStackDepth The length of the task's stack. + */ + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + StackType_t xStackDepth ) /* PRIVILEGED_FUNCTION */ + { + uint64_t ullRegionStartAddress, ullRegionEndAddress; + uint8_t ucIndex = 0, ucRegionNumber; + + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint64_t * __privileged_sram_start__; + extern uint64_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint64_t __privileged_sram_start__[]; + extern uint64_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR_EL1. */ + xMPUSettings->ullMairEl1 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_EL1_ATTR0_POS ) & portMPU_MAIR_EL1_ATTR0_MASK ); + xMPUSettings->ullMairEl1 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_EL1_ATTR1_POS ) & portMPU_MAIR_EL1_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( xStackDepth > 0 ) + { + ullRegionStartAddress = ( uint64_t ) pxBottomOfStack; + ullRegionEndAddress = ( uint64_t ) pxBottomOfStack + ( xStackDepth * ( configSTACK_DEPTH_TYPE ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because this + * region is already protected using an MPU region and ARMv8-R does + * not allow overlapping MPU regions. 
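 *
 * Illustration (a sketch, not part of the port): a restricted task whose
 * stack and single user region respect the port's 64-byte MPU granularity.
 * The prvUserTask / xUserStack / ucBuffer names are hypothetical;
 * TaskParameters_t and xTaskCreateRestricted() are the standard FreeRTOS
 * MPU APIs that reach this function via vPortStoreTaskMPUSettings():
 *
 *     static StackType_t xUserStack[ 128 ] __attribute__( ( aligned( 64 ) ) );
 *     static uint8_t ucBuffer[ 256 ] __attribute__( ( aligned( 64 ) ) );
 *
 *     static const TaskParameters_t xUserTaskParameters =
 *     {
 *         .pvTaskCode     = prvUserTask,
 *         .pcName         = "user",
 *         .usStackDepth   = 128,
 *         .pvParameters   = NULL,
 *         .uxPriority     = tskIDLE_PRIORITY + 1,
 *         .puxStackBuffer = xUserStack,
 *         .xRegions       =
 *         {
 *             { ucBuffer, sizeof( ucBuffer ),
 *               tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER }
 *         }
 *     };
 *
 *     xTaskCreateRestricted( &xUserTaskParameters, NULL );
 *
 * Leaving portPRIVILEGE_BIT out of uxPriority makes the task unprivileged.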
+             */
+            if( ( ullRegionStartAddress >= ( uint64_t ) __privileged_sram_start__ ) &&
+                ( ullRegionEndAddress <= ( uint64_t ) __privileged_sram_end__ ) )
+            {
+                xMPUSettings->xRegionsSettings[ portSTACK_REGION ].ullPrbarEl1 = 0;
+                xMPUSettings->xRegionsSettings[ portSTACK_REGION ].ullPrlarEl1 = 0;
+            }
+            else
+            {
+                /* Define the region that allows access to the stack. */
+                ullRegionStartAddress &= portMPU_PRBAR_EL1_ADDRESS_MASK;
+                ullRegionEndAddress &= portMPU_PRLAR_EL1_ADDRESS_MASK;
+
+                xMPUSettings->xRegionsSettings[ portSTACK_REGION ].ullPrbarEl1 = ( ullRegionStartAddress ) |
+                                                                                 ( portMPU_REGION_INNER_SHAREABLE ) |
+                                                                                 ( portMPU_REGION_READ_WRITE ) |
+                                                                                 ( portMPU_REGION_EXECUTE_NEVER );
+
+                xMPUSettings->xRegionsSettings[ portSTACK_REGION ].ullPrlarEl1 = ( ullRegionEndAddress ) |
+                                                                                 ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
+                                                                                 ( portMPU_PRLAR_EL1_REGION_ENABLE );
+            }
+        }
+
+        /* User supplied configurable regions. */
+        for( ucRegionNumber = 1; ucRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ucRegionNumber++ )
+        {
+            /* If xRegions is NULL i.e. the task has not specified any MPU
+             * region, the else part ensures that all the configurable MPU
+             * regions are invalidated.
+             * The minimum region size is 64 bytes.
+             */
+            if( ( xRegions != NULL ) && ( xRegions[ ucIndex ].ulLengthInBytes > 64UL ) )
+            {
+                uint64_t ullPrbarEl1RegValue, ullPrlarEl1RegValue;
+
+                /* Translate the generic region definition contained in xRegions
+                 * into the ARMv8-R specific MPU settings that are then stored in
+                 * xMPUSettings.
+                 */
+                ullRegionStartAddress = ( ( uint64_t ) xRegions[ ucIndex ].pvBaseAddress ) & portMPU_PRBAR_EL1_ADDRESS_MASK;
+                ullRegionEndAddress = ( uint64_t ) xRegions[ ucIndex ].pvBaseAddress + xRegions[ ucIndex ].ulLengthInBytes - 1;
+                ullRegionEndAddress &= portMPU_PRLAR_EL1_ADDRESS_MASK;
+
+                /* Check for overlaps with other user defined regions and the
+                 * stack region, which are already configured. */
+                for( uint8_t ucUserRegionNumber = 0; ucUserRegionNumber < portNUM_CONFIGURABLE_REGIONS; ucUserRegionNumber++ )
+                {
+                    /* Check for overlap. */
+                    if( ( portIS_ADDRESS_WITHIN_RANGE( ullRegionStartAddress,
+                                                       ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrbarEl1 & portMPU_PRBAR_EL1_ADDRESS_MASK ),
+                                                       ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrlarEl1 & portMPU_PRLAR_EL1_ADDRESS_MASK ) ) ||
+                          ( portIS_ADDRESS_WITHIN_RANGE( ullRegionEndAddress,
+                                                         ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrbarEl1 & portMPU_PRBAR_EL1_ADDRESS_MASK ),
+                                                         ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrlarEl1 & portMPU_PRLAR_EL1_ADDRESS_MASK ) ) ) ) )
+                    {
+                        /* Overlap detected - assert. */
+                        configASSERT( NULL );
+                    }
+                }
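                /* Editorial aside: both overlap tests rely on
                 * portIS_ADDRESS_WITHIN_RANGE() being the usual inclusive
                 * range check; a plausible shape (the real definition lives
                 * in portmacro.h, so this is an assumption):
                 *
                 *     #define portIS_ADDRESS_WITHIN_RANGE( ullAddr, ullStart, ullEnd ) \
                 *         ( ( ( ullAddr ) >= ( ullStart ) ) && ( ( ullAddr ) <= ( ullEnd ) ) )
                 *
                 * Note that testing only the new region's start and end
                 * addresses will not flag a new region that strictly contains
                 * an existing one; keep user regions disjoint by
                 * construction. */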
+                /* Check for overlaps with the kernel programmed regions,
+                 * which were already programmed as part of vSetupMPU(). The
+                 * first four regions are reserved for the kernel. */
+                for( uint8_t ucProgrammedRegionIndex = 0; ucProgrammedRegionIndex < 4; ucProgrammedRegionIndex++ )
+                {
+                    __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ( uint64_t ) ucProgrammedRegionIndex ) );
+
+                    __asm volatile ( "mrs %0, PRBAR_EL1" : "=r" ( ullPrbarEl1RegValue ) );
+                    ullPrbarEl1RegValue &= portMPU_PRBAR_EL1_ADDRESS_MASK;
+
+                    __asm volatile ( "mrs %0, PRLAR_EL1" : "=r" ( ullPrlarEl1RegValue ) );
+                    ullPrlarEl1RegValue &= portMPU_PRLAR_EL1_ADDRESS_MASK;
+
+                    /* Check for overlap. */
+                    if( ( portIS_ADDRESS_WITHIN_RANGE( ullRegionStartAddress,
+                                                       ullPrbarEl1RegValue,
+                                                       ullPrlarEl1RegValue ) ) ||
+                        ( portIS_ADDRESS_WITHIN_RANGE( ullRegionEndAddress,
+                                                       ullPrbarEl1RegValue,
+                                                       ullPrlarEl1RegValue ) ) )
+                    {
+                        /* Overlap detected - assert. */
+                        configASSERT( NULL );
+                    }
+                }
+
+                /* Start address. */
+                xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 = ( ullRegionStartAddress );
+
+                /* RO/RW. */
+                if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_READ_ONLY );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_READ_WRITE );
+                }
+
+                /* XN. */
+                if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_EXECUTE_NEVER );
+                }
+
+                /* SH. */
+                if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_INNER_SHAREABLE ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_INNER_SHAREABLE );
+                }
+                else if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_OUTER_SHAREABLE ) != 0 )
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_OUTER_SHAREABLE );
+                }
+                else
+                {
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_NON_SHAREABLE );
+                }
+
+                /* End Address. */
+                xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 = ( ullRegionEndAddress ) |
+                                                                               ( portMPU_PRLAR_EL1_REGION_ENABLE );
+
+                /* Normal memory / Device memory. */
+                if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
+                {
+                    /* Attr1 in MAIR_EL1 is configured as device memory. */
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 |= portMPU_PRLAR_EL1_ATTR_INDEX1;
+                }
+                else
+                {
+                    /* Attr0 in MAIR_EL1 is configured as normal memory. */
+                    xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 |= portMPU_PRLAR_EL1_ATTR_INDEX0;
+                }
+            }
+            else
+            {
+                /* Invalidate the region. */
+                xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 = 0UL;
+                xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 = 0UL;
+            }
+
+            ucIndex++;
+        }
+
+    }
+    /*-----------------------------------------------------------*/
+
+    void vSetupMPU( void ) /* PRIVILEGED_FUNCTION */
+    {
+        #if defined( __ARMCC_VERSION )
+            /* Declaration when these variables are defined in code instead of
+             * being exported from linker scripts.
+             */
+            extern uint64_t * __privileged_functions_start__;
+            extern uint64_t * __privileged_functions_end__;
+            extern uint64_t * __syscalls_flash_start__;
+            extern uint64_t * __syscalls_flash_end__;
+            extern uint64_t * __unprivileged_flash_start__;
+            extern uint64_t * __unprivileged_flash_end__;
+            extern uint64_t * __privileged_sram_start__;
+            extern uint64_t * __privileged_sram_end__;
+        #else /* if defined( __ARMCC_VERSION ) */
+            /* Declaration when these variables are exported from linker scripts. */
+            extern uint64_t __privileged_functions_start__[];
+            extern uint64_t __privileged_functions_end__[];
+            extern uint64_t __syscalls_flash_start__[];
+            extern uint64_t __syscalls_flash_end__[];
+            extern uint64_t __unprivileged_flash_start__[];
+            extern uint64_t __unprivileged_flash_end__[];
+            extern uint64_t __privileged_sram_start__[];
+            extern uint64_t __privileged_sram_end__[];
+        #endif /* defined( __ARMCC_VERSION ) */
+
+        /* The only permitted numbers of regions are 16 and 32. */
+        configASSERT( ( configTOTAL_MPU_REGIONS == 16 ) || ( configTOTAL_MPU_REGIONS == 32 ) );
+
+        /* MAIR_EL1 - Index 0. */
+        uint64_t ullMairEl1RegValue = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_EL1_ATTR0_POS ) & portMPU_MAIR_EL1_ATTR0_MASK );
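        /* Editorial aside: with the architecturally common encodings (an
         * assumption about this port's macro values)
         *     portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE = 0xFF   (Normal, write-back)
         *     portMPU_DEVICE_MEMORY_nGnRE                = 0x04   (Device-nGnRE)
         * the two statements here pack MAIR_EL1 as ( 0xFF << 0 ) | ( 0x04 << 8 ),
         * i.e. 0x04FF, so PRLAR attribute index 0 selects Normal memory and
         * index 1 selects Device memory. */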
+        /* MAIR_EL1 - Index 1. */
+        ullMairEl1RegValue |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_EL1_ATTR1_POS ) & portMPU_MAIR_EL1_ATTR1_MASK );
+
+        __asm volatile ( "msr MAIR_EL1, %0" : : "r" ( ullMairEl1RegValue ) );
+
+        /* Setup privileged flash as Read Only so that privileged tasks can
+         * read it but not modify it.
+         */
+        uint64_t ullPrselrEl1RegValue = portPRIVILEGED_FLASH_REGION;
+        __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
+
+        uint64_t ullPrbarEl1RegValue = ( ( ( uint64_t ) __privileged_functions_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
+                                       ( portMPU_REGION_NON_SHAREABLE ) |
+                                       ( portMPU_REGION_PRIVILEGED_READ_ONLY );
+        __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
+
+        uint64_t ullPrlarEl1RegValue = ( ( ( uint64_t ) __privileged_functions_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
+                                       ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
+                                       ( portMPU_PRLAR_EL1_REGION_ENABLE );
+        __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );
+
+        /* Setup unprivileged flash as Read Only by both privileged and
+         * unprivileged tasks. All tasks can read it but none can modify it.
+         */
+        ullPrselrEl1RegValue = portUNPRIVILEGED_FLASH_REGION;
+        __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
+
+        ullPrbarEl1RegValue = ( ( ( uint64_t ) __unprivileged_flash_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
+                              ( portMPU_REGION_NON_SHAREABLE ) |
+                              ( portMPU_REGION_READ_ONLY );
+        __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
+
+        ullPrlarEl1RegValue = ( ( ( uint64_t ) __unprivileged_flash_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
+                              ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
+                              ( portMPU_PRLAR_EL1_REGION_ENABLE );
+        __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );
+
+        /* Setup unprivileged syscalls flash as Read Only by both privileged
+         * and unprivileged tasks. All tasks can read it but none can modify it.
+         */
+        ullPrselrEl1RegValue = portUNPRIVILEGED_SYSCALLS_REGION;
+        __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
+
+        ullPrbarEl1RegValue = ( ( ( uint64_t ) __syscalls_flash_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
+                              ( portMPU_REGION_NON_SHAREABLE ) |
+                              ( portMPU_REGION_READ_ONLY );
+        __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
+
+        ullPrlarEl1RegValue = ( ( ( uint64_t ) __syscalls_flash_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
+                              ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
+                              ( portMPU_PRLAR_EL1_REGION_ENABLE );
+        __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );
+
+        /* Setup RAM containing kernel data for privileged access only.
*/ + ullPrselrEl1RegValue = portPRIVILEGED_RAM_REGION; + __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) ); + + ullPrbarEl1RegValue = ( ( ( uint64_t ) __privileged_sram_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) | + ( portMPU_REGION_INNER_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) ); + + ullPrlarEl1RegValue = ( ( ( uint64_t ) __privileged_sram_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) | + ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) | + ( portMPU_PRLAR_EL1_REGION_ENABLE ); + __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) ); + } + /*-----------------------------------------------------------*/ + + void vEnableMPU( void ) /* PRIVILEGED_FUNCTION */ + { + uint64_t ullSctlrEl1RegValue; + + __asm volatile ( "mrs %0, SCTLR_EL1" : "=r" ( ullSctlrEl1RegValue ) ); + /* Enable the MPU. Also enable privileged access to the + * background region. + */ + ullSctlrEl1RegValue |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + __asm volatile ( "msr SCTLR_EL1, %0" : : "r" ( ullSctlrEl1RegValue ) ); + + /* Ensure the write to SCTLR_EL1 is committed before + * returning. + */ + __asm volatile ( "isb" ); + } + /*-----------------------------------------------------------*/ + + BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xTaskIsPrivileged = pdFALSE; + #if ( configNUMBER_OF_CORES == 1 ) - ullPortTaskHasFPUContext = pdTRUE; + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); #else - ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE; + extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ]; + xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] ); #endif + + if( ( pxMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; } - #else /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */ + /*-----------------------------------------------------------*/ + + static uint32_t prvGetRegionAccessPermissions( uint64_t ullPrbarEl1Value ) /* PRIVILEGED_FUNCTION */ { - #error "Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined." + uint32_t ulAccessPermissions = 0; + + if( ( ullPrbarEl1Value & portMPU_PRBAR_EL1_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ullPrbarEl1Value & portMPU_PRBAR_EL1_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; } - #endif /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */ + /*-----------------------------------------------------------*/ - return pxTopOfStack; + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ -} + { + uint32_t i; + uint64_t ullBufferStartAddress, ullBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
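 *
 * (Aside: the wrap-around guard used below is presumably the usual inclusive
 * test; a plausible shape for the helper, assuming the real macro in
 * portmacro.h looks like this:
 *
 *     #define portADD_UINT64_WILL_OVERFLOW( ullA, ullB ) \
 *         ( ( ullA ) > ( UINT64_MAX - ( ullB ) ) )
 *
 * i.e. ullA + ullB wraps past 2^64 exactly when ullA exceeds
 * UINT64_MAX - ullB.)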
*/ -/*-----------------------------------------------------------*/ + if( xSchedulerRunning == pdFALSE ) + { + /* Grant access to all the kernel objects before the scheduler + * is started. It is necessary because there is no task running + * yet and therefore, we cannot use the permissions of any + * task. */ + xAccessGranted = pdTRUE; + } + else if( ( xTaskMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT64_WILL_OVERFLOW( ( ( uint64_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ullBufferStartAddress = ( uint64_t ) pvBuffer; + ullBufferEndAddress = ( ( ( uint64_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? */ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 & portMPU_PRLAR_EL1_REGION_ENABLE ) == portMPU_PRLAR_EL1_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ullBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_PRBAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ), + portEXTRACT_LAST_ADDRESS_FROM_PRLAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 ) ) && + portIS_ADDRESS_WITHIN_RANGE( ullBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_PRBAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ), + portEXTRACT_LAST_ADDRESS_FROM_PRLAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } + /*-----------------------------------------------------------*/ + + void vSystemCallEnter( uint64_t * pullPrivilegedOnlyTaskStack, + uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */ + { + #if ( configNUMBER_OF_CORES == 1 ) + extern TaskHandle_t pxCurrentTCB; + #else + extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ]; + #endif + extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ]; + xMPU_SETTINGS * pxMpuSettings; + uint64_t ullSystemCallLocation; /* Address where SVC was raised. */ + __asm volatile ( "MRS %0, ELR_EL1" : "=r" ( ullSystemCallLocation ) ); + + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. + */ + extern uint64_t * __syscalls_flash_start__; + extern uint64_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint64_t __syscalls_flash_start__[]; + extern uint64_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + #if ( configNUMBER_OF_CORES == 1 ) + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + #else + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] ); + #endif + + /* Checks: + * 1. SVC is raised from the system call section (i.e. application is + * not raising SVC directly). + * 2. We do not need to check that ucSystemCallNumber is within range + * because the assembly SVC handler checks that before calling + * this function. + */ + if( ( ullSystemCallLocation >= ( uint64_t ) __syscalls_flash_start__ ) && + ( ullSystemCallLocation <= ( uint64_t ) __syscalls_flash_end__ ) && + ( uxSystemCallImplementations[ ucSystemCallNumber ] != 0 ) ) + { + /* Store the value of the Link Register before the SVC was raised. 
+ * It contains the address of the caller of the System Call entry + * point (i.e. the caller of the MPU_). We need to restore it + * when we exit from the system call. + */ + pxMpuSettings->xSystemCallInfo.ullLinkRegisterAtSystemCallEntry = pullPrivilegedOnlyTaskStack[ portOFFSET_TO_LR ]; + + /* Capture user-mode SP at system call entry. */ + uint64_t ullUserSpAtEntry; + __asm volatile ( "MRS %0, SP_EL0" : "=r" ( ullUserSpAtEntry ) ); + pxMpuSettings->xSystemCallInfo.ullUserSPAtSystemCallEntry = ullUserSpAtEntry; + + /* Setup the MPU_ inputs, the system call stack, and SPSR. */ + __asm volatile ( + "MOV X0, %0 \n" + "MOV X1, %1 \n" + "MOV X2, %2 \n" + "MOV X3, %3 \n" + "MSR ELR_EL1, %4 \n" + "MSR SP_EL0, %5 \n" + "MSR SPSR_EL1, %6 \n" + : + : "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X0 ] ), + "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X1 ] ), + "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X2 ] ), + "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X3 ] ), + "r" ( ( uint64_t ) uxSystemCallImplementations[ ucSystemCallNumber ] ), + "r" ( &( pxMpuSettings->ullContext[ MAX_CONTEXT_SIZE + configSYSTEM_CALL_STACK_SIZE ] ) ), + "r" ( portINITIAL_PSTATE_EL1 ) + : "memory", "x0", "x1", "x2", "x3" + ); + } + } + /*-----------------------------------------------------------*/ + + void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ + { + __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" ); + } + /*-----------------------------------------------------------*/ + + void vSystemCallExit( uint64_t ullSystemCallReturnValue ) /* PRIVILEGED_FUNCTION */ + { + #if ( configNUMBER_OF_CORES == 1 ) + extern TaskHandle_t pxCurrentTCB; + #else + extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ]; + #endif + xMPU_SETTINGS * pxMpuSettings; + uint64_t ullSystemCallLocation; /* Address where SVC was raised. */ + __asm volatile ( "MRS %0, ELR_EL1" : "=r" ( ullSystemCallLocation ) ); + + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint64_t * __privileged_functions_start__; + extern uint64_t * __privileged_functions_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint64_t __privileged_functions_start__[]; + extern uint64_t __privileged_functions_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + #if ( configNUMBER_OF_CORES == 1 ) + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + #else + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] ); + #endif + + /* Check: + * SVC is raised from the privileged code (i.e. application is not + * raising SVC directly). This SVC is only raised from + * vRequestSystemCallExit which is in the privileged code section. + */ + if( ( ullSystemCallLocation >= ( uint64_t ) __privileged_functions_start__ ) && + ( ullSystemCallLocation <= ( uint64_t ) __privileged_functions_end__ ) ) + { + __asm volatile ( + "MSR ELR_EL1, %0 \n" /* Return to the MPU_ caller. */ + "MSR SP_EL0, %1 \n" /* Restore user SP saved at syscall entry. */ + "MSR SPSR_EL1, %3 \n" /* Ensure return to EL0. */ + "MOV X0, %2 \n" /* Move the system call return value to X0. 
*/ + : + : "r" ( pxMpuSettings->xSystemCallInfo.ullLinkRegisterAtSystemCallEntry ), + "r" ( pxMpuSettings->xSystemCallInfo.ullUserSPAtSystemCallEntry ), + "r" ( ullSystemCallReturnValue ), + "r" ( ( uint64_t ) portINITIAL_PSTATE_EL0 ) + : "memory" + ); + } + } + /*-----------------------------------------------------------*/ + + #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) + + void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle, + int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit; + xMPU_SETTINGS * xTaskMpuSettings; + + /* Calculate the Access Control List entry index and bit position + * within that entry. */ + ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS ); + ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS ); + + xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle ); + + /* Set the bit corresponding to the kernel object to grant access. */ + xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit ); + } + /*-----------------------------------------------------------*/ + + void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle, + int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit; + xMPU_SETTINGS * xTaskMpuSettings; + + /* Calculate the Access Control List entry index and bit position + * within that entry. */ + ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS ); + ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS ); + + xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle ); + + /* Clear the bit corresponding to the kernel object to revoke access. */ + xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit ); + } + /*-----------------------------------------------------------*/ + + BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings; + + if( xSchedulerRunning == pdFALSE ) + { + /* Grant access to all the kernel objects before the scheduler + * is started. It is necessary because there is no task running + * yet and therefore, we cannot use the permissions of any + * task. */ + xAccessGranted = pdTRUE; + } + else + { + xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
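 *
 * (Worked example: with a portACL_ENTRY_SIZE_BITS of 32 - an assumption
 * about this port's value - kernel object index 70 yields
 * ulAccessControlListEntryIndex = 70 / 32 = 2 and
 * ulAccessControlListEntryBit = 70 % 32 = 6, i.e. the grant is bit 6 of
 * ulAccessControlList[ 2 ].)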
*/ + + ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS ); + ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS ); + + if( ( xTaskMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 ) + { + xAccessGranted = pdTRUE; + } + } + } + + return xAccessGranted; + } + /*-----------------------------------------------------------*/ + #else /* configENABLE_ACCESS_CONTROL_LIST == 1 */ + + BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */ + { + ( void ) lInternalIndexOfKernelObject; + + /* If Access Control List feature is not used, all the tasks have + * access to all the kernel objects. */ + return pdTRUE; + } + /*-----------------------------------------------------------*/ + + #endif /* configENABLE_ACCESS_CONTROL_LIST == 1 */ + +#endif /* #if ( configENABLE_MPU == 1 ) */ BaseType_t xPortStartScheduler( void ) { @@ -341,6 +1190,13 @@ BaseType_t xPortStartScheduler( void ) configASSERT( ullAPSR == portEL1 ); + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + vSetupMPU(); + } + #endif /* #if ( configENABLE_MPU == 1 ) */ + /* Interrupts are turned off in the CPU itself to ensure a tick does * not execute while the scheduler is being started. Interrupts are * automatically turned back on in the CPU when the first task starts @@ -381,6 +1237,22 @@ BaseType_t xPortStartScheduler( void ) configSETUP_TICK_INTERRUPT(); #endif /* if ( configNUMBER_OF_CORES > 1 ) */ + #if ( configENABLE_MPU == 1 ) + xSchedulerRunning = pdTRUE; + #endif /* ( configENABLE_MPU == 1 ) */ + + #if ( configENABLE_MPU == 1 ) + { + /* Enable the Memory Protection Unit (MPU) + * MPU is only enabled after the primary and secondary handshakes + * are done as to prevent inconsistent MPU regions attributes across + * different cores resulting in unupdated values of the handshake + * flags. + */ + vEnableMPU(); + } + #endif /* #if ( configENABLE_MPU == 1 ) */ + /* Start the first task executing. */ vPortRestoreTaskContext(); @@ -389,7 +1261,7 @@ BaseType_t xPortStartScheduler( void ) /*-----------------------------------------------------------*/ -void vPortEndScheduler( void ) +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ { /* Stub implementation for ports where there is nothing to return to * Artificially force an assert. */ @@ -399,7 +1271,7 @@ void vPortEndScheduler( void ) /*-----------------------------------------------------------*/ #if ( configNUMBER_OF_CORES == 1 ) - void vPortEnterCritical( void ) + PRIVILEGED_FUNCTION void vPortEnterCritical( void ) { /* Mask interrupts up to the max syscall interrupt priority. */ uxPortSetInterruptMask(); @@ -422,7 +1294,7 @@ void vPortEndScheduler( void ) /*-----------------------------------------------------------*/ - void vPortExitCritical( void ) + PRIVILEGED_FUNCTION void vPortExitCritical( void ) { if( ullCriticalNesting > portNO_CRITICAL_NESTING ) { @@ -462,21 +1334,13 @@ void FreeRTOS_Tick_Handler( void ) } #endif /* configASSERT_DEFINED */ - /* Set interrupt mask before altering scheduler structures. The tick - * handler runs at the lowest priority, so interrupts cannot already be masked, + /* Set interrupt mask before altering scheduler structures. 
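 *
 * (Aside: the mask/restore pair now used by this handler is the standard
 * FromISR idiom, replacing the previous direct writes to ICC_PMR_EL1:
 *
 *     UBaseType_t uxStatus = portSET_INTERRUPT_MASK_FROM_ISR();
 *     ... alter scheduler structures ...
 *     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus );
 * )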
The tick + * interrupt runs at the lowest priority, so interrupts cannot already be masked, * so there is no need to save and restore the current mask value. It is * necessary to turn off interrupts in the CPU itself while the ICCPMR is being - * updated. */ - __asm volatile ( "MSR ICC_PMR_EL1, %0 \n" - "DSB SY \n" - "ISB SY \n" - ::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" ); - - /* Ok to enable interrupts after the interrupt source has been cleared. */ - configCLEAR_TICK_INTERRUPT(); - __asm volatile ( "MSR DAIFCLR, #2\n" - "DSB SY\n" - "ISB SY\n" ::: "memory" ); + * updated. + */ + UBaseType_t uxInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); #if ( configNUMBER_OF_CORES > 1 ) UBaseType_t x = portENTER_CRITICAL_FROM_ISR(); @@ -488,7 +1352,7 @@ void FreeRTOS_Tick_Handler( void ) #if ( configNUMBER_OF_CORES == 1 ) ullPortYieldRequired = pdTRUE; #else - ullPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; + ullPortYieldRequired[ portGET_CORE_ID_FROM_ISR() ] = pdTRUE; #endif } #if ( configNUMBER_OF_CORES > 1 ) @@ -496,7 +1360,10 @@ void FreeRTOS_Tick_Handler( void ) #endif /* if ( configNUMBER_OF_CORES > 1 ) */ /* Ensure all interrupt priorities are active again. */ - portCLEAR_INTERRUPT_PRIORITIES_MASK(); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxInterruptStatus ); + + /* Ok to enable interrupts after the interrupt source has been cleared. */ + configCLEAR_TICK_INTERRUPT(); } /*-----------------------------------------------------------*/ @@ -533,7 +1400,7 @@ void vPortClearInterruptMask( UBaseType_t uxNewMaskValue ) __asm volatile ( "SVC %0 \n" : - : "i" ( portSVC_UNMASK_INTERRUPTS ), "r" ( uxNewMaskValue ) + : "i" ( portSVC_UNMASK_INTERRUPTS ) : "memory" ); } @@ -655,21 +1522,27 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) #if ( configNUMBER_OF_CORES > 1 ) - /* Which core owns the lock? */ - volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ]; + /* Which core owns the lock? Keep in privileged, shareable RAM. */ + PRIVILEGED_DATA volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ]; /* Lock count a core owns. */ - volatile uint64_t ucRecursionCountByLock[ eLockCount ]; + PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock[ eLockCount ]; /* Index 0 is used for ISR lock and Index 1 is used for task lock. */ - uint32_t ulGateWord[ eLockCount ]; + PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ]; void vInterruptCore( uint32_t ulInterruptID, uint32_t ulCoreID ) { uint64_t ulRegVal = 0; uint32_t ulCoreMask = ( 1UL << ulCoreID ); ulRegVal |= ( (ulCoreMask & 0xFFFF) | ( ( ulInterruptID & 0xF ) << 24U ) ); - __asm__ volatile ( "msr ICC_SGI1R_EL1, %0" : : "r" ( ulRegVal ) ); - __asm__ volatile ( "dsb sy"); - __asm__ volatile ( "isb sy"); + __asm volatile ( + "str x0, [ sp, #-0x10 ]! \n" + "mov x0, %0 \n" + "svc %1 \n" + "ldr x0, [ sp ], # 0x10 \n" + : + : "r" ( ulRegVal ), "i" ( portSVC_INTERRUPT_CORE ) + : "memory", "w1" + ); } /*-----------------------------------------------------------*/ @@ -748,10 +1621,11 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) /* Lock acquire */ if( uxAcquire ) { - /* Check if spinlock is available. */ - /* If spinlock is not available check if the core owns the lock. */ - /* If the core owns the lock wait increment the lock count by the core. */ - /* If core does not own the lock wait for the spinlock. */ + /* Check if spinlock is available. + * If spinlock is not available check if the core owns the lock. + * If the core owns the lock wait increment the lock count by the core. 
+ * If core does not own the lock wait for the spinlock. + */ if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 ) { /* Check if the core owns the spinlock. */ @@ -811,8 +1685,12 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) /*-----------------------------------------------------------*/ - BaseType_t xPortGetCoreID( void ) + uint8_t ucPortGetCoreID( void ) { + /* Use SVC to obtain the core ID in a way that is safe when called + * from EL0 tasks. ISRs and EL1 code should use + * ucPortGetCoreIDFromIsr()/portGET_CORE_ID_FROM_ISR(). + */ BaseType_t xCoreID; __asm volatile ( "svc %1 \n" @@ -826,6 +1704,15 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) /*-----------------------------------------------------------*/ + uint8_t ucPortGetCoreIDFromIsr ( void ) /* PRIVILEGED_FUNCTION */ + { + uint64_t ullMpidrEl1; + __asm volatile ( "MRS %0, MPIDR_EL1" : "=r" ( ullMpidrEl1 ) ); + + return ( uint8_t ) ( ullMpidrEl1 & 0xff ); + } + +/*------------------------------------------------------------*/ void FreeRTOS_SGI_Handler( void ) { /* Must be the lowest possible priority. */ @@ -843,29 +1730,22 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) } #endif /* configASSERT_DEFINED */ - /* Set interrupt mask before altering scheduler structures. The SGI - * handler runs at the lowest priority, so interrupts cannot already be masked, + /* Set interrupt mask before altering scheduler structures. The SGI + * interrupt runs at the lowest priority, so interrupts cannot already be masked, * so there is no need to save and restore the current mask value. It is * necessary to turn off interrupts in the CPU itself while the ICCPMR is being - * updated. */ - __asm volatile ( "MSR ICC_PMR_EL1, %0 \n" - "DSB SY \n" - "ISB SY \n" - ::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" ); - - /* Ok to enable interrupts after the interrupt source has been cleared. */ - __asm volatile ( "MSR DAIFCLR, #2\n" - "DSB SY\n" - "ISB SY\n" ::: "memory" ); + * updated. + */ + UBaseType_t uxInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + UBaseType_t uxSavedInterruptStatus = portENTER_CRITICAL_FROM_ISR(); #if ( configNUMBER_OF_CORES == 1 ) ullPortYieldRequired = pdTRUE; #else - ullPortYieldRequired[ portGET_CORE_ID() ] = pdTRUE; + ullPortYieldRequired[ portGET_CORE_ID_FROM_ISR() ] = pdTRUE; #endif - - /* Ensure all interrupt priorities are active again. */ - portCLEAR_INTERRUPT_PRIORITIES_MASK(); + portEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxInterruptStatus ); } /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CR82/portASM.S b/portable/GCC/ARM_CR82/portASM.S index 5032a315dff..16fc7780de8 100644 --- a/portable/GCC/ARM_CR82/portASM.S +++ b/portable/GCC/ARM_CR82/portASM.S @@ -37,10 +37,20 @@ #include "FreeRTOSConfig.h" #include "portmacro.h" +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* System call numbers includes. */ +#include "mpu_syscall_numbers.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + .text /* Variables and functions. 
*/ - .extern ullMaxAPIPriorityMask #if ( configNUMBER_OF_CORES == 1 ) .extern pxCurrentTCB .extern ullCriticalNesting @@ -56,13 +66,62 @@ .extern ullPortYieldRequired .extern _freertos_vector_table +#if ( configENABLE_MPU == 1 ) + .extern xPortIsTaskPrivileged + .extern vSystemCallEnter + .extern vSystemCallExit + .extern vRequestSystemCallExit + .extern uxSystemCallImplementations +#endif /* #if ( configENABLE_MPU == 1 ) */ + .global FreeRTOS_IRQ_Handler .global FreeRTOS_SWI_Handler + .global vPortSaveTaskContext .global vPortRestoreTaskContext +#if ( configENABLE_MPU == 1 ) - .macro saveallgpregisters -/* Save all general-purpose registers on stack. */ + .macro portLOAD_MPU_REGIONS_ADDRESSES +MOV X3, #4 /* i = 4 First four MPU regions are already programmed.*/ +MOV X4, # configTOTAL_MPU_REGIONS - 1 /* Upper limit = configTOTAL_MPU_REGIONS - 1 */ +1 : + CMP X3, X4 /* Compare i with ( configTOTAL_MPU_REGIONS - 1 ) */ + B.GT 2f /* if i > ( configTOTAL_MPU_REGIONS - 1 ), exit loop */ + MSR PRSELR_EL1, X3 /* Program PRSELR_EL1. */ + ISB /* Ensure PRSELR selection takes effect before registers access. */ + LDP X1, X2, [ X0 ], # 0x10 /* Retrieve ullPrbarEl1 and ullPrlarEl1r */ + MSR PRBAR_EL1, X1 /* Program PRBAR_EL1. */ + MSR PRLAR_EL1, X2 /* Program PRLAR_EL1. */ + ADD X3, X3, # 1 /* i++ */ + B 1b +2 : + DSB SY + ISB + .endm + + .macro portSTORE_MPU_REGIONS_ADDRESSES +MOV X3, #4 /* i = 4 First four MPU regions are already programmed.*/ +MOV X4, # configTOTAL_MPU_REGIONS - 1 /* Upper limit = configTOTAL_MPU_REGIONS - 1 */ +1 : + CMP X3, X4 /* Compare i with ( configTOTAL_MPU_REGIONS - 1 ) */ + B.GT 2f /* if i > ( configTOTAL_MPU_REGIONS - 1 ), exit loop */ + MSR PRSELR_EL1, X3 /* Program PRSELR_EL1. */ + ISB /* Ensure PRSELR selection takes effect before registers access. */ + MRS X1, PRBAR_EL1 /* Retrieve PRBAR_EL1. */ + MRS X2, PRLAR_EL1 /* Retrieve PRLAR_EL1. */ + STP X1, X2, [ X0 ], # 0x10 /* Store PRBAR_EL1 and PRLAR_EL1 in ullPrbarEl1 and ullPrlarEl1r */ + ADD X3, X3, # 1 /* i++ */ + B 1b +2 : + /* No additional barrier required after reading PR* registers. */ + .endm + +#endif /* #if ( configENABLE_MPU == 1 ) */ + +/*-----------------------------------------------------------*/ + + .macro savefuncontextgpregs +/* Save function context general-purpose registers. */ STP X0, X1, [ SP, # - 0x10 ] ! STP X2, X3, [ SP, # - 0x10 ] ! STP X4, X5, [ SP, # - 0x10 ] ! @@ -72,26 +131,30 @@ STP X10, X11, [ SP, # - 0x10 ] ! STP X12, X13, [ SP, # - 0x10 ] ! STP X14, X15, [ SP, # - 0x10 ] ! STP X16, X17, [ SP, # - 0x10 ] ! -STP X18, X19, [ SP, # - 0x10 ] ! -STP X20, X21, [ SP, # - 0x10 ] ! -STP X22, X23, [ SP, # - 0x10 ] ! -STP X24, X25, [ SP, # - 0x10 ] ! -STP X26, X27, [ SP, # - 0x10 ] ! -STP X28, X29, [ SP, # - 0x10 ] ! +STP X18, X29, [ SP, # - 0x10 ] ! STR X30, [ SP, # - 0x10 ] ! .endm /*-----------------------------------------------------------*/ - .macro restoreallgpregisters -/* Restore all general-purpose registers from stack. */ + .macro savesyscallcontextgpregs +/* Save system call context general-purpose registers. */ +STP X4, X5, [ SP, # - 0x10 ] ! +STP X6, X7, [ SP, # - 0x10 ] ! +STP X8, X9, [ SP, # - 0x10 ] ! +STP X10, X11, [ SP, # - 0x10 ] ! +STP X12, X13, [ SP, # - 0x10 ] ! +STP X14, X15, [ SP, # - 0x10 ] ! +STP X16, X17, [ SP, # - 0x10 ] ! +STP X18, X29, [ SP, # - 0x10 ] ! + .endm + +/*-----------------------------------------------------------*/ + + .macro restorefuncontextgpregs +/* Restore function context general-purpose registers. 
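 *
 * These helpers save and restore the caller-saved registers X0-X18 together
 * with X29/X30 only: under the AAPCS64 procedure call standard, X19-X28 are
 * callee-saved, so any C function called via BL already preserves them.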
*/ LDR X30, [ SP ], # 0x10 -LDP X28, X29, [ SP ], # 0x10 -LDP X26, X27, [ SP ], # 0x10 -LDP X24, X25, [ SP ], # 0x10 -LDP X22, X23, [ SP ], # 0x10 -LDP X20, X21, [ SP ], # 0x10 -LDP X18, X19, [ SP ], # 0x10 +LDP X18, X29, [ SP ], # 0x10 LDP X16, X17, [ SP ], # 0x10 LDP X14, X15, [ SP ], # 0x10 LDP X12, X13, [ SP ], # 0x10 @@ -105,8 +168,38 @@ LDP X0, X1, [ SP ], # 0x10 /*-----------------------------------------------------------*/ - .macro savefuncontextgpregs -/* Save function context general-purpose registers. */ + .macro restorefuncontextgpregexceptx0 +/* Restore function context general-purpose registers while discarding old X0. */ +LDR X30, [ SP ], # 0x10 +LDP X18, X29, [ SP ], # 0x10 +LDP X16, X17, [ SP ], # 0x10 +LDP X14, X15, [ SP ], # 0x10 +LDP X12, X13, [ SP ], # 0x10 +LDP X10, X11, [ SP ], # 0x10 +LDP X8, X9, [ SP ], # 0x10 +LDP X6, X7, [ SP ], # 0x10 +LDP X4, X5, [ SP ], # 0x10 +LDP X2, X3, [ SP ], # 0x10 +LDP XZR, X1, [ SP ], # 0x10 + .endm + +/*-----------------------------------------------------------*/ + + .macro restoresyscallcontextgpregs +/* Restore system call context general-purpose registers. */ +LDP X18, X29, [ SP ], # 0x10 +LDP X16, X17, [ SP ], # 0x10 +LDP X14, X15, [ SP ], # 0x10 +LDP X12, X13, [ SP ], # 0x10 +LDP X10, X11, [ SP ], # 0x10 +LDP X8, X9, [ SP ], # 0x10 +LDP X6, X7, [ SP ], # 0x10 +LDP X4, X5, [ SP ], # 0x10 + .endm +/*-----------------------------------------------------------*/ + + .macro saveallgpregisters +/* Save all general-purpose registers on stack. */ STP X0, X1, [ SP, # - 0x10 ] ! STP X2, X3, [ SP, # - 0x10 ] ! STP X4, X5, [ SP, # - 0x10 ] ! @@ -116,16 +209,26 @@ STP X10, X11, [ SP, # - 0x10 ] ! STP X12, X13, [ SP, # - 0x10 ] ! STP X14, X15, [ SP, # - 0x10 ] ! STP X16, X17, [ SP, # - 0x10 ] ! -STP X18, X29, [ SP, # - 0x10 ] ! -STR X30, [ SP, # - 0x10 ] ! +STP X18, X19, [ SP, # - 0x10 ] ! +STP X20, X21, [ SP, # - 0x10 ] ! +STP X22, X23, [ SP, # - 0x10 ] ! +STP X24, X25, [ SP, # - 0x10 ] ! +STP X26, X27, [ SP, # - 0x10 ] ! +STP X28, X29, [ SP, # - 0x10 ] ! +STP X30, XZR, [ SP, # - 0x10 ] ! .endm /*-----------------------------------------------------------*/ - .macro restorefuncontextgpregs -/* Restore function context general-purpose registers. */ -LDR X30, [ SP ], # 0x10 -LDP X18, X29, [ SP ], # 0x10 + .macro restoreallgpregisters +/* Restore all general-purpose registers from stack. */ +LDP X30, XZR, [ SP ], # 0x10 +LDP X28, X29, [ SP ], # 0x10 +LDP X26, X27, [ SP ], # 0x10 +LDP X24, X25, [ SP ], # 0x10 +LDP X22, X23, [ SP ], # 0x10 +LDP X20, X21, [ SP ], # 0x10 +LDP X18, X19, [ SP ], # 0x10 LDP X16, X17, [ SP ], # 0x10 LDP X14, X15, [ SP ], # 0x10 LDP X12, X13, [ SP ], # 0x10 @@ -190,7 +293,81 @@ LDP Q0, Q1, [ SP ], # 0x20 /*-----------------------------------------------------------*/ .macro portSAVE_CONTEXT -/* Switch to use the EL0 stack pointer. */ + +#if ( configENABLE_MPU == 1 ) + /* Switch to use the EL1 stack pointer. */ + MSR SPSEL, # 1 + + /* Store X0-X4 as they are being used to save the user allocated task stack and to program the MPU */ + STP X0, X1, [ SP, # - 0x10 ] ! + STP X2, X3, [ SP, # - 0x10 ] ! + STR X4, [ SP, # - 0x10 ] ! + + /* Switch to use the EL0 stack pointer. */ + MSR SPSEL, # 0 + + /* Store user allocated task stack and use ullContext as the SP */ + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + #else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. 
*/ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + #endif + LDR X1, [ X0 ] + ADD X1, X1, #8 /* X1 = X1 + 8, X1 now points to ullTaskUnprivilegedSP in TCB. */ + MOV X0, SP + STR X0, [ X1 ] /* Save ullTaskUnprivilegedSP on task's TCB */ + SUB X1, X1, #8 /* X1 = X1 - 8, X1 now points to pxTopOfStack in TCB. */ + LDR X1, [ X1 ] + MOV SP, X1 /* Use pxTopOfStack ( ullContext ) as the SP. */ + + savefuncontextgpregs + #if ( configNUMBER_OF_CORES > 1 ) + MRS X1, ELR_EL1 /* Save ELR_EL1 before calling xPortIsTaskPrivileged which would change its value in case of multicore */ + STR X1, [ SP, # - 0x10 ] ! + #endif + BL xPortIsTaskPrivileged + #if ( configNUMBER_OF_CORES > 1 ) + LDR X1, [ SP ], # 0x10 + MSR ELR_EL1, X1 + #endif + CBNZ X0, 3f /* If task is privileged, skip saving MPU context. */ + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + #else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + #endif + LDR X0, [ X0 ] + + ADD X0, X0, #16 /* X0 = X0 + 16. X0 now points to MAIR_EL1 in TCB. */ + MRS X1, MAIR_EL1 /* X1 = MAIR_EL1. */ + STR X1, [ X0 ], # 0x8 /* Store MAIR_EL1 in TCB, X0 = X0 + 8. */ + + portSTORE_MPU_REGIONS_ADDRESSES /* Store MPU region addresses onto TCB. */ + +3 : + restorefuncontextgpregs + MSR SPSEL, # 1 + + /* Restore X0-X4. */ + LDR X4, [ SP ], # 0x10 + LDP X2, X3, [ SP ], # 0x10 + LDP X0, X1, [ SP ], # 0x10 +#endif /* #if ( configENABLE_MPU == 1 ) */ + MSR SPSEL, # 0 /* Save the entire context. */ saveallgpregisters @@ -201,112 +378,198 @@ MRS X2, ELR_EL1 STP X2, X3, [ SP, # - 0x10 ] ! /* Save the critical section nesting depth. */ -LDR X0, ullCriticalNestingsConst -#if configNUMBER_OF_CORES > 1 - /* Calculate per-core index using MPIDR_EL1 for SMP support. */ - MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ - AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ - LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ - ADD X0, X0, X1 /* Add offset to base address. */ +#if ( configNUMBER_OF_CORES == 1 ) +adrp X0, ullCriticalNesting +add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */ +#else +adrp X0, ullCriticalNestings +add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ +/* Calculate per-core index using MPIDR_EL1 for SMP support. */ +MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ +AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ +LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ +ADD X0, X0, X1 /* Add offset to base address. */ #endif LDR X3, [ X0 ] /* Save the FPU context indicator. */ -LDR X0, ullPortTaskHasFPUContextConst +adrp X0, ullPortTaskHasFPUContext +add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */ + #if configNUMBER_OF_CORES > 1 - ADD X0, X0, X1 /* Add to the base of the FPU array. */ + ADD X0, X0, X1 /* Add to the base of the FPU array. 
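 *
 * (Aside: the MPIDR_EL1 indexing used throughout these macros is the
 * assembly form of, roughly:
 *
 *     coreId = MPIDR_EL1 & 0xff;                     Aff0 = core number
 *     value  = ullPortTaskHasFPUContext[ coreId ];   8-byte elements
 *
 * hence the left shift by 3, which scales the core ID to the size of a
 * 64-bit array element.)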
*/ #endif LDR X2, [ X0 ] /* Save the FPU context, if any (32 128-bit registers). */ -CMP X2, # 0 -B.EQ 1f /* FPU context not present, skip saving FPU registers. */ +CBZ X2, 4f /* FPU context not present, skip saving FPU registers. */ savefloatregisters -1 : +4 : /* Store the critical nesting count and FPU context indicator. */ STP X2, X3, [ SP, # - 0x10 ] ! -LDR X0, pxCurrentTCBsConst -#if ( configNUMBER_OF_CORES > 1 ) - MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register .*/ - AND X1, X1, # 0xff /* Extract core ID. */ - LSL X1, X1, # 3 /* Multiply core ID by pointer size. */ - ADD X0, X0, X1 /* Offset for current core's TCB pointer. */ + +#if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ +#else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register .*/ + AND X1, X1, # 0xff /* Extract core ID. */ + LSL X1, X1, # 3 /* Multiply core ID by pointer size. */ + ADD X0, X0, X1 /* Offset for current core's TCB pointer. */ #endif LDR X1, [ X0 ] -MOV X0, SP /* Save current stack pointer. */ -STR X0, [ X1 ] +MOV X0, SP +STR X0, [ X1 ] /* Save pxTopOfStack on the TCB. */ -/* Switch to use the ELx stack pointer. */ +/* Switch to use the EL1 stack pointer. */ MSR SPSEL, # 1 .endm /*-----------------------------------------------------------*/ -.macro portRESTORE_CONTEXT -/* Switch to use the EL0 stack pointer. */ -MSR SPSEL, # 0 - -/* Set the SP to point to the stack of the task being restored. */ -LDR X0, pxCurrentTCBsConst -#if configNUMBER_OF_CORES > 1 - /* Get the core ID to index the TCB correctly. */ - MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ - AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID. */ - LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */ + .macro portRESTORE_CONTEXT + +#if ( configENABLE_MPU == 1 ) + /* Switch to use the EL1 stack pointer. */ + MSR SPSEL, # 1 + + savefuncontextgpregs + BL xPortIsTaskPrivileged + CBNZ X0, 3f /* If task is privileged, skip restoring MPU context. */ + + /* Switch to use the EL0 stack pointer. */ + MSR SPSEL, # 0 + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + #else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + #endif + LDR X0, [ X0 ] + + DMB SY /* Complete outstanding transfers before disabling MPU. */ + MRS X1, SCTLR_EL1 /* X1 = SCTLR_EL1 */ + BIC X1, X1, # (1 << 0) /* Clears bit 0 of X1 */ + MSR SCTLR_EL1, X1 /* Disable MPU. */ + + ADD X0, X0, #16 /* X0 = X0 + 16. X0 now points to MAIR_EL1 in TCB. */ + LDR X1, [ X0 ], # 0x8 /* X1 = *X0 i.e. X1 = MAIR_EL1, X0 = X0 + 8. */ + MSR MAIR_EL1, X1 /* Program MAIR_EL1. */ + + portLOAD_MPU_REGIONS_ADDRESSES /* Load MPU region addresses from TCB. */ + MRS X1, SCTLR_EL1 /* X1 = SCTLR_EL1 */ + ORR X1, X1, # (1 << 0) /* Sets bit 0 of X1 */ + MSR SCTLR_EL1, X1 /* Enable MPU. */ + DSB SY /* Force memory writes before continuing. 
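 *
 * (Aside: the reprogramming sequence above is, in order: DMB to drain
 * outstanding accesses; clear SCTLR_EL1.M so the MPU is off while its
 * registers change; load MAIR_EL1 and the per-task PRBAR/PRLAR pairs from
 * the TCB; set SCTLR_EL1.M again; then DSB so the new memory map is in
 * effect before execution continues.)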
*/ + +3 : + MSR SPSEL, # 1 + restorefuncontextgpregs +#endif /* #if ( configENABLE_MPU == 1 ) */ + + /* Switch to use the EL0 stack pointer. */ + MSR SPSEL, # 0 + + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + #else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + #endif + LDR X1, [ X0 ] + LDR X0, [ X1 ] /* X0 = Location of saved context in TCB. */ + MOV SP, X0 + LDP X2, X3, [ SP ], # 0x10 /* Retrieve critical nesting and FPU indicator */ + + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, ullCriticalNesting + add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */ + #else + adrp X0, ullCriticalNestings + add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ + /* Calculate offset for current core's ullCriticalNesting */ + MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */ + AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID */ + LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */ + ADD X0, X0, X1 /* Add offset for the current core's ullCriticalNesting */ + #endif + + MOV X1, # 255 /* Default mask */ + CBZ X3, 4f + MOV X1, # portMAX_API_PRIORITY_MASK - ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer. */ -#endif -LDR X1, [ X0 ] -LDR X0, [ X1 ] -MOV SP, X0 -LDP X2, X3, [ SP ], # 0x10 /* Retrieve critical nesting and FPU indicator. */ -LDR X0, ullCriticalNestingsConst -/* Calculate offset for current core's ullCriticalNesting. */ -#if configNUMBER_OF_CORES > 1 -/* Existing code to get core ID and scale to pointer size is reused. */ - MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */ - AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID. */ - LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system). */ - ADD X0, X0, X1 /* Add offset for the current core's ullCriticalNesting. */ -#endif -MOV X1, # 255 /* Default mask. */ -CMP X3, # 0 -B.EQ 1f -LDR X6, ullMaxAPIPriorityMaskConst -LDR X1, [ X6 ] /* Use computed mask value. */ -1 : -MSR ICC_PMR_EL1, X1 /* Set interrupt mask. */ -DSB SY -ISB SY -STR X3, [ X0 ] /* Restore critical nesting .*/ -/* Restore the FPU context indicator. */ -LDR X0, ullPortTaskHasFPUContextConst -#if configNUMBER_OF_CORES > 1 -/* Existing code to get core ID and scale to pointer size is reused. */ - MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */ - AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID. */ - LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system). */ -/* Restore the FPU context indicator. */ - ADD X0, X0, X1 /* Add to the base of the FPU array. */ -#endif -STR X2, [ X0 ] - -/* Restore the FPU context, if any. */ -CMP X2, # 0 -B.EQ 1f -restorefloatregisters -1 : -LDP X2, X3, [ SP ], # 0x10 /* Restore SPSR and ELR. */ -MSR SPSR_EL1, X3 -MSR ELR_EL1, X2 - -restoreallgpregisters -/* Switch to use the ELx stack pointer. */ -MSR SPSEL, # 1 +4: + MSR ICC_PMR_EL1, X1 /* Set interrupt mask */ + DSB SY + ISB SY + STR X3, [ X0 ] /* Restore critical nesting */ + /* Restore the FPU context indicator. 
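 *
 * (Aside: with configUSE_TASK_FPU_SUPPORT == 1 this indicator starts at
 * zero and a task opts in explicitly before its first floating point
 * instruction, e.g.:
 *
 *     void prvDspTask( void * pvParameters )
 *     {
 *         vPortTaskUsesFPU();    enables FPU save/restore for this task
 *
 *         for( ; ; )
 *         {
 *             ... floating point work ...
 *         }
 *     }
 * )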
*/
+    adrp X0, ullPortTaskHasFPUContext
+    add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */
+    #if ( configNUMBER_OF_CORES > 1 )
+        MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register */
+        AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID */
+        LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system) */
+        ADD X0, X0, X1 /* Add to the base of the FPU array */
+    #endif
+    STR X2, [ X0 ]
+    /* Restore the FPU context, if any. */
+    CBZ X2, 5f
+    restorefloatregisters
-ERET
+5:
+    LDP X2, X3, [ SP ], # 0x10 /* Restore SPSR and ELR */
+
+    MSR SPSR_EL1, X3
+    MSR ELR_EL1, X2
+    restoreallgpregisters
+
+#if ( configENABLE_MPU == 1 )
+    /* Save pxTopOfStack ( ullContext ) on the task's TCB and set SP_EL0 to ullTaskUnprivilegedSP. */
+    MSR SPSEL, # 1
+    STP X8, X9, [ SP, # - 0x10 ] !
+    STR X10, [ SP, # - 0x10 ] !
+    #if ( configNUMBER_OF_CORES == 1 )
+        adrp X8, pxCurrentTCB
+        add X8, X8, :lo12:pxCurrentTCB /* X8 = &pxCurrentTCB */
+    #else
+        adrp X8, pxCurrentTCBs
+        add X8, X8, :lo12:pxCurrentTCBs /* X8 = &pxCurrentTCBs */
+        /* Get the core ID to index the TCB correctly. */
+        MRS X10, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */
+        AND X10, X10, # 0xff /* Extract Aff0 which contains the core ID */
+        LSL X10, X10, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */
+        ADD X8, X8, X10 /* Add the offset for the current core's TCB pointer */
+    #endif
+    LDR X9, [ X8 ]
+    MRS X8, SP_EL0
+    STR X8, [ X9 ] /* Store pxTopOfStack on task's TCB */
+    ADD X9, X9, #8 /* X9 = X9 + 8. X9 now points to ullTaskUnprivilegedSP in TCB. */
+    LDR X9, [ X9 ]
+    MSR SP_EL0, X9 /* Use ullTaskUnprivilegedSP as SP_EL0. */
+    LDR X10, [ SP ], # 0x10
+    LDP X8, X9, [ SP ], # 0x10
+#endif /* #if ( configENABLE_MPU == 1 ) */
+
+    /* Switch to use the EL1 stack pointer. */
+    MSR SPSEL, # 1
.endm
/*-----------------------------------------------------------*/
@@ -317,9 +580,14 @@ ERET
.align 8
.type FreeRTOS_SWI_Handler, % function
FreeRTOS_SWI_Handler:
-/* Save X0-X2 temporarily as they are used in the handler. */
+/* Save X0-X5 temporarily as they are used in the handler. */
STP X0, X1, [SP, #-0x10]!
-STR X2, [SP, #-0x10]!
+STP X2, X3, [SP, #-0x10]!
+STP X4, X5, [SP, #-0x10]!
+
+MRS X4, ELR_EL1 /* Save exception return address. */
+MRS X5, SPSR_EL1 /* Save the saved program status register (SPSR_EL1). */
+
/* Decide action based on SVC immediate without corrupting any task context. */
MRS X0, ESR_EL1
@@ -331,33 +599,38 @@ B.NE FreeRTOS_Abort
/* Extract SVC immediate from ISS[15:0]. */
AND X2, X0, # 0xFFFF
+/* portSVC_YIELD: yield from a running task. */
+CMP X2, # portSVC_YIELD
+B.EQ FreeRTOS_Yield
+
/* portSVC_START_FIRST_TASK: start first task on this core without saving any prior context. */
CMP X2, # portSVC_START_FIRST_TASK
-B.NE 1f
-/* Discard temp-saved X0-X2 before restoring first task. */
-ADD SP, SP, # 0x20
-B Start_First_Task
+B.EQ Start_First_Task
1:
-/* portSVC_DISABLE_INTERRUPTS: disable IRQs (DAIF.I) without touching task context. */
+/* portSVC_DISABLE_INTERRUPTS: disable IRQs (DAIF.I) in SPSR_EL1 without touching task context. */
CMP X2, # portSVC_DISABLE_INTERRUPTS
B.NE 2f
-MSR DAIFSET, # 2
-LDR X2, [SP], #0x10
+ORR X5, X5, # (1 << portPSTATE_I_BIT) /* Set I bit in SPSR_EL1 */
+MSR ELR_EL1, X4
+MSR SPSR_EL1, X5
+LDP X4, X5, [SP], #0x10
+LDP X2, X3, [SP], #0x10
LDP X0, X1, [SP], #0x10
DSB SY
ISB SY
ERET
2:
-/* portSVC_ENABLE_INTERRUPTS: enable IRQs (DAIF.I clear) without touching task context.
*/ +/* portSVC_ENABLE_INTERRUPTS: enable IRQs (DAIF.I clear) in SPSR_EL1 without touching task context. */ CMP X2, # portSVC_ENABLE_INTERRUPTS B.NE 3f -MSR DAIFCLR, # 2 -LDR X2, [SP], #0x10 +BIC X5, X5, # (1 << portPSTATE_I_BIT) /* Clear I bit in SPSR_EL1 */ +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 -DSB SY -ISB SY ERET 3: @@ -366,8 +639,10 @@ CMP X2, # portSVC_GET_CORE_ID B.NE 4f MRS X0, MPIDR_EL1 AND X0, X0, # 0xff -/* Restore X2, then restore X1 while discarding old X0. */ -LDR X2, [ SP ], # 0x10 +MSR SPSR_EL1, X5 +/* Restore X5-X1 while discarding old X0. */ +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [ SP ], # 0x10 LDP XZR, X1, [ SP ], # 0x10 ERET @@ -375,18 +650,16 @@ ERET /* portSVC_MASK_ALL_INTERRUPTS: set ICC_PMR_EL1 to max API mask and return previous-mask-equal flag in X0. */ CMP X2, # portSVC_MASK_ALL_INTERRUPTS B.NE 5f -/* Load max API mask from ullMaxAPIPriorityMask. */ -LDR X3, ullMaxAPIPriorityMaskConst -LDR X1, [ X3 ] /* Read current PMR and compare. */ -MRS X2, ICC_PMR_EL1 -CMP X2, X1 +MRS X0, ICC_PMR_EL1 +CMP X0, # portMAX_API_PRIORITY_MASK B.EQ 41f /* Disable IRQs while updating PMR. */ MSR DAIFSET, # 2 DSB SY ISB SY /* Write new PMR value. */ +MOV X1, # portMAX_API_PRIORITY_MASK MSR ICC_PMR_EL1, X1 DSB SY ISB SY @@ -394,11 +667,13 @@ ISB SY MSR DAIFCLR, # 2 DSB SY ISB SY +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 41: -MOV X0, X2 /* return ICC_PMR_EL1 original value */ -/* Restore X2, then restore X1 while discarding old X0. */ -LDR X2, [ SP ], # 0x10 +/* Restore X5-X1 while discarding old X0. */ +LDP X4, X5, [ SP ], # 0x10 +LDP X2, X3, [ SP ], # 0x10 LDP XZR, X1, [ SP ], # 0x10 ERET @@ -418,19 +693,22 @@ ISB SY MSR DAIFCLR, # 2 DSB SY ISB SY -LDR X2, [SP], #0x10 +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 ERET 6: -/* portSVC_UNMASK_INTERRUPTS: set ICC_PMR_EL1 to uxNewMaskValue. */ +/* portSVC_UNMASK_INTERRUPTS: set ICC_PMR_EL1 to uxNewMaskValue stored in X0. */ CMP X2, # portSVC_UNMASK_INTERRUPTS B.NE 7f /* Disable IRQs while updating PMR. */ MSR DAIFSET, # 2 DSB SY ISB SY -MOV X0, X1 /* uxNewMaskValue is in X1. */ +LDR X0, [ SP, # 0x20 ] /* Original X0 */ MSR ICC_PMR_EL1, X0 DSB SY ISB SY @@ -438,30 +716,247 @@ ISB SY MSR DAIFCLR, # 2 DSB SY ISB SY -LDR X2, [SP], #0x10 +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 ERET 7: -/* Default (portSVC_YIELD): yield from a running task. Save context first. */ -/* Restore X0-X2 to their original values before saving full context. */ -LDR X2, [SP], #0x10 +#if ( configENABLE_MPU == 1 ) + /* portSVC_CHECK_PRIVILEGE: Check if the task is a privileged task */ + CMP X2, # portSVC_CHECK_PRIVILEGE + B.NE 8f + savefuncontextgpregs + BL xPortIsTaskPrivileged + restorefuncontextgpregexceptx0 /* xPortIsTaskPrivileged() return value is stored in X0. */ + MSR ELR_EL1, X4 + MSR SPSR_EL1, X5 + /* Restore X5-X1 while discarding old X0. */ + LDP X4, X5, [ SP ], # 0x10 + LDP X2, X3, [ SP ], # 0x10 + LDP XZR, X1, [ SP ], # 0x10 + ERET +#endif /* #if ( configENABLE_MPU == 1 ) */ + +8: +/* portSVC_SAVE_TASK_CONTEXT: Save task's context */ +CMP X2, # portSVC_SAVE_TASK_CONTEXT +B.NE 9f +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +/* Restore X5-X0. 
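+ * ELR_EL1 and SPSR_EL1 were rewritten from X4/X5 first, so the
+ * portSAVE_CONTEXT below records the task's own return state rather
+ * than values clobbered while dispatching this SVC.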
*/ +LDP X4, X5, [ SP ], # 0x10 +LDP X2, X3, [ SP ], # 0x10 +LDP X0, X1, [ SP ], # 0x10 +portSAVE_CONTEXT +ERET + +9: +/* portSVC_RESTORE_CONTEXT: Restore task's context */ +CMP X2, # portSVC_RESTORE_CONTEXT +B.NE 10f +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +/* Restore X5-X0. */ +LDP X4, X5, [ SP ], # 0x10 +LDP X2, X3, [ SP ], # 0x10 +LDP X0, X1, [ SP ], # 0x10 +portRESTORE_CONTEXT +ERET + +10: +/* portSVC_DELETE_CURRENT_TASK: Delete current task */ +CMP X2, # portSVC_DELETE_CURRENT_TASK +B.NE 11f +/* Restore X5-X0. */ +LDP X4, X5, [ SP ], #0x10 +LDP X2, X3, [ SP ], # 0x10 +LDP X0, X1, [ SP ], # 0x10 +#if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ +#else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X1, X1, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X1, X1, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X1 /* Add the offset for the current core's TCB pointer */ +#endif +LDR X0, [ X0 ] /* X0 = pxCurrentTCB */ +B vTaskDelete + +11: +/* portSVC_INTERRUPT_CORE: Interrupt core */ +CMP X2, # portSVC_INTERRUPT_CORE +B.NE 12f +LDR X0, [ SP, # 0x20 ] /* Original X0 */ +MSR ICC_SGI1R_EL1, X0 /* X0 contains the value to write to ICC_SGI1R_EL1 */ +MSR ELR_EL1, X4 +MSR SPSR_EL1, X5 +/* Restore X5-X0. */ +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [ SP ], # 0x10 +LDP X0, X1, [ SP ], # 0x10 +ERET + +12: +#if ( configENABLE_MPU == 1 ) + /* ---------- SystemCallEnter? ---------------------------------*/ + LDR X3, =NUM_SYSTEM_CALLS + CMP X2, X3 + BLO 121f /* imm 0 … NUM_SYSCALLS-1 */ + + /* ---------- SystemCallExit? ----------------------------------*/ + LDR X3, =portSVC_SYSTEM_CALL_EXIT + CMP X2, X3 + BEQ 122f + +/* ---------- SystemCallEnter -------------------------------------*/ +121: + /* If calling task is privileged, directly tail-call the implementation at EL1. */ + savefuncontextgpregs + BL xPortIsTaskPrivileged + restorefuncontextgpregexceptx0 /* X0 holds pdTRUE if privileged */ + CBNZ X0, priv_path + + /* Unprivileged tasks path */ + #if ( configNUMBER_OF_CORES == 1 ) + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + #else + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X1, X1, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X1, X1, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X1 /* Add the offset for the current core's TCB pointer */ + #endif + LDR X0, [ X0 ] + LDR X0, [ X0 ] /* X0 = Location of saved context in TCB. */ + + /* Save inputs (X0-X3) and LR (X30) + * onto the current task's context to be used by the system call implementation. + */ + STR X30, [ X0, # ( portOFFSET_TO_LR * 8 ) ] + + /* Read original X0, X1, X2, and X3 from the EL1 stack without modifying SP, and store. + * [SP+0x20] -> X0, [SP+0x28] -> X1, [SP+0x10] -> X2, [SP+0x18] -> X3. 
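+     * (These offsets follow from the three pushes at handler entry:
+     * X4/X5 sit at SP, X2/X3 at SP + 0x10 and X0/X1 at SP + 0x20.)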
*/ + LDR X1, [ SP, # 0x20 ] /* Original X0 */ + STR X1, [ X0, # ( portOFFSET_TO_X0 * 8 ) ] + LDR X1, [ SP, # 0x28 ] /* Original X1 */ + STR X1, [ X0, # ( portOFFSET_TO_X1 * 8 ) ] + LDR X1, [ SP, # 0x10 ] /* Original X2 */ + STR X1, [ X0, # ( portOFFSET_TO_X2 * 8 ) ] + LDR X1, [ SP, # 0x18 ] /* Original X3 */ + STR X1, [ X0, # ( portOFFSET_TO_X3 * 8 ) ] + + /* Restore X2-X5 to their original values, discard X1 and X0 as they contain system call number + * and location of task's saved context in TCB. + */ + MOV X1, X2 /* Pass system call */ + LDP X4, X5, [ SP ], #0x10 + LDP X2, X3, [ SP ], #0x10 + ADD SP, SP, #0x10 /* Discard X0 and X1 */ + + savesyscallcontextgpregs + BL vSystemCallEnter /* returns after programming ELR/SPSR/SP_EL0 and args */ + /* Set LR for the syscall implementation to point to vRequestSystemCallExit. */ + adrp X30, vRequestSystemCallExit + add X30, X30, :lo12:vRequestSystemCallExit + restoresyscallcontextgpregs + ERET + +priv_path: + /* Load implementation address: uxSystemCallImplementations[X2] (64-bit entries). */ + adrp X3, uxSystemCallImplementations + add X3, X3, :lo12:uxSystemCallImplementations + LSL X2, X2, #3 /* Multiply index by size of pointer (8 bytes). */ + ADD X3, X3, X2 /* X3 = &uxSystemCallImplementations[X2] */ + LDR X3, [ X3 ] /* X3 = uxSystemCallImplementations[X2] */ + /* Return from exception directly to implementation; preserve original LR and registers. */ + MSR ELR_EL1, X3 + MSR SPSR_EL1, X5 + /* Restore X5-X0. */ + LDP X4, X5, [ SP ], #0x10 + LDP X2, X3, [ SP ], #0x10 + LDP X0, X1, [ SP ], #0x10 + ERET + + /* ---------- SystemCallExit -----------------------------------*/ +122: + LDR X0, [ SP, # 0x20 ] /* Restore X0 without changing SP as it contains system call return value */ + savefuncontextgpregs + BL vSystemCallExit + restorefuncontextgpregexceptx0 + /* Restore X5-X1 while discarding old X0. */ + LDP X4, X5, [ SP ], #0x10 + LDP X2, X3, [ SP ], #0x10 + LDP XZR, X1, [ SP ], #0x10 + ERET +#endif /* #if ( configENABLE_MPU == 1 ) */ + +/* ---------- Unexpected EC – just hang in place ---------------------------*/ +FreeRTOS_Abort: +B FreeRTOS_Abort + +FreeRTOS_Yield: +MSR SPSR_EL1, X5 + +/* Check if the task is in a critical section by inspecting ullCriticalNesting. */ +#if ( configNUMBER_OF_CORES > 1 ) + adrp X0, ullCriticalNestings + add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ + MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ + AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ + LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ + ADD X0, X0, X1 /* Add offset to base address. */ + LDR X1, [ X0 ] /* Load ullCriticalNesting for this core. */ + CBNZ X1, Skip_Context_Switch /* Skip context switch if in a critical section. */ +#endif + +/* Restore X5-X0 to their original values before saving full context. */ +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [SP], #0x10 LDP X0, X1, [SP], #0x10 portSAVE_CONTEXT -#if configNUMBER_OF_CORES > 1 +savefuncontextgpregs +#if ( configNUMBER_OF_CORES > 1 ) MRS x0, mpidr_el1 AND x0, x0, 255 #endif BL vTaskSwitchContext +restorefuncontextgpregs portRESTORE_CONTEXT +ERET + +Skip_Context_Switch: +/* Restore X5-X0 to their original values. */ +LDP X4, X5, [SP], #0x10 +LDP X2, X3, [SP], #0x10 +LDP X0, X1, [SP], #0x10 +ERET Start_First_Task: -/* Start-first-task path: pick a task and restore it (no prior save). */ -portRESTORE_CONTEXT + /* Restore X5-X0 to their original values. 
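+    * Nothing was running before this point, so there is no context to
+    * save: the registers stacked at SVC entry are simply popped and
+    * portRESTORE_CONTEXT builds the first task's state from its TCB.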
*/
+    LDP X4, X5, [SP], #0x10
+    LDP X2, X3, [SP], #0x10
+    LDP X0, X1, [SP], #0x10
+    portRESTORE_CONTEXT
+    ERET
-FreeRTOS_Abort:
-/* Full ESR is in X0, exception class code is in X1. */
-B .
+/******************************************************************************
+ * vPortSaveTaskContext is used to save the task's context into its stack.
+ *****************************************************************************/
+ .align 8
+ .type vPortSaveTaskContext, % function
+vPortSaveTaskContext:
+portSAVE_CONTEXT
+RET
/******************************************************************************
 * vPortRestoreTaskContext is used to start the scheduler.
@@ -470,16 +965,14 @@ B .
 .type vPortRestoreTaskContext, % function
vPortRestoreTaskContext:
.set freertos_vector_base, _freertos_vector_table
-
/* Install the FreeRTOS interrupt handlers. */
LDR X1, = freertos_vector_base
-    MSR VBAR_EL1, X1
+MSR VBAR_EL1, X1
DSB SY
ISB SY
-
/* Start the first task. */
-    portRESTORE_CONTEXT
-
+portRESTORE_CONTEXT
+ERET
/******************************************************************************
 * FreeRTOS_IRQ_Handler handles IRQ entry and exit.
@@ -496,7 +989,7 @@ ISB SY
 .type FreeRTOS_IRQ_Handler, % function
FreeRTOS_IRQ_Handler:
/* Save volatile registers. */
-savefuncontextgpregs
+saveallgpregisters
savefloatregisters
/* Save the SPSR and ELR. */
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [ SP, # - 0x10 ] !
/* Increment the interrupt nesting counter. */
-LDR X5, ullPortInterruptNestingsConst /* Load base address of the ullPortYieldRequired array */
-#if configNUMBER_OF_CORES > 1
-    /* Existing code to get core ID and scale to pointer size is reused. */
-    MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
-    AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID. */
-    LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system). */
-
-/* Calculate offset for the current core's ullPortYieldRequired and load its address. */
-    ADD X5, X5, X2 /* Add offset for the current core's ullPortYieldRequired. */
+#if ( configNUMBER_OF_CORES == 1 )
+    adrp X5, ullPortInterruptNesting
+    add X5, X5, :lo12:ullPortInterruptNesting /* X5 = &ullPortInterruptNesting */
+#else
+    adrp X5, ullPortInterruptNestings
+    add X5, X5, :lo12:ullPortInterruptNestings /* X5 = &ullPortInterruptNestings */
+    MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
+    AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID. */
+    LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system). */
+
+    /* Calculate offset for the current core's ullPortInterruptNesting and load its address. */
+    ADD X5, X5, X2 /* Add offset for the current core's ullPortInterruptNesting. */
#endif
LDR X1, [ X5 ] /* Old nesting count in X1. */
ADD X6, X1, # 1
@@ -530,8 +1026,10 @@ MRS X0, ICC_IAR1_EL1
/* Maintain the interrupt ID value across the function call. */
STP X0, X1, [ SP, # - 0x10 ] !
+savefuncontextgpregs
/* Call the C handler. */
BL vApplicationIRQHandler
+restorefuncontextgpregs
/* Disable interrupts. */
MSR DAIFSET, # 2
@@ -554,20 +1052,31 @@ CMP X1, # 0
B.NE Exit_IRQ_No_Context_Switch
/* Is a context switch required? */
-LDR X0, ullPortYieldRequiredConst
-#if configNUMBER_OF_CORES > 1
-/* Existing code to get core ID and scale to pointer size is reused. */
-    MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
-    AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID. */
-    LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system).
*/ - +adrp X0, ullPortYieldRequired +add X0, X0, :lo12:ullPortYieldRequired /* X0 = &ullPortYieldRequired */ +#if ( configNUMBER_OF_CORES > 1 ) + MRS X2, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */ + AND X2, X2, # 0xff /* Extract Aff0, which contains the core ID. */ + LSL X2, X2, # 3 /* Scale core ID to the size of a pointer (assuming 64-bit system). */ /* Calculate offset for the current core's ullPortYieldRequired and load its address. */ - ADD X0, X0, X2 /* Add offset for the current core's ullPortYieldRequired. */ + ADD X0, X0, X2 /* Add offset for the current core's ullPortYieldRequired. */ #endif LDR X1, [ X0 ] CMP X1, # 0 B.EQ Exit_IRQ_No_Context_Switch +/* Check if the task is in a critical section by inspecting ullCriticalNesting. */ +#if ( configNUMBER_OF_CORES > 1 ) + adrp X0, ullCriticalNestings + add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ + MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ + AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ + LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ + ADD X0, X0, X1 /* Add offset to base address. */ + LDR X1, [ X0 ] /* Load ullCriticalNesting for this core. */ + CBNZ X1, Exit_IRQ_No_Context_Switch /* Skip context switch if in a critical section. */ +#endif + /* Reset ullPortYieldRequired to 0. */ MOV X2, # 0 STR X2, [ X0 ] @@ -581,7 +1090,7 @@ DSB SY ISB SY restorefloatregisters -restorefuncontextgpregs +restoreallgpregisters /* Save the context of the current task and select a new task to run. */ portSAVE_CONTEXT @@ -589,8 +1098,11 @@ portSAVE_CONTEXT MRS x0, mpidr_el1 AND x0, x0, 255 #endif +savefuncontextgpregs BL vTaskSwitchContext +restorefuncontextgpregs portRESTORE_CONTEXT +ERET Exit_IRQ_No_Context_Switch: /* Restore volatile registers. */ @@ -602,7 +1114,7 @@ DSB SY ISB SY restorefloatregisters -restorefuncontextgpregs +restoreallgpregisters ERET @@ -628,36 +1140,17 @@ restorefuncontextgpregs .weak vApplicationIRQHandler .type vApplicationIRQHandler, % function vApplicationIRQHandler: -/* Save LR and FP on the stack. */ -STP X29, X30, [ SP, # - 0x10 ] ! /* Save FPU registers (32 128-bits + 2 64-bits configuration and status registers). */ savefloatregisters +savefuncontextgpregs /* Call the C handler. */ BL vApplicationFPUSafeIRQHandler +restorefuncontextgpregs /* Restore FPU registers. */ restorefloatregisters -/* Restore FP and LR. */ -LDP X29, X30, [ SP ], # 0x10 RET - - .align 8 -#if ( configNUMBER_OF_CORES == 1 ) -pxCurrentTCBsConst:.dword pxCurrentTCB -ullCriticalNestingsConst:.dword ullCriticalNesting -ullPortInterruptNestingsConst:.dword ullPortInterruptNesting -ullPortYieldRequiredConst:.dword ullPortYieldRequired -ullPortTaskHasFPUContextConst:.dword ullPortTaskHasFPUContext -#else -pxCurrentTCBsConst:.dword pxCurrentTCBs -ullCriticalNestingsConst:.dword ullCriticalNestings -ullPortInterruptNestingsConst:.dword ullPortInterruptNestings -ullPortYieldRequiredConst:.dword ullPortYieldRequired -ullPortTaskHasFPUContextConst:.dword ullPortTaskHasFPUContext -#endif /* if ( configNUMBER_OF_CORES == 1 ) */ -ullMaxAPIPriorityMaskConst:.dword ullMaxAPIPriorityMask -vApplicationIRQHandlerConst:.word vApplicationIRQHandler .end diff --git a/portable/GCC/ARM_CR82/portmacro.h b/portable/GCC/ARM_CR82/portmacro.h index 1617491efc8..82be611b551 100644 --- a/portable/GCC/ARM_CR82/portmacro.h +++ b/portable/GCC/ARM_CR82/portmacro.h @@ -109,14 +109,21 @@ /** * @brief SVC numbers. 
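+ *
+ * Each value is the immediate of an SVC instruction; for example,
+ * portYIELD() below expands to
+ * __asm volatile ( "SVC %0" : : "i" ( portSVC_YIELD ) : "memory" ),
+ * which traps to FreeRTOS_SWI_Handler where the immediate is recovered
+ * from ISS[15:0] of ESR_EL1 and dispatched.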
*/ -#define portSVC_YIELD 105 -#define portSVC_START_FIRST_TASK 106 -#define portSVC_DISABLE_INTERRUPTS 107 -#define portSVC_ENABLE_INTERRUPTS 108 -#define portSVC_GET_CORE_ID 109 -#define portSVC_MASK_ALL_INTERRUPTS 110 -#define portSVC_UNMASK_ALL_INTERRUPTS 111 -#define portSVC_UNMASK_INTERRUPTS 112 + +#define portSVC_SYSTEM_CALL_EXIT 104 +#define portSVC_YIELD 105 +#define portSVC_START_FIRST_TASK 106 +#define portSVC_DISABLE_INTERRUPTS 107 +#define portSVC_ENABLE_INTERRUPTS 108 +#define portSVC_GET_CORE_ID 109 +#define portSVC_MASK_ALL_INTERRUPTS 110 +#define portSVC_UNMASK_ALL_INTERRUPTS 111 +#define portSVC_UNMASK_INTERRUPTS 112 +#define portSVC_CHECK_PRIVILEGE 113 +#define portSVC_SAVE_TASK_CONTEXT 114 +#define portSVC_RESTORE_CONTEXT 115 +#define portSVC_DELETE_CURRENT_TASK 116 +#define portSVC_INTERRUPT_CORE 117 #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) #define portYIELD() __asm volatile ( "SVC %0" : : "i" ( portSVC_YIELD ) : "memory" ) @@ -246,6 +253,7 @@ #define portINTERRUPT_PRIORITY_REGISTER_OFFSET ( 0x400U ) #define portYIELD_CORE_INT_ID ( 0x0U ) +#define portMAX_API_PRIORITY_MASK ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) #if ( configNUMBER_OF_CORES > 1 ) @@ -261,14 +269,16 @@ extern void vPortRecursiveLock( BaseType_t xCoreID, ePortRTOSLock eLockNum, BaseType_t uxAcquire ); - extern BaseType_t xPortGetCoreID( void ); + extern uint8_t ucPortGetCoreID( void ); + extern uint8_t ucPortGetCoreIDFromIsr( void ); #endif /* if !defined(__ASSEMBLER__) */ #define portSET_INTERRUPT_MASK() uxPortSetInterruptMask() #define portCLEAR_INTERRUPT_MASK( x ) vPortClearInterruptMask( x ) - #define portMAX_CORE_COUNT configNUMBER_OF_CORES - #define portGET_CORE_ID() xPortGetCoreID() + #define portMAX_CORE_COUNT configNUMBER_OF_CORES + #define portGET_CORE_ID() ucPortGetCoreID() + #define portGET_CORE_ID_FROM_ISR() ucPortGetCoreIDFromIsr() /* Use SGI 0 as the yield core interrupt. */ #define portYIELD_CORE( xCoreID ) vInterruptCore( portYIELD_CORE_INT_ID, ( uint32_t ) xCoreID ) @@ -288,6 +298,242 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/** + * @brief MPU specific constants. + */ +#if ( configENABLE_MPU == 1 ) + + #if !defined(__ASSEMBLER__) + extern BaseType_t xPortIsTaskPrivileged( void ); + #endif /* if !defined(__ASSEMBLER__) */ + + /* Device memory attributes used in MAIR_EL1 registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) + + /* MPU settings that can be overridden in FreeRTOSConfig.h. */ + #ifndef configTOTAL_MPU_REGIONS + #define configTOTAL_MPU_REGIONS ( 16UL ) + #endif + + #define portPRIVILEGED_FLASH_REGION ( 0ULL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1ULL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2ULL ) + #define portPRIVILEGED_RAM_REGION ( 3ULL ) + #define portSTACK_REGION ( 0ULL ) + #define portFIRST_CONFIGURABLE_REGION ( 5ULL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
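+     * As a worked example, the default configTOTAL_MPU_REGIONS of 16 gives
+     * portFIRST_CONFIGURABLE_REGION = 5, portLAST_CONFIGURABLE_REGION = 15,
+     * portNUM_CONFIGURABLE_REGIONS = 11 and portTOTAL_NUM_REGIONS = 12.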
*/ + + #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) + #define portACL_ENTRY_SIZE_BITS ( 32UL ) + #endif /* configENABLE_ACCESS_CONTROL_LIST == 1 */ + + #if !defined(__ASSEMBLER__) + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint64_t ullPrbarEl1; /**< PRBAR_EL1 for the region. */ + uint64_t ullPrlarEl1; /**< PRLAR_EL1 for the region. */ + } MPURegionSettings_t; + + #ifndef configSYSTEM_CALL_STACK_SIZE + #define configSYSTEM_CALL_STACK_SIZE 128 /* must be defined to the desired size of the system call stack in words for using MPU wrappers v2. */ + #endif + + /** + * @brief System call info. + */ + typedef struct SYSTEM_CALL_INFO + { + /* Used to save both the user-mode stack pointer (SP_EL0) and link register (X30) + * at system call entry so they can be restored or referenced safely even if the task + * switches out while executing the system call. + */ + uint64_t ullLinkRegisterAtSystemCallEntry; + uint64_t ullUserSPAtSystemCallEntry; + } xSYSTEM_CALL_INFO; + #endif /* if !defined(__ASSEMBLER__) */ + + /** + * @brief Task context as stored in the TCB. + */ + #if ( configENABLE_FPU == 1 ) + /* + * +-----------+------------+--------------------------------+-------------+------------------+ + * | Q0-Q31 | FPSR, FPCR | CRITICAL_NESTING, FPU_CONTEXT | X0-X30, XZR | INIT_PSTATE, PC | + * +-----------+------------+--------------------------------+-------------+------------------+ + * + * <-----------><-----------><-------------------------------><------------><-----------------> + * 64 2 2 32 2 + */ + #define MAX_CONTEXT_SIZE 102 + + #else /* #if ( configENABLE_FPU == 1 ) */ + /* + * +--------------------------------+-------------+------------------+ + * | CRITICAL_NESTING, FPU_CONTEXT | X0-X30, XZR | INIT_PSTATE, PC | + * +--------------------------------+-------------+------------------+ + * <-------------------------------><------------><------------------> + * 2 32 2 + */ + #define MAX_CONTEXT_SIZE 36 + #endif /* #if ( configENABLE_FPU == 1 ) */ + + #if !defined(__ASSEMBLER__) + typedef struct MPU_SETTINGS + { + uint64_t ullTaskUnprivilegedSP; /* Task's unprivileged user stack pointer. */ + uint64_t ullMairEl1; /* MAIR_EL1 for the task containing attributes. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /* Settings for tasks' regions. */ + uint64_t ullContext[ MAX_CONTEXT_SIZE + configSYSTEM_CALL_STACK_SIZE ]; /* Task's saved context. */ + uint64_t ullTaskFlags; + + xSYSTEM_CALL_INFO xSystemCallInfo; + + #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) + uint32_t ulAccessControlList[ ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE / portACL_ENTRY_SIZE_BITS ) + 1 ]; + #endif /* configENABLE_ACCESS_CONTROL_LIST */ + } xMPU_SETTINGS; + #endif /* if !defined(__ASSEMBLER__) */ + + #define portUSING_MPU_WRAPPERS ( 1 ) + #define portPRIVILEGE_BIT ( 0x80000000UL ) + + /* Normal memory attributes used in MAIR_EL1 registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. 
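+     * Per the ARMv8-R MAIR encoding, bits[7:4] hold the outer and bits[3:0]
+     * the inner attributes: 0x44 is outer/inner Normal Non-cacheable, while
+     * 0xFF is outer/inner Write-Back, Non-transient, Read/Write-Allocate.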
*/
+
+    #define portMPU_MAIR_EL1_ATTR0_POS ( 0UL )
+    #define portMPU_MAIR_EL1_ATTR0_MASK ( 0x00000000000000ffULL )
+
+    #define portMPU_MAIR_EL1_ATTR1_POS ( 8UL )
+    #define portMPU_MAIR_EL1_ATTR1_MASK ( 0x000000000000ff00ULL )
+
+    #define portMPU_MAIR_EL1_ATTR2_POS ( 16UL )
+    #define portMPU_MAIR_EL1_ATTR2_MASK ( 0x0000000000ff0000ULL )
+
+    #define portMPU_MAIR_EL1_ATTR3_POS ( 24UL )
+    #define portMPU_MAIR_EL1_ATTR3_MASK ( 0x00000000ff000000ULL )
+
+    #define portMPU_MAIR_EL1_ATTR4_POS ( 32UL )
+    #define portMPU_MAIR_EL1_ATTR4_MASK ( 0x000000ff00000000ULL )
+
+    #define portMPU_MAIR_EL1_ATTR5_POS ( 40UL )
+    #define portMPU_MAIR_EL1_ATTR5_MASK ( 0x0000ff0000000000ULL )
+
+    #define portMPU_MAIR_EL1_ATTR6_POS ( 48UL )
+    #define portMPU_MAIR_EL1_ATTR6_MASK ( 0x00ff000000000000ULL )
+
+    #define portMPU_MAIR_EL1_ATTR7_POS ( 56UL )
+    #define portMPU_MAIR_EL1_ATTR7_MASK ( 0xff00000000000000ULL )
+
+    #define portMPU_PRBAR_EL1_ADDRESS_MASK ( 0x0000FFFFFFFFFFC0ULL )
+    #define portMPU_PRLAR_EL1_ADDRESS_MASK ( 0x0000FFFFFFFFFFC0ULL )
+    #define portMPU_PRBAR_EL1_ACCESS_PERMISSIONS_MASK ( 3ULL << 2ULL )
+
+    #define portMPU_REGION_NON_SHAREABLE ( 0ULL << 4ULL )
+    #define portMPU_REGION_OUTER_SHAREABLE ( 2ULL << 4ULL )
+    #define portMPU_REGION_INNER_SHAREABLE ( 3ULL << 4ULL )
+
+    #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0ULL << 2ULL )
+    #define portMPU_REGION_READ_WRITE ( 1ULL << 2ULL )
+    #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2ULL << 2ULL )
+    #define portMPU_REGION_READ_ONLY ( 3ULL << 2ULL )
+
+    #define portMPU_REGION_EXECUTE_NEVER ( 1ULL << 1ULL )
+
+    #define portMPU_PRLAR_EL1_ATTR_INDEX0 ( 0ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX1 ( 1ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX2 ( 2ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX3 ( 3ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX4 ( 4ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX5 ( 5ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX6 ( 6ULL << 1ULL )
+    #define portMPU_PRLAR_EL1_ATTR_INDEX7 ( 7ULL << 1ULL )
+
+    #define portMPU_PRLAR_EL1_REGION_ENABLE ( 1ULL )
+
+    #define portMPU_ENABLE_BIT ( 1ULL << 0ULL )
+    #define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1ULL << 17ULL )
+
+    /* Max value that fits in a uint64_t type. */
+    #define portUINT64_MAX ( ~( ( uint64_t ) 0 ) )
+    #define portADD_UINT64_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT64_MAX - ( b ) ) )
+
+    /* Extract first address of the MPU region as encoded in the
+     * PRBAR_EL1 register value. */
+    #define portEXTRACT_FIRST_ADDRESS_FROM_PRBAR_EL1( prbar_el1 ) \
+    ( ( prbar_el1 ) & portMPU_PRBAR_EL1_ADDRESS_MASK )
+
+    /* Extract last address of the MPU region as encoded in the
+     * PRLAR_EL1 register value. */
+    #define portEXTRACT_LAST_ADDRESS_FROM_PRLAR_EL1( prlar_el1 ) \
+    ( ( ( prlar_el1 ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) | ~portMPU_PRLAR_EL1_ADDRESS_MASK )
+
+    /* Does addr lie within the [start, end] address range? */
+    #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
+    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
+
+    /* Is the access request satisfied by the available permissions? */
+    #define portIS_AUTHORIZED( accessRequest, permissions ) \
+    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
+
+    /**
+     * @brief Offsets in the task's stack (context).
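+     *
+     * The indices follow the saved-context layouts pictured above: with an
+     * FPU context, the 64 doublewords of Q registers plus FPSR/FPCR sit
+     * between the nesting/indicator pair and the saved ELR, which is why
+     * portOFFSET_TO_PC moves from 2 to 68 (2 + 64 + 2) between the two
+     * configurations.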
+     */
+    #if ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT )
+        #define portOFFSET_TO_PC ( 68 )
+        #define portOFFSET_TO_LR ( 70 )
+        #define portOFFSET_TO_X0 ( 100 )
+        #define portOFFSET_TO_X1 ( 101 )
+        #define portOFFSET_TO_X2 ( 98 )
+        #define portOFFSET_TO_X3 ( 99 )
+    #else
+        #define portOFFSET_TO_PC ( 2 )
+        #define portOFFSET_TO_LR ( 4 )
+        #define portOFFSET_TO_X0 ( 34 )
+        #define portOFFSET_TO_X1 ( 35 )
+        #define portOFFSET_TO_X2 ( 32 )
+        #define portOFFSET_TO_X3 ( 33 )
+    #endif
+
+    /**
+     * @brief Flag used to mark that a Task is privileged.
+     *
+     * @ingroup Port Privilege
+     */
+    #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL )
+
+    /**
+     * @brief Checks whether or not the calling task is privileged.
+     *
+     * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
+     */
+    #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged()
+
+#else
+
+    #define portPRIVILEGE_BIT ( 0x0UL )
+
+#endif /* #if ( configENABLE_MPU == 1 ) */
+
+#define portPSTATE_I_BIT ( 0x7 )
+
/* *INDENT-OFF* */
#ifdef __cplusplus
}

From 8e8d4152e33596dde269547389085166e69928b4 Mon Sep 17 00:00:00 2001
From: Ahmed Ismail
Date: Thu, 9 Oct 2025 14:45:09 +0100
Subject: [PATCH 2/2] cortex-r82: Minor code improvements

This commit includes minor code improvements to enhance the readability
and maintainability of the Cortex-R82 port files. Changes include
renaming variables for consistency, tightening comments, and improving
code structure without altering functionality.

Signed-off-by: Ahmed Ismail
---
 portable/GCC/ARM_CR82/port.c      | 108 +++++++++++++++++-------------
 portable/GCC/ARM_CR82/portASM.S   | 101 ++++++++++++++--------------
 portable/GCC/ARM_CR82/portmacro.h |  20 +++---
 3 files changed, 124 insertions(+), 105 deletions(-)

diff --git a/portable/GCC/ARM_CR82/port.c b/portable/GCC/ARM_CR82/port.c
index cc08c9f240d..af667915c56 100644
--- a/portable/GCC/ARM_CR82/port.c
+++ b/portable/GCC/ARM_CR82/port.c
@@ -229,7 +229,6 @@
 * assembly code so is implemented in portASM.s. */
extern void vPortRestoreTaskContext( void );
-
extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
extern void vGIC_PowerUpRedistributor( void );
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
/*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES == 1 )
+
PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;
-/* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
- * then floating point context must be saved and restored for the task. */
+    /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
+     * then floating point context must be saved and restored for the task. */
PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;
-/* Set to 1 to pend a context switch from an ISR. */
+    /* Set to 1 to pend a context switch from an ISR. */
PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;
-/* Counts the interrupt nesting depth. A context switch is only performed if
- * if the nesting depth is 0. */
+    /* Counts the interrupt nesting depth. A context switch is only performed if
+     * the nesting depth is 0. */
PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;
+
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
/* Flags to check if the secondary cores are ready.
*/
PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };
+
+    /* Flag to signal that the primary core has done all the shared initialisations. */
PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;
-    /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
+
+    /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
 * then floating point context must be saved and restored for the task. */
PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };
+
+    /* Set to 1 to pend a context switch from an ISR. */
PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };
+
+    /* Counts the interrupt nesting depth. A context switch is only performed if
+     * the nesting depth is 0. */
PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };
#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -1157,12 +1166,12 @@ BaseType_t xPortStartScheduler( void )
 volatile uint8_t ucMaxPriorityValue;
 /* Determine how many priority bits are implemented in the GIC.
- *
- * Save the interrupt priority value that is about to be clobbered. */
+     *
+     * Save the interrupt priority value that is about to be clobbered. */
 ucOriginalPriority = *pucFirstUserPriorityRegister;
 /* Determine the number of priority bits available. First write to
- * all possible bits. */
+     * all possible bits. */
 *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;
 /* Read the value back to see how many bits stuck. */
 ucMaxPriorityValue = *pucFirstUserPriorityRegister;
@@ -1175,12 +1184,12 @@
 }
 /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
- * value. */
+     * value. */
 configASSERT( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
 /* Restore the clobbered interrupt priority register to its original
- * value. */
+     * value. */
 *pucFirstUserPriorityRegister = ucOriginalPriority;
 }
#endif /* configASSERT_DEFINED */
@@ -1523,9 +1532,9 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
#if ( configNUMBER_OF_CORES > 1 )
 /* Which core owns the lock? Keep in privileged, shareable RAM. */
-    PRIVILEGED_DATA volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
+    PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];
 /* Lock count a core owns. */
-    PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock[ eLockCount ];
+    PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];
 /* Index 0 is used for ISR lock and Index 1 is used for task lock. */
 PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
@@ -1549,13 +1558,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 static inline void prvSpinUnlock( uint32_t * ulLock )
 {
+        /* Conservative unlock: preserve original barriers for broad HW/FVP. */
 __asm volatile (
-            "dmb sy\n"
-            "mov w1, #0\n"
-            "str w1, [%x0]\n"
-            "sev\n"
-            "dsb sy\n"
-            "isb sy\n"
+            "dmb sy          \n"
+            "mov w1, #0      \n"
+            "str w1, [%x0]   \n"
+            "sev             \n"
+            "dsb sy          \n"
+            "isb sy          \n"
 :
 : "r" ( ulLock )
 : "memory", "w1"
 );
 }
@@ -1566,22 +1576,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
 static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
 {
+        /*
+         * Conservative LDXR/STXR trylock:
+         *   - Return 1 immediately if busy, clearing exclusive state (CLREX).
+         *   - Retry STXR only on spurious failure when observed free.
+         *   - DMB on success to preserve expected acquire semantics.
+         */
 register uint32_t ulRet;
-        /* Try to acquire spinlock; caller is responsible for further barriers.
*/ __asm volatile ( - "1:\n" - "ldxr w1, [%x1]\n" - "cmp w1, #1\n" - "beq 2f\n" - "mov w2, #1\n" - "stxr w1, w2, [%x1]\n" - "cmp w1, #0\n" - "bne 1b\n" - "2:\n" - "mov %w0, w1\n" + "1: \n" + "ldxr w1, [%x1] \n" + "cbnz w1, 2f \n" /* Busy -> return 1 */ + "mov w2, #1 \n" + "stxr w3, w2, [%x1] \n" /* w3 = status */ + "cbnz w3, 1b \n" /* Retry on STXR failure */ + "dmb sy \n" /* Acquire barrier on success */ + "mov %w0, #0 \n" /* Success */ + "b 3f \n" + "2: \n" + "clrex \n" /* Clear monitor when busy */ + "mov %w0, #1 \n" /* Busy */ + "3: \n" : "=r" ( ulRet ) : "r" ( ulLock ) - : "memory", "w1", "w2" + : "memory", "w1", "w2", "w3" ); return ulRet; @@ -1629,10 +1647,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 ) { /* Check if the core owns the spinlock. */ - if( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) + if( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit ) { - configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 255u ); - prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) + 1 ) ); + configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 255u ); + prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) + 1 ) ); return; } @@ -1656,26 +1674,26 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void ) __asm__ __volatile__ ( "dmb sy" ::: "memory" ); /* Assert the lock count is 0 when the spinlock is free and is acquired. */ - configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) == 0 ); + configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) == 0 ); /* Set lock count as 1. */ - prvSet64( &ucRecursionCountByLock[ eLockNum ], 1 ); - /* Set ucOwnedByCore. */ - prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) | ulLockBit ) ); + prvSet64( &ullRecursionCountByLock[ eLockNum ], 1 ); + /* Set ullOwnedByCore. */ + prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) | ulLockBit ) ); } /* Lock release. */ else { /* Assert the lock is not free already. */ - configASSERT( ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 ); - configASSERT( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) != 0 ); + configASSERT( ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ulLockBit ) != 0 ); + configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 0 ); - /* Reduce ucRecursionCountByLock by 1. */ - prvSet64( &ucRecursionCountByLock[ eLockNum ], ( prvGet64( &ucRecursionCountByLock[ eLockNum ] ) - 1 ) ); + /* Reduce ullRecursionCountByLock by 1. */ + prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) - 1 ) ); - if( !prvGet64( &ucRecursionCountByLock[ eLockNum ] ) ) + if( !prvGet64( &ullRecursionCountByLock[ eLockNum ] ) ) { - prvSet64( &ucOwnedByCore[ xCoreID ], ( prvGet64( &ucOwnedByCore[ xCoreID ] ) & ~ulLockBit ) ); + prvSet64( &ullOwnedByCore[ xCoreID ], ( prvGet64( &ullOwnedByCore[ xCoreID ] ) & ~ulLockBit ) ); prvSpinUnlock( &ulGateWord[ eLockNum ] ); /* Add barrier to ensure lock status is reflected before we proceed. */ __asm__ __volatile__ ( "dmb sy" ::: "memory" ); diff --git a/portable/GCC/ARM_CR82/portASM.S b/portable/GCC/ARM_CR82/portASM.S index 16fc7780de8..0923ef31194 100644 --- a/portable/GCC/ARM_CR82/portASM.S +++ b/portable/GCC/ARM_CR82/portASM.S @@ -52,13 +52,13 @@ /* Variables and functions. 
*/ #if ( configNUMBER_OF_CORES == 1 ) - .extern pxCurrentTCB - .extern ullCriticalNesting - .extern ullPortInterruptNesting + .extern pxCurrentTCB + .extern ullCriticalNesting + .extern ullPortInterruptNesting #else /* #if ( configNUMBER_OF_CORES == 1 ) */ - .extern pxCurrentTCBs - .extern ullCriticalNestings - .extern ullPortInterruptNestings + .extern pxCurrentTCBs + .extern ullCriticalNestings + .extern ullPortInterruptNestings #endif .extern vTaskSwitchContext .extern vApplicationIRQHandler @@ -308,16 +308,16 @@ LDP Q0, Q1, [ SP ], # 0x20 /* Store user allocated task stack and use ullContext as the SP */ #if ( configNUMBER_OF_CORES == 1 ) - adrp X0, pxCurrentTCB - add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ #else - adrp X0, pxCurrentTCBs - add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ - /* Get the core ID to index the TCB correctly. */ - MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ - AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ - LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ - ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ #endif LDR X1, [ X0 ] ADD X1, X1, #8 /* X1 = X1 + 8, X1 now points to ullTaskUnprivilegedSP in TCB. */ @@ -339,16 +339,16 @@ LDP Q0, Q1, [ SP ], # 0x20 #endif CBNZ X0, 3f /* If task is privileged, skip saving MPU context. */ #if ( configNUMBER_OF_CORES == 1 ) - adrp X0, pxCurrentTCB - add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ + adrp X0, pxCurrentTCB + add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */ #else - adrp X0, pxCurrentTCBs - add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ - /* Get the core ID to index the TCB correctly. */ - MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ - AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ - LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ - ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ + adrp X0, pxCurrentTCBs + add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer */ #endif LDR X0, [ X0 ] @@ -369,8 +369,10 @@ LDP Q0, Q1, [ SP ], # 0x20 #endif /* #if ( configENABLE_MPU == 1 ) */ MSR SPSEL, # 0 + /* Save the entire context. */ saveallgpregisters + /* Save the SPSR and ELR values. */ MRS X3, SPSR_EL1 MRS X2, ELR_EL1 @@ -379,24 +381,25 @@ STP X2, X3, [ SP, # - 0x10 ] ! /* Save the critical section nesting depth. 
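+ * The count travels with the task's context so that portRESTORE_CONTEXT
+ * can re-derive the correct ICC_PMR_EL1 value on resume: 255 (nothing
+ * masked) when the count is zero, portMAX_API_PRIORITY_MASK otherwise.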
*/ #if ( configNUMBER_OF_CORES == 1 ) -adrp X0, ullCriticalNesting -add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */ + adrp X0, ullCriticalNesting + add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */ #else -adrp X0, ullCriticalNestings -add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ -/* Calculate per-core index using MPIDR_EL1 for SMP support. */ -MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ -AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ -LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ -ADD X0, X0, X1 /* Add offset to base address. */ + adrp X0, ullCriticalNestings + add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */ + /* Calculate per-core index using MPIDR_EL1 for SMP support. */ + MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */ + AND X1, X1, # 0xff /* Extract Aff0 (core ID). */ + LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes). */ + ADD X0, X0, X1 /* Add offset to base address. */ #endif + LDR X3, [ X0 ] /* Save the FPU context indicator. */ adrp X0, ullPortTaskHasFPUContext add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */ -#if configNUMBER_OF_CORES > 1 +#if ( configNUMBER_OF_CORES > 1 ) ADD X0, X0, X1 /* Add to the base of the FPU array. */ #endif LDR X2, [ X0 ] @@ -547,16 +550,16 @@ MSR SPSEL, # 1 STP X8, X9, [ SP, # - 0x10 ] ! STR X10, [ SP, # - 0x10 ] ! #if ( configNUMBER_OF_CORES == 1 ) - adrp X8, pxCurrentTCB - add X8, X8, :lo12:pxCurrentTCB /* X8 = &pxCurrentTCB */ + adrp X8, pxCurrentTCB + add X8, X8, :lo12:pxCurrentTCB /* X8 = &pxCurrentTCB */ #else - adrp X8, pxCurrentTCBs - add X8, X8, :lo12:pxCurrentTCBs /* X8 = &pxCurrentTCBs */ - /* Get the core ID to index the TCB correctly. */ - MRS X10, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ - AND X10, X10, # 0xff /* Extract Aff0 which contains the core ID */ - LSL X10, X10, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ - ADD X8, X8, X10 /* Add the offset for the current core's TCB pointer */ + adrp X8, pxCurrentTCBs + add X8, X8, :lo12:pxCurrentTCBs /* X8 = &pxCurrentTCBs */ + /* Get the core ID to index the TCB correctly. */ + MRS X10, MPIDR_EL1 /* Read the Multiprocessor Affinity Register */ + AND X10, X10, # 0xff /* Extract Aff0 which contains the core ID */ + LSL X10, X10, # 3 /* Scale the core ID to the size of a pointer (64-bit system) */ + ADD X8, X8, X10 /* Add the offset for the current core's TCB pointer */ #endif LDR X9, [ X8 ] MRS X8, SP_EL0 @@ -926,8 +929,8 @@ LDP X0, X1, [SP], #0x10 portSAVE_CONTEXT savefuncontextgpregs #if ( configNUMBER_OF_CORES > 1 ) -MRS x0, mpidr_el1 -AND x0, x0, 255 + MRS x0, mpidr_el1 + AND x0, x0, 255 #endif BL vTaskSwitchContext restorefuncontextgpregs @@ -1039,12 +1042,12 @@ ISB SY /* Restore the interrupt ID value. */ LDP X0, X1, [ SP ], # 0x10 - /* End IRQ processing by writing interrupt ID value to the EOI register. */ MSR ICC_EOIR1_EL1, X0 /* Restore the critical nesting count. */ LDP X1, X5, [ SP ], # 0x10 + STR X1, [ X5 ] /* Has interrupt nesting unwound? */ @@ -1095,8 +1098,8 @@ restoreallgpregisters /* Save the context of the current task and select a new task to run. 
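+ * In the SMP build vTaskSwitchContext() takes the calling core's ID as
+ * its argument, hence the MPIDR_EL1 Aff0 read into X0 below.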
*/ portSAVE_CONTEXT #if configNUMBER_OF_CORES > 1 - MRS x0, mpidr_el1 - AND x0, x0, 255 + MRS x0, mpidr_el1 + AND x0, x0, 255 #endif savefuncontextgpregs BL vTaskSwitchContext @@ -1116,7 +1119,7 @@ ISB SY restorefloatregisters restoreallgpregisters - ERET +ERET /****************************************************************************** * If the application provides an implementation of vApplicationIRQHandler(), diff --git a/portable/GCC/ARM_CR82/portmacro.h b/portable/GCC/ARM_CR82/portmacro.h index 82be611b551..8ca19dac23b 100644 --- a/portable/GCC/ARM_CR82/portmacro.h +++ b/portable/GCC/ARM_CR82/portmacro.h @@ -142,19 +142,18 @@ extern void vInterruptCore( uint32_t ulInterruptID, uint32_t ulCoreID ); #endif /* if !defined(__ASSEMBLER__) */ -/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */\ +/* Use SVC so this is safe from EL0. EL1 sites in the port use direct MSR. */ #define portDISABLE_INTERRUPTS() __asm volatile ( "SVC %0" : : "i" ( portSVC_DISABLE_INTERRUPTS ) : "memory" ) #define portENABLE_INTERRUPTS() __asm volatile ( "SVC %0" : : "i" ( portSVC_ENABLE_INTERRUPTS ) : "memory" ) - /* In all GICs 255 can be written to the priority mask register to unmask all * (but the lowest) interrupt priority. */ #define portUNMASK_VALUE ( 0xFFUL ) #if !defined(__ASSEMBLER__) /* These macros do not globally disable/enable interrupts. They do mask off - * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */ + * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */ #if ( configNUMBER_OF_CORES == 1 ) extern void vPortEnterCritical( void ); extern void vPortExitCritical( void ); @@ -180,7 +179,7 @@ #if !defined(__ASSEMBLER__) /* Prototype of the FreeRTOS tick handler. This must be installed as the - * handler for whichever peripheral is used to generate the RTOS tick. */ + * handler for whichever peripheral is used to generate the RTOS tick. */ void FreeRTOS_Tick_Handler( void ); #endif /* if !defined(__ASSEMBLER__) */ @@ -200,7 +199,8 @@ * nothing to prevent it from being called accidentally. */ #define vPortTaskUsesFPU() #endif -#define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() + +#define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() #define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL ) #define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL ) @@ -212,12 +212,10 @@ #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 -/* Store/clear the ready priorities in a bit map. */ + /* Store/clear the ready priorities in a bit map. 
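+     * Each ready priority sets one bit, so portGET_HIGHEST_PRIORITY below
+     * recovers the top ready priority with a single count-leading-zeros;
+     * e.g. uxReadyPriorities = 0x2C yields 31 - __builtin_clz( 0x2C ) = 5.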
*/ #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) -/*-----------------------------------------------------------*/ - #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) ) #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ @@ -227,7 +225,7 @@ #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() #endif /* configASSERT */ -#define portNOP() __asm volatile ( "NOP" ) +#define portNOP() __asm volatile ( "NOP" ) #define portINLINE __inline /* The number of bits to shift for an interrupt priority is dependent on the @@ -280,8 +278,8 @@ #define portGET_CORE_ID() ucPortGetCoreID() #define portGET_CORE_ID_FROM_ISR() ucPortGetCoreIDFromIsr() -/* Use SGI 0 as the yield core interrupt. */ - #define portYIELD_CORE( xCoreID ) vInterruptCore( portYIELD_CORE_INT_ID, ( uint32_t ) xCoreID ) + /* Use SGI 0 as the yield core interrupt. */ + #define portYIELD_CORE( xCoreID ) vInterruptCore( portYIELD_CORE_INT_ID, ( uint32_t ) xCoreID ) #define portRELEASE_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eIsrLock, pdFALSE ) #define portGET_ISR_LOCK( xCoreID ) vPortRecursiveLock( ( xCoreID ), eIsrLock, pdTRUE )