229229 * assembly code so is implemented in portASM.s.
230230 */
231231extern void vPortRestoreTaskContext ( void );
232-
233232extern void vGIC_EnableIRQ ( uint32_t ulInterruptID );
234233extern void vGIC_SetPriority ( uint32_t ulInterruptID , uint32_t ulPriority );
235234extern void vGIC_PowerUpRedistributor ( void );
@@ -238,28 +237,38 @@ extern void vGIC_EnableCPUInterface( void );
238237/*-----------------------------------------------------------*/
239238
240239#if ( configNUMBER_OF_CORES == 1 )
240+
241241 PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL ;
242242
243- /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
244- * then floating point context must be saved and restored for the task. */
243+ /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
244+ * then floating point context must be saved and restored for the task. */
245245 PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE ;
246246
247- /* Set to 1 to pend a context switch from an ISR. */
247+ /* Set to 1 to pend a context switch from an ISR. */
248248 PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE ;
249249
250- /* Counts the interrupt nesting depth. A context switch is only performed if
251- * if the nesting depth is 0. */
 250+	/* Counts the interrupt nesting depth. A context switch is only performed
 251+	 * if the nesting depth is 0. */
252252 PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0 ;
253+
253254#else /* #if ( configNUMBER_OF_CORES == 1 ) */
254255 PRIVILEGED_DATA volatile uint64_t ullCriticalNestings [ configNUMBER_OF_CORES ] = { 0 };
255256
256257 /* Flags to check if the secondary cores are ready. */
257258 PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags [ configNUMBER_OF_CORES - 1 ] = { 0 };
259+
260+ /* Flag to signal that the primary core has done all the shared initialisations. */
258261 PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0 ;
259- /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
262+
263+ /* Saved as part of the task context. If ullPortTaskHasFPUContext is non-zero
260264 * then floating point context must be saved and restored for the task. */
261265 PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext [ configNUMBER_OF_CORES ] = { pdFALSE };
266+
267+ /* Set to 1 to pend a context switch from an ISR. */
262268 PRIVILEGED_DATA uint64_t ullPortYieldRequired [ configNUMBER_OF_CORES ] = { pdFALSE };
269+
 270+	/* Counts the interrupt nesting depth. A context switch is only performed
 271+	 * if the nesting depth is 0. */
263272 PRIVILEGED_DATA uint64_t ullPortInterruptNestings [ configNUMBER_OF_CORES ] = { 0 };
264273
265274#endif /* #if ( configNUMBER_OF_CORES == 1 ) */
@@ -1168,12 +1177,12 @@ BaseType_t xPortStartScheduler( void )
11681177 volatile uint8_t ucMaxPriorityValue ;
11691178
11701179 /* Determine how many priority bits are implemented in the GIC.
1171- *
1172- * Save the interrupt priority value that is about to be clobbered. */
1180+ *
1181+ * Save the interrupt priority value that is about to be clobbered. */
11731182 ucOriginalPriority = * pucFirstUserPriorityRegister ;
11741183
11751184 /* Determine the number of priority bits available. First write to
1176- * all possible bits. */
1185+ * all possible bits. */
11771186 * pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE ;
11781187
11791188 /* Read the value back to see how many bits stuck. */
@@ -1186,12 +1195,12 @@ BaseType_t xPortStartScheduler( void )
11861195 }
11871196
11881197 /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
1189- * value. */
1198+ * value. */
11901199 configASSERT ( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );
11911200
11921201
11931202 /* Restore the clobbered interrupt priority register to its original
1194- * value. */
1203+ * value. */
11951204 * pucFirstUserPriorityRegister = ucOriginalPriority ;
11961205 }
11971206 #endif /* configASSERT_DEFINED */
@@ -1534,9 +1543,9 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15341543#if ( configNUMBER_OF_CORES > 1 )
15351544
15361545 /* Which core owns the lock? Keep in privileged, shareable RAM. */
1537- PRIVILEGED_DATA volatile uint64_t ucOwnedByCore [ portMAX_CORE_COUNT ];
1546+ PRIVILEGED_DATA volatile uint64_t ullOwnedByCore [ portMAX_CORE_COUNT ];
15381547 /* Lock count a core owns. */
1539- PRIVILEGED_DATA volatile uint64_t ucRecursionCountByLock [ eLockCount ];
1548+ PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock [ eLockCount ];
15401549 /* Index 0 is used for ISR lock and Index 1 is used for task lock. */
15411550 PRIVILEGED_DATA uint32_t ulGateWord [ eLockCount ];
15421551
@@ -1560,13 +1569,14 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15601569
15611570 static inline void prvSpinUnlock ( uint32_t * ulLock )
15621571 {
1572+ /* Conservative unlock: preserve original barriers for broad HW/FVP. */
15631573 __asm volatile (
1564- "dmb sy\n"
1565- "mov w1, #0\n"
1566- "str w1, [%x0]\n"
1567- "sev\n"
1568- "dsb sy\n"
1569- "isb sy\n"
1574+ "dmb sy \n"
1575+ "mov w1, #0 \n"
1576+ "str w1, [%x0] \n"
1577+ "sev \n"
1578+ "dsb sy \n"
1579+ "isb sy \n"
15701580 :
15711581 : "r" ( ulLock )
15721582 : "memory" , "w1"
@@ -1577,22 +1587,30 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
15771587
15781588 static inline uint32_t prvSpinTrylock ( uint32_t * ulLock )
15791589 {
1590+ /*
1591+ * Conservative LDXR/STXR trylock:
1592+ * - Return 1 immediately if busy, clearing exclusive state (CLREX).
1593+ * - Retry STXR only on spurious failure when observed free.
1594+ * - DMB on success to preserve expected acquire semantics.
1595+ */
15801596 register uint32_t ulRet ;
1581- /* Try to acquire spinlock; caller is responsible for further barriers. */
15821597 __asm volatile (
1583- "1:\n"
1584- "ldxr w1, [%x1]\n"
1585- "cmp w1, #1\n"
1586- "beq 2f\n"
1587- "mov w2, #1\n"
1588- "stxr w1, w2, [%x1]\n"
1589- "cmp w1, #0\n"
1590- "bne 1b\n"
1591- "2:\n"
1592- "mov %w0, w1\n"
1598+ "1: \n"
1599+ "ldxr w1, [%x1] \n"
1600+ "cbnz w1, 2f \n" /* Busy -> return 1 */
1601+ "mov w2, #1 \n"
1602+ "stxr w3, w2, [%x1] \n" /* w3 = status */
1603+ "cbnz w3, 1b \n" /* Retry on STXR failure */
1604+ "dmb sy \n" /* Acquire barrier on success */
1605+ "mov %w0, #0 \n" /* Success */
1606+ "b 3f \n"
1607+ "2: \n"
1608+ "clrex \n" /* Clear monitor when busy */
1609+ "mov %w0, #1 \n" /* Busy */
1610+ "3: \n"
15931611 : "=r" ( ulRet )
15941612 : "r" ( ulLock )
1595- : "memory" , "w1" , "w2"
1613+ : "memory" , "w1" , "w2" , "w3"
15961614 );
15971615
15981616 return ulRet ;
@@ -1640,10 +1658,10 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16401658 if ( prvSpinTrylock ( & ulGateWord [ eLockNum ] ) != 0 )
16411659 {
16421660 /* Check if the core owns the spinlock. */
1643- if ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ulLockBit )
1661+ if ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ulLockBit )
16441662 {
1645- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) != 255u );
1646- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) + 1 ) );
1663+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) != 255u );
1664+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) + 1 ) );
16471665 return ;
16481666 }
16491667
@@ -1667,26 +1685,26 @@ UBaseType_t uxPortSetInterruptMaskFromISR( void )
16671685 __asm__ __volatile__ ( "dmb sy" ::: "memory" );
16681686
16691687 /* Assert the lock count is 0 when the spinlock is free and is acquired. */
1670- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) == 0 );
1688+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) == 0 );
16711689
16721690 /* Set lock count as 1. */
1673- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], 1 );
1674- /* Set ucOwnedByCore . */
1675- prvSet64 ( & ucOwnedByCore [ xCoreID ], ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) | ulLockBit ) );
1691+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], 1 );
1692+ /* Set ullOwnedByCore . */
1693+ prvSet64 ( & ullOwnedByCore [ xCoreID ], ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) | ulLockBit ) );
16761694 }
16771695 /* Lock release. */
16781696 else
16791697 {
16801698 /* Assert the lock is not free already. */
1681- configASSERT ( ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ulLockBit ) != 0 );
1682- configASSERT ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) != 0 );
1699+ configASSERT ( ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ulLockBit ) != 0 );
1700+ configASSERT ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) != 0 );
16831701
1684- /* Reduce ucRecursionCountByLock by 1. */
1685- prvSet64 ( & ucRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) - 1 ) );
1702+ /* Reduce ullRecursionCountByLock by 1. */
1703+ prvSet64 ( & ullRecursionCountByLock [ eLockNum ], ( prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) - 1 ) );
16861704
1687- if ( !prvGet64 ( & ucRecursionCountByLock [ eLockNum ] ) )
1705+ if ( !prvGet64 ( & ullRecursionCountByLock [ eLockNum ] ) )
16881706 {
1689- prvSet64 ( & ucOwnedByCore [ xCoreID ], ( prvGet64 ( & ucOwnedByCore [ xCoreID ] ) & ~ulLockBit ) );
1707+ prvSet64 ( & ullOwnedByCore [ xCoreID ], ( prvGet64 ( & ullOwnedByCore [ xCoreID ] ) & ~ulLockBit ) );
16901708 prvSpinUnlock ( & ulGateWord [ eLockNum ] );
16911709 /* Add barrier to ensure lock status is reflected before we proceed. */
16921710 __asm__ __volatile__ ( "dmb sy" ::: "memory" );