/* task_manager.c — cooperative task manager for the nRF5 SDK. */
  1. /**
  2. * Copyright (c) 2017 - 2020, Nordic Semiconductor ASA
  3. *
  4. * All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without modification,
  7. * are permitted provided that the following conditions are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright notice, this
  10. * list of conditions and the following disclaimer.
  11. *
  12. * 2. Redistributions in binary form, except as embedded into a Nordic
  13. * Semiconductor ASA integrated circuit in a product or a software update for
  14. * such product, must reproduce the above copyright notice, this list of
  15. * conditions and the following disclaimer in the documentation and/or other
  16. * materials provided with the distribution.
  17. *
  18. * 3. Neither the name of Nordic Semiconductor ASA nor the names of its
  19. * contributors may be used to endorse or promote products derived from this
  20. * software without specific prior written permission.
  21. *
  22. * 4. This software, with or without modification, must only be used with a
  23. * Nordic Semiconductor ASA integrated circuit.
  24. *
  25. * 5. Any software provided in binary form under this license must not be reverse
  26. * engineered, decompiled, modified and/or disassembled.
  27. *
  28. * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
  29. * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  30. * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
  31. * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
  32. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  33. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
  34. * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  35. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  36. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  37. * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  38. *
  39. */
  40. #include "sdk_common.h"
  41. #if NRF_MODULE_ENABLED(TASK_MANAGER)
  42. #include "nrf_mpu_lib.h"
  43. #include "nrf_atomic.h"
  44. #include "app_util_platform.h"
  45. #include "task_manager.h"
  46. #if TASK_MANAGER_CLI_CMDS
  47. #include "nrf_cli.h"
  48. #endif
  49. #define NRF_LOG_MODULE_NAME task_manager
  50. #if TASK_MANAGER_CONFIG_LOG_ENABLED
  51. #define NRF_LOG_LEVEL TASK_MANAGER_CONFIG_LOG_LEVEL
  52. #define NRF_LOG_INFO_COLOR TASK_MANAGER_CONFIG_INFO_COLOR
  53. #define NRF_LOG_DEBUG_COLOR TASK_MANAGER_CONFIG_DEBUG_COLOR
  54. #endif
  55. #include "nrf_log.h"
  56. NRF_LOG_MODULE_REGISTER();
  57. #if TASK_MANAGER_CONFIG_STACK_GUARD
  58. #define STACK_GUARD_SIZE (1 << (TASK_MANAGER_CONFIG_STACK_GUARD + 1))
  59. STATIC_ASSERT((TASK_MANAGER_CONFIG_STACK_SIZE % STACK_GUARD_SIZE) == 0);
  60. #endif
  61. STATIC_ASSERT((TASK_MANAGER_CONFIG_MAX_TASKS) > 0);
  62. STATIC_ASSERT((TASK_MANAGER_CONFIG_STACK_SIZE % 8) == 0);
  63. // Support older CMSIS avaiable in Keil 4
  64. #if (__CORTEX_M == 4)
  65. # ifndef CONTROL_FPCA_Pos
  66. # define CONTROL_FPCA_Pos 2u
  67. # define CONTROL_FPCA_Msk (1ul << CONTROL_FPCA_Pos)
  68. # endif
  69. # ifndef CONTROL_SPSEL_Pos
  70. # define CONTROL_SPSEL_Pos 1u
  71. # define CONTROL_SPSEL_Msk (1ul << CONTROL_SPSEL_Pos)
  72. # endif
  73. #endif
  74. /*lint -save -esym(526,task_switch)*/
  75. // Declare task switching function.
  76. extern void task_switch(void);
  77. /*lint -restore*/
  78. /**@brief Idle Task ID */
  79. #define IDLE_TASK_ID TASK_MANAGER_CONFIG_MAX_TASKS
  80. #define TASK_STACK_MAGIC_WORD 0xDEADD00E
  81. #define TASK_FLAG_SIGNAL_MASK 0x00FFFFFF
  82. #define TASK_FLAG_DESTROY 0x80000000
  83. /** @brief Total number of tasks includes user configuration and idle task. */
  84. #define TOTAL_NUM_OF_TASKS (TASK_MANAGER_CONFIG_MAX_TASKS + 1)
  85. /**@brief Task stack with saved task state (does not include FPU state). */
  86. typedef struct
  87. {
  88. uint32_t r0;
  89. uint32_t r4;
  90. uint32_t r5;
  91. uint32_t r6;
  92. uint32_t r7;
  93. uint32_t r8;
  94. uint32_t r9;
  95. uint32_t r10;
  96. uint32_t r11;
  97. uint32_t r12;
  98. uint32_t lr;
  99. uint32_t control;
  100. } task_stack_t;
  101. /**@brief Task State */
  102. typedef struct
  103. {
  104. void *p_stack; /**< Pointer to task stack. NULL if task does not exist. */
  105. const char *p_task_name;
  106. nrf_atomic_u32_t flags; /**< Task flags */
  107. } task_state_t;
  108. /* Allocate space for task stacks:
  109. *
  110. * Layout:
  111. * +---------------+
  112. * | Idle Task |
  113. * +---------------+
  114. * | Stack Guard |
  115. * +---------------+
  116. * | Task N |
  117. * +---------------+
  118. * | Stack Guard |
  119. * +---------------+
  120. * | ... |
  121. * +---------------+
  122. * | Task 0 |
  123. * +---------------+
  124. * | Stack Guard |
  125. * +---------------+
  126. */
  127. typedef struct
  128. {
  129. #if TASK_MANAGER_CONFIG_STACK_GUARD
  130. uint8_t guard[STACK_GUARD_SIZE];
  131. #endif
  132. uint8_t stack[TASK_MANAGER_CONFIG_STACK_SIZE];
  133. } task_manager_stack_t;
  134. /**@brief Stack space for tasks */
  135. #if TASK_MANAGER_CONFIG_STACK_GUARD
  136. /**@brief Handle to MPU region used as a guard */
  137. static nrf_mpu_lib_region_t s_guard_region;
  138. __ALIGN(STACK_GUARD_SIZE)
  139. #else
  140. __ALIGN(8)
  141. #endif
  142. static task_manager_stack_t s_task_stacks[TOTAL_NUM_OF_TASKS];
  143. /**@brief Task States
  144. * Addtional state reserved for idle task which is mandatory.
  145. * */
  146. static task_state_t s_task_state[TOTAL_NUM_OF_TASKS];
  147. /**@brief Mask indicating which tasks are runnable */
  148. static nrf_atomic_u32_t s_runnable_tasks_mask;
  149. /**@brief ID of currently executed task */
  150. static task_id_t s_current_task_id;
  151. /**@brief Guard page attributes: Normal memory, WBWA/WBWA, RO/RO, XN */
  152. #define TASK_GUARD_ATTRIBUTES ((0x05 << MPU_RASR_TEX_Pos) | (1 << MPU_RASR_B_Pos) | \
  153. (0x07 << MPU_RASR_AP_Pos) | (1 << MPU_RASR_XN_Pos))
  154. /**@brief Macro for getting pointer to bottom of stack for given task id */
  155. #define BOTTOM_OF_TASK_STACK(_task_id) ((void *)(&s_task_stacks[(_task_id)].stack[0]))
  156. /**@brief Macro for getting pointer to top of stack for given task id */
  157. #define TOP_OF_TASK_STACK(_task_id) ((void *)(&s_task_stacks[(_task_id)].stack[TASK_MANAGER_CONFIG_STACK_SIZE]))
  158. /**@brief Macro for getting pointer to base of stack guard for given task id */
  159. #define TASK_STACK_GUARD_BASE(_task_id) ((void *)(&s_task_stacks[(_task_id)].guard[0]))
  160. #define TASK_ID_TO_MASK(_task_id) (0x80000000 >> (_task_id))
  161. /**@brief Puts task in RUNNABLE state */
  162. #define TASK_STATE_RUNNABLE(_task_id) \
  163. (void)nrf_atomic_u32_or(&s_runnable_tasks_mask, TASK_ID_TO_MASK(_task_id))
  164. /**@brief Puts task in SUSPENDED state */
  165. #define TASK_STATE_SUSPENDED(_task_id) \
  166. (void)nrf_atomic_u32_and(&s_runnable_tasks_mask, ~TASK_ID_TO_MASK(_task_id));
  167. static void task_stack_poison(task_id_t task_id)
  168. {
  169. #if TASK_MANAGER_CONFIG_STACK_PROFILER_ENABLED
  170. unsigned int i = TASK_MANAGER_CONFIG_STACK_SIZE / sizeof(uint32_t);
  171. uint32_t *p_stack_top = TOP_OF_TASK_STACK(task_id);
  172. while (i--)
  173. {
  174. *(--p_stack_top) = TASK_STACK_MAGIC_WORD;
  175. }
  176. #endif
  177. }
/**@brief Arm the MPU stack guard for the given task (no-op when
 * TASK_MANAGER_CONFIG_STACK_GUARD is disabled).
 *
 * Programs a read-only, execute-never MPU region (see TASK_GUARD_ATTRIBUTES)
 * over the guard area below the task's stack, so an overflow faults instead
 * of silently corrupting the neighbouring stack.
 *
 * @param[in] task_id ID of the task whose guard region is created.
 */
static void task_stack_protect(task_id_t task_id)
{
#if TASK_MANAGER_CONFIG_STACK_GUARD
    APP_ERROR_CHECK(nrf_mpu_lib_region_create(&s_guard_region,
                                              TASK_STACK_GUARD_BASE(task_id),
                                              STACK_GUARD_SIZE,
                                              TASK_GUARD_ATTRIBUTES));
#endif
}
  187. PRAGMA_OPTIMIZATION_FORCE_START
  188. void task_manager_start(task_main_t idle_task, void *p_idle_task_context)
  189. {
  190. unsigned long control;
  191. // Idle task must be specified.
  192. ASSERT(idle_task != NULL);
  193. // Make sure that we are in privledged thread level using MSP stack.
  194. ASSERT((__get_IPSR() & IPSR_ISR_Msk) == 0);
  195. ASSERT((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0);
  196. ASSERT((__get_CONTROL() & CONTROL_SPSEL_Msk) == 0);
  197. // Prepare task state structure.
  198. s_current_task_id = IDLE_TASK_ID;
  199. s_task_state[s_current_task_id].p_task_name = "Idle Task";
  200. // Prepare stack instrumentation and protection.
  201. task_stack_poison(s_current_task_id);
  202. task_stack_protect(s_current_task_id);
  203. NRF_LOG_INFO("Task %u created (name: '%s', stack: 0x%08X-0x%08X).",
  204. s_current_task_id,
  205. s_task_state[s_current_task_id].p_task_name,
  206. (uint32_t)BOTTOM_OF_TASK_STACK(s_current_task_id),
  207. (uint32_t)TOP_OF_TASK_STACK(s_current_task_id) - 1);
  208. // Prepare context for idle task. This must be done with all interrupts disabled.
  209. __disable_irq();
  210. // Set process and exception stacks.
  211. __set_PSP((uint32_t)(TOP_OF_TASK_STACK(s_current_task_id)));
  212. __set_MSP((uint32_t)(STACK_TOP));
  213. // Update CONTROL register.
  214. control = __get_CONTROL();
  215. control &= CONTROL_FPCA_Msk; // Clear FPCA since FPU state does not need to be preserved.
  216. control |= CONTROL_SPSEL_Msk; // Use MSP only for excpetions, leaving PSP for tasks.
  217. __set_CONTROL(control);
  218. // Context is ready. Enable interrupts.
  219. __enable_irq();
  220. // Perform task switch to run non-idle tasks as soon as possible.
  221. task_switch();
  222. // Jump to idle task.
  223. idle_task(p_idle_task_context);
  224. // This should be never reached.
  225. APP_ERROR_CHECK_BOOL(false);
  226. }
  227. PRAGMA_OPTIMIZATION_FORCE_END
  228. task_id_t task_create(task_main_t task, char const * p_task_name, void *p_context)
  229. {
  230. task_state_t *p_state = NULL;
  231. task_stack_t *p_stack;
  232. task_id_t task_id;
  233. // Check arguments.
  234. if (task == NULL)
  235. {
  236. return TASK_ID_INVALID;
  237. }
  238. // Find free task state structure.
  239. CRITICAL_REGION_ENTER();
  240. for (task_id = 0; task_id < TASK_MANAGER_CONFIG_MAX_TASKS; task_id++)
  241. {
  242. if (s_task_state[task_id].p_stack == NULL)
  243. {
  244. p_state = &s_task_state[task_id];
  245. p_state->p_stack = TOP_OF_TASK_STACK(task_id);
  246. break;
  247. }
  248. }
  249. CRITICAL_REGION_EXIT();
  250. // Return invalid Task ID if new task cannot be created.
  251. if (p_state == NULL)
  252. {
  253. return TASK_ID_INVALID;
  254. }
  255. // Prepare initial stack for the task.
  256. task_stack_poison(task_id);
  257. p_state->p_stack = (uint8_t *)(p_state->p_stack) - sizeof(*p_stack);
  258. p_state->p_task_name = (char *)p_task_name;
  259. p_state->flags = 0;
  260. p_stack = p_state->p_stack;
  261. p_stack->control = CONTROL_SPSEL_Msk;
  262. p_stack->lr = (uint32_t)(task); // Start from this function.
  263. p_stack->r0 = (uint32_t)(p_context); // Pass p_context as first argument.
  264. // Mark task as ready to run.
  265. TASK_STATE_RUNNABLE(task_id);
  266. NRF_LOG_INFO("Task %u created (name: '%s', stack: 0x%08X-0x%08X).",
  267. task_id,
  268. p_task_name,
  269. (uint32_t)BOTTOM_OF_TASK_STACK(task_id),
  270. (uint32_t)TOP_OF_TASK_STACK(task_id) - 1);
  271. return task_id;
  272. }
/**@brief Task scheduler.
 *
 * Called from the task_switch() context-switching code. Saves the outgoing
 * task's stack pointer (or frees its slot when TASK_FLAG_DESTROY is set),
 * selects the next task round-robin from the runnable mask, re-arms the
 * stack guard for it, and returns its saved stack pointer.
 *
 * @param[in] p_stack Pointer to the stack holding the saved state of the
 *                    task being switched out.
 * @return Pointer to the stack holding the saved state of the task to run next.
 */
void *task_schedule(void *p_stack)
{
    uint32_t runnable_tasks_mask;

#if TASK_MANAGER_CONFIG_STACK_GUARD
    // Destroy stack guard allocated for current task.
    APP_ERROR_CHECK(nrf_mpu_lib_region_destroy(s_guard_region));
#endif

    // Save current task state when switching away from a live task;
    // otherwise (TASK_FLAG_DESTROY set) release the task slot.
    if ((s_task_state[s_current_task_id].flags & TASK_FLAG_DESTROY) == 0)
    {
        s_task_state[s_current_task_id].p_stack = p_stack;
    }
    else
    {
        TASK_STATE_SUSPENDED(s_current_task_id);
        // NULL p_stack marks the slot free for task_create().
        s_task_state[s_current_task_id].p_stack = NULL;

        NRF_LOG_INFO("Task %u terminated (name: '%s').",
                     s_current_task_id,
                     s_task_state[s_current_task_id].p_task_name);
    }

    // Atomically fetch list of runnable tasks (bit for task i is MSB >> i).
    runnable_tasks_mask = s_runnable_tasks_mask;

    // Check if there are any tasks to execute.
    if (runnable_tasks_mask != 0)
    {
        // Check if we could continue this round: any runnable bits below the
        // current task's bit position?
        if ((runnable_tasks_mask << (s_current_task_id + 1)) != 0)
        {
            // There are tasks to execute in this round. Select next runnable task:
            // shift past the current task and count leading zeros to find it.
            s_current_task_id += 1 + __CLZ((runnable_tasks_mask << (s_current_task_id + 1)));
        }
        else
        {
            // No more tasks in this round. Select first available task:
            s_current_task_id = __CLZ(runnable_tasks_mask);
        }
    }
    else
    {
        // Fall back to idle task if other tasks cannot be run.
        s_current_task_id = IDLE_TASK_ID;
    }

    // Re-arm the MPU guard for the incoming task's stack.
    task_stack_protect(s_current_task_id);

    // Switch to new task.
    return s_task_state[s_current_task_id].p_stack;
}
/**@brief Voluntarily give up the CPU and let the scheduler pick the next task.
 *
 * Must be called from privileged thread mode on the process stack (i.e. from
 * task context, not from an interrupt handler); asserts otherwise.
 */
void task_yield(void)
{
    // Make sure that we are in privileged thread level using PSP stack.
    ASSERT((__get_IPSR() & IPSR_ISR_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_SPSEL_Msk) != 0);

    // Perform task switch.
    task_switch();
}
/**@brief Block the calling task until at least one event in evt_mask is set.
 *
 * Pending matching events are consumed (cleared) before returning. If none
 * are pending, the task is suspended and yields; task_events_set() makes it
 * runnable again.
 *
 * @param[in] evt_mask Events to wait for; only bits within
 *                     TASK_FLAG_SIGNAL_MASK are allowed (asserted).
 *
 * @return Mask of the events that were set and have now been consumed.
 */
uint32_t task_events_wait(uint32_t evt_mask)
{
    uint32_t current_events;

    ASSERT((evt_mask & ~TASK_FLAG_SIGNAL_MASK) == 0);

    for (;;)
    {
        current_events = s_task_state[s_current_task_id].flags & evt_mask;
        if (current_events != 0)
        {
            // Consume exactly the events we are about to report.
            (void)nrf_atomic_u32_and(&s_task_state[s_current_task_id].flags, ~current_events);
            break;
        }

        // Nothing pending: suspend ourselves and yield; we run again after
        // some other context marks this task runnable.
        TASK_STATE_SUSPENDED(s_current_task_id);
        task_yield();
    }

    return current_events;
}
/**@brief Signal events to a task and make it runnable.
 *
 * @param[in] task_id  ID of an existing task (asserted valid and alive).
 * @param[in] evt_mask Events to set; only bits within TASK_FLAG_SIGNAL_MASK
 *                     are allowed (asserted).
 */
void task_events_set(task_id_t task_id, uint32_t evt_mask)
{
    ASSERT((task_id != TASK_ID_INVALID) && (task_id < TASK_MANAGER_CONFIG_MAX_TASKS));
    ASSERT((evt_mask & ~TASK_FLAG_SIGNAL_MASK) == 0);
    ASSERT(s_task_state[task_id].p_stack != NULL);

    // Publish the events before flipping the runnable bit, so the scheduler
    // never wakes the task without its events being visible.
    (void)nrf_atomic_u32_or(&s_task_state[task_id].flags, evt_mask);
    TASK_STATE_RUNNABLE(task_id);
}
/**@brief Terminate the calling task.
 *
 * Marks the current task for destruction and switches away; task_schedule()
 * frees the task slot when it sees TASK_FLAG_DESTROY. Does not return.
 * Must be called from privileged thread mode on the process stack.
 */
void task_exit(void)
{
    // Make sure that we are in privileged thread level using PSP stack.
    ASSERT((__get_IPSR() & IPSR_ISR_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_SPSEL_Msk) != 0);

    // Overwrite the whole flags word (pending signals are discarded) with
    // the destroy marker picked up by the scheduler.
    s_task_state[s_current_task_id].flags = TASK_FLAG_DESTROY;
    task_switch();
}
/**@brief Get the ID of the currently executing task.
 *
 * Must be called from privileged thread mode on the process stack
 * (task context); asserts otherwise.
 *
 * @return ID of the calling task.
 */
task_id_t task_id_get(void)
{
    // Make sure that we are in privileged thread level using PSP stack.
    ASSERT((__get_IPSR() & IPSR_ISR_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_nPRIV_Msk) == 0);
    ASSERT((__get_CONTROL() & CONTROL_SPSEL_Msk) != 0);

    return s_current_task_id;
}
  375. uint32_t task_stack_max_usage_get(task_id_t task_id)
  376. {
  377. #if TASK_MANAGER_CONFIG_STACK_PROFILER_ENABLED
  378. unsigned int stack_usage;
  379. uint32_t *p_stack, *p_stack_top;
  380. ASSERT((task_id != TASK_ID_INVALID) || (task_id < TASK_MANAGER_CONFIG_MAX_TASKS));
  381. ASSERT(s_task_state[task_id].p_stack != NULL);
  382. p_stack_top = TOP_OF_TASK_STACK(task_id);
  383. p_stack = BOTTOM_OF_TASK_STACK(task_id);
  384. stack_usage = TASK_MANAGER_CONFIG_STACK_SIZE;
  385. while (p_stack < p_stack_top)
  386. {
  387. if (*(p_stack++) != TASK_STACK_MAGIC_WORD)
  388. {
  389. break;
  390. }
  391. stack_usage -= sizeof(*p_stack);
  392. }
  393. return stack_usage;
  394. #else
  395. return 0;
  396. #endif
  397. }
  398. #if TASK_MANAGER_CLI_CMDS
  399. static void task_mnanager_info(nrf_cli_t const * p_cli, size_t argc, char **argv)
  400. {
  401. task_id_t task_id;
  402. for (task_id = 0; task_id < TOTAL_NUM_OF_TASKS; task_id++)
  403. {
  404. const char *p_task_name = NULL;
  405. CRITICAL_REGION_ENTER();
  406. if (s_task_state[task_id].p_stack != NULL)
  407. {
  408. p_task_name = (s_task_state[task_id].p_task_name) ? s_task_state[task_id].p_task_name
  409. : "<NULL>";
  410. }
  411. CRITICAL_REGION_EXIT();
  412. if (p_task_name)
  413. {
  414. uint32_t stack_usage = task_stack_max_usage_get(task_id);
  415. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "Task %u:\r\n", task_id);
  416. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\tName:\t'%s'\r\n", p_task_name);
  417. if (stack_usage)
  418. {
  419. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\tStack:\t0x%08X-0x%08X used in %u%% (%u out of %u bytes)\r\n",
  420. (uint32_t)BOTTOM_OF_TASK_STACK(task_id),
  421. (uint32_t)TOP_OF_TASK_STACK(task_id) - 1,
  422. 100 * stack_usage / TASK_MANAGER_CONFIG_STACK_SIZE,
  423. stack_usage,
  424. TASK_MANAGER_CONFIG_STACK_SIZE);
  425. }
  426. else
  427. {
  428. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\tStack:\t0x%08X-0x%08X\r\n",
  429. (uint32_t)BOTTOM_OF_TASK_STACK(task_id),
  430. (uint32_t)TOP_OF_TASK_STACK(task_id) - 1);
  431. }
  432. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\tState:\t%s\r\n",
  433. (s_current_task_id == task_id) ? "Running" :
  434. (s_runnable_tasks_mask & TASK_ID_TO_MASK(task_id)) ? "Runnable" : "Suspended");
  435. nrf_cli_fprintf(p_cli, NRF_CLI_NORMAL, "\tFlags:\t0x%08X\r\n\r\n",
  436. s_task_state[task_id].flags);
  437. }
  438. }
  439. }
  440. NRF_CLI_CREATE_STATIC_SUBCMD_SET(m_sub_task_mngr)
  441. {
  442. NRF_CLI_CMD(info, NULL, "tasks info", task_mnanager_info),
  443. NRF_CLI_SUBCMD_SET_END
  444. };
  445. NRF_CLI_CMD_REGISTER(task_manager, &m_sub_task_mngr, "commands for task manager", NULL);
  446. #endif //TASK_MANAGER_CLI_CMDS
#else //TASK_MANAGER_ENABLED
/**@brief Stub scheduler compiled when the task manager module is disabled;
 * keeps the symbol available for the context-switch code but performs no
 * scheduling and always reports no next-task stack.
 */
void *task_schedule(void *p_stack)
{
    return (void *)0;
}
#endif //TASK_MANAGER_ENABLED