diff --git a/src/include/module/module/base.h b/src/include/module/module/base.h
index 6d7dea657314..9448d372e110 100644
--- a/src/include/module/module/base.h
+++ b/src/include/module/module/base.h
@@ -75,6 +75,7 @@ enum module_processing_type {
 };
 
 struct userspace_context;
+struct k_mem_domain;
 
 /*
  * A pointer to this structure is passed to module API functions (from struct module_interface).
@@ -189,6 +190,9 @@ struct processing_module {
 #if CONFIG_USERSPACE
 	struct userspace_context *user_ctx;
 #endif /* CONFIG_USERSPACE */
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	struct k_mem_domain *mdom;
+#endif
 #endif /* SOF_MODULE_PRIVATE */
 };
 
diff --git a/src/include/sof/objpool.h b/src/include/sof/objpool.h
index 7746c09ea480..39dbc670e9e5 100644
--- a/src/include/sof/objpool.h
+++ b/src/include/sof/objpool.h
@@ -6,12 +6,21 @@
 #ifndef __ZEPHYR_OBJPOOL_H__
 #define __ZEPHYR_OBJPOOL_H__
 
-struct list_item;
+#include <stdint.h>
+
+#include <sof/list.h>
+
+struct objpool_head {
+	struct list_item list;
+	uint32_t flags;
+};
+
 /**
  * Allocate memory tracked as part of an object pool.
  *
- * @param head Pointer to the object pool list head.
+ * @param head Pointer to the object pool head.
  * @param size Size in bytes of memory blocks to allocate.
+ * @param flags Memory allocation flags.
  *
  * @return a pointer to the allocated memory on success, NULL on failure.
  *
@@ -22,13 +31,15 @@ struct list_item;
  * is requested, the next call allocates 4 blocks, then 8, 16 and 32. After that
  * 32 blocks are allocated every time. Note, that by design allocated blocks are
  * never freed. See more below.
+ * The @a flags of the first allocation are recorded in @a head and all later
+ * allocations from the same pool must pass the same flags.
  */
-void *objpool_alloc(struct list_item *head, size_t size);
+void *objpool_alloc(struct objpool_head *head, size_t size, uint32_t flags);
 
 /**
  * Return a block to the object pool
  *
- * @param head Pointer to the object pool list head.
+ * @param head Pointer to the object pool head.
  * @param data Pointer to the object to return (can be NULL)
  *
  * @return 0 on success or a negative error code.
  *
@@ -36,6 +47,6 @@ void *objpool_alloc(struct list_item *head, size_t size);
  * Return a block to the object pool. Memory is never freed by design, unused
  * blocks are kept in the object pool for future re-use.
  */
-int objpool_free(struct list_item *head, void *data);
+int objpool_free(struct objpool_head *head, void *data);
 
 #endif
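For reference, a minimal usage sketch of the updated objpool API (the `my_ctx` type and `ctx_pool` name are illustrative, not part of this change):

```c
#include <sof/list.h>
#include <sof/objpool.h>

struct my_ctx {
	unsigned int id;
};

/* One pool per object type: all blocks share one size and one set of flags */
static struct objpool_head ctx_pool = {.list = LIST_INIT(ctx_pool.list)};

static struct my_ctx *ctx_get(void)
{
	/* Transparently grows the pool (2, 4, 8, 16, 32 blocks) when exhausted */
	return objpool_alloc(&ctx_pool, sizeof(struct my_ctx), 0);
}

static void ctx_put(struct my_ctx *ctx)
{
	/* Marks the block free for re-use; the backing memory is never freed */
	objpool_free(&ctx_pool, ctx);
}
```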
diff --git a/src/lib/objpool.c b/src/lib/objpool.c
index 3b926ab887e5..6afc1f94283a 100644
--- a/src/lib/objpool.c
+++ b/src/lib/objpool.c
@@ -24,7 +24,7 @@ struct objpool {
 
 #define OBJPOOL_BITS (sizeof(((struct objpool *)0)->mask) * 8)
 
-static int objpool_add(struct list_item *head, unsigned int n, size_t size)
+static int objpool_add(struct list_item *head, unsigned int n, size_t size, uint32_t flags)
 {
 	if (n > OBJPOOL_BITS)
 		return -ENOMEM;
@@ -35,7 +35,7 @@ static int objpool_add(struct list_item *head, unsigned int n, size_t size)
 	size_t aligned_size = ALIGN_UP(size, sizeof(int));
 
 	/* Initialize with 0 to give caller a chance to identify new allocations */
-	struct objpool *pobjpool = rzalloc(0, n * aligned_size + sizeof(*pobjpool));
+	struct objpool *pobjpool = rzalloc(flags, n * aligned_size + sizeof(*pobjpool));
 
 	if (!pobjpool)
 		return -ENOMEM;
@@ -50,7 +50,7 @@ static int objpool_add(struct list_item *head, unsigned int n, size_t size)
 	return 0;
 }
 
-void *objpool_alloc(struct list_item *head, size_t size)
+void *objpool_alloc(struct objpool_head *head, size_t size, uint32_t flags)
 {
 	size_t aligned_size = ALIGN_UP(size, sizeof(int));
 	struct list_item *list;
@@ -60,7 +60,14 @@ void *objpool_alloc(struct list_item *head, size_t size)
 	if (!size || aligned_size > (UINT_MAX >> 5) - sizeof(*pobjpool))
 		return NULL;
 
-	list_for_item(list, head) {
+	if (list_is_empty(&head->list))
+		/* The first allocation locks the pool's flags */
+		head->flags = flags;
+	else if (head->flags != flags)
+		/* Flags don't match the ones this pool was created with */
+		return NULL;
+
+	list_for_item(list, &head->list) {
 		pobjpool = container_of(list, struct objpool, list);
 
 		uint32_t free_mask = MASK(pobjpool->n - 1, 0) & ~pobjpool->mask;
@@ -83,11 +90,11 @@ void *objpool_alloc(struct list_item *head, size_t size)
 	/* no free elements found */
 	unsigned int new_n;
 
-	if (list_is_empty(head)) {
+	if (list_is_empty(&head->list)) {
 		new_n = 2;
 	} else {
 		/* Check the last one */
-		pobjpool = container_of(head->prev, struct objpool, list);
+		pobjpool = container_of(head->list.prev, struct objpool, list);
 
 		if (pobjpool->n == OBJPOOL_BITS)
 			new_n = OBJPOOL_BITS;
@@ -95,17 +102,17 @@ void *objpool_alloc(struct list_item *head, size_t size)
 			new_n = pobjpool->n << 1;
 	}
 
-	if (objpool_add(head, new_n, size) < 0)
+	if (objpool_add(&head->list, new_n, size, flags) < 0)
 		return NULL;
 
 	/* Return the first element of the new objpool, which is now the last one in the list */
-	pobjpool = container_of(head->prev, struct objpool, list);
+	pobjpool = container_of(head->list.prev, struct objpool, list);
 	pobjpool->mask = 1;
 
 	return pobjpool->data;
 }
 
-int objpool_free(struct list_item *head, void *data)
+int objpool_free(struct objpool_head *head, void *data)
 {
 	struct list_item *list;
 	struct objpool *pobjpool;
@@ -113,7 +120,7 @@ int objpool_free(struct list_item *head, void *data)
 	if (!data)
 		return 0;
 
-	list_for_item(list, head) {
+	list_for_item(list, &head->list) {
 		pobjpool = container_of(list, struct objpool, list);
 
 		size_t aligned_size = ALIGN_UP(pobjpool->size, sizeof(int));
diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c
index 1418baf4cc6d..5da298db73a4 100644
--- a/src/schedule/zephyr_dp_schedule.c
+++ b/src/schedule/zephyr_dp_schedule.c
@@ -387,8 +387,6 @@ int scheduler_dp_init(void)
 
 	notifier_register(NULL, NULL, NOTIFIER_ID_LL_POST_RUN, scheduler_dp_ll_tick, 0);
 
-	scheduler_dp_domain_init();
-
 	return 0;
 }
 
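For clarity, the occupancy bookkeeping behind these changes can be pictured as follows (a standalone sketch, not the exact SOF code; `objpool_find_free` is a hypothetical helper):

```c
#include <stdint.h>

/* Each objpool set of n blocks tracks occupancy in a 32-bit mask
 * (OBJPOOL_BITS), bit i set meaning block i is in use. Return the index of
 * a free block, or n when the set is full and objpool_alloc() must append
 * a new set twice the size of the last one (2, 4, 8, 16, then 32 forever).
 */
static unsigned int objpool_find_free(uint32_t mask, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (!(mask & (1u << i)))
			return i;

	return n;
}
```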
diff --git a/src/schedule/zephyr_dp_schedule.h b/src/schedule/zephyr_dp_schedule.h
index 303403565506..83880bd02b4e 100644
--- a/src/schedule/zephyr_dp_schedule.h
+++ b/src/schedule/zephyr_dp_schedule.h
@@ -60,8 +60,6 @@ int scheduler_dp_task_init(struct task **task,
 			   const struct sof_uuid_entry *uid, uint16_t core,
 			   size_t stack_size, uint32_t options);
 #if CONFIG_SOF_USERSPACE_APPLICATION
 void scheduler_dp_domain_free(struct processing_module *pmod);
-int scheduler_dp_domain_init(void);
 #else
 static inline void scheduler_dp_domain_free(struct processing_module *pmod) {}
-static inline int scheduler_dp_domain_init(void) {return 0;}
 #endif
diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index dc0e0fe1da3e..e001b21ea423 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <sof/objpool.h>
 #include
 #include
@@ -27,7 +28,7 @@
 
 LOG_MODULE_DECLARE(dp_schedule, CONFIG_SOF_LOG_LEVEL);
 
 extern struct tr_ctx dp_tr;
 
-static struct k_mem_domain dp_mdom[CONFIG_CORE_COUNT];
+static struct objpool_head dp_mdom_head = {.list = LIST_INIT(dp_mdom_head.list)};
 
 /* Synchronization semaphore for the scheduler thread to wait for DP startup */
 #define DP_SYNC_INIT(i, _) Z_SEM_INITIALIZER(dp_sync[i], 0, 1)
@@ -384,14 +385,17 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
  */
 void scheduler_dp_domain_free(struct processing_module *pmod)
 {
-	unsigned int core = pmod->dev->task->core;
+	struct k_mem_domain *mdom = pmod->mdom;
 
-	llext_manager_rm_domain(pmod->dev->ipc_config.id, dp_mdom + core);
+	llext_manager_rm_domain(pmod->dev->ipc_config.id, mdom);
 
 	struct task_dp_pdata *pdata = pmod->dev->task->priv_data;
 
-	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
-	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
+	k_mem_domain_remove_partition(mdom, pdata->mpart + SOF_DP_PART_HEAP);
+	k_mem_domain_remove_partition(mdom, pdata->mpart + SOF_DP_PART_CFG);
+
+	pmod->mdom = NULL;
+	objpool_free(&dp_mdom_head, mdom);
 }
 
 /* Called only in IPC context */
@@ -497,6 +501,22 @@ int scheduler_dp_task_init(struct task **task,
 	unsigned int pidx;
 	size_t size;
 	uintptr_t start;
+	struct k_mem_domain *mdom = objpool_alloc(&dp_mdom_head, sizeof(*mdom),
+						  SOF_MEM_FLAG_COHERENT);
+
+	if (!mdom) {
+		ret = -ENOMEM;
+		goto e_thread;
+	}
+
+	mod->mdom = mdom;
+
+	/* objpool zero-fills new blocks: NULL ptables means a fresh domain that needs init */
+	if (!mdom->arch.ptables) {
+		ret = k_mem_domain_init(mdom, 0, NULL);
+		if (ret < 0)
+			goto e_dom;
+	}
 
 	/* Module heap partition */
 	mod_heap_info(mod, &size, &start);
@@ -513,12 +533,12 @@ int scheduler_dp_task_init(struct task **task,
 	};
 
 	for (pidx = 0; pidx < SOF_DP_PART_TYPE_COUNT; pidx++) {
-		ret = k_mem_domain_add_partition(dp_mdom + core, pdata->mpart + pidx);
+		ret = k_mem_domain_add_partition(mdom, pdata->mpart + pidx);
 		if (ret < 0)
 			goto e_dom;
 	}
 
-	ret = llext_manager_add_domain(mod->dev->ipc_config.id, dp_mdom + core);
+	ret = llext_manager_add_domain(mod->dev->ipc_config.id, mdom);
 	if (ret < 0) {
 		tr_err(&dp_tr, "failed to add LLEXT to domain %d", ret);
 		goto e_dom;
 	}
@@ -528,7 +548,7 @@ int scheduler_dp_task_init(struct task **task,
 	 * Keep this call last, able to fail, otherwise domain will be removed
 	 * before its thread
 	 */
-	ret = k_mem_domain_add_thread(dp_mdom + core, pdata->thread_id);
+	ret = k_mem_domain_add_thread(mdom, pdata->thread_id);
 	if (ret < 0) {
 		tr_err(&dp_tr, "failed to add thread to domain %d", ret);
 		goto e_dom;
 	}
@@ -554,8 +574,3 @@ int scheduler_dp_task_init(struct task **task,
 	mod_free(mod, task_memory);
 	return ret;
 }
-
-int scheduler_dp_domain_init(void)
-{
-	return k_mem_domain_init(dp_mdom + cpu_get_id(), 0, NULL);
-}
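For orientation, the per-module domain lifecycle implemented above reduces to the following Zephyr memory-domain calls (a simplified sketch with illustrative arguments; error unwinding is omitted):

```c
#include <zephyr/kernel.h>
#include <zephyr/app_memory/mem_domain.h>

/* Sketch: bring up one module's memory domain, then bind its DP thread */
static int module_domain_setup(struct k_mem_domain *mdom,
			       struct k_mem_partition *part, k_tid_t thread)
{
	/* Start with no partitions; each module adds its own */
	int ret = k_mem_domain_init(mdom, 0, NULL);

	if (ret < 0)
		return ret;

	ret = k_mem_domain_add_partition(mdom, part);
	if (ret < 0)
		return ret;

	/* Keep this last: once the thread joins, the domain is live */
	return k_mem_domain_add_thread(mdom, thread);
}
```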
diff --git a/test/ztest/unit/objpool/test_objpool_ztest.c b/test/ztest/unit/objpool/test_objpool_ztest.c
index 1fcc65f4ec51..05f06e73865e 100644
--- a/test/ztest/unit/objpool/test_objpool_ztest.c
+++ b/test/ztest/unit/objpool/test_objpool_ztest.c
@@ -9,6 +9,9 @@
 #include
 #include
 #include
+
+#include <rtos/alloc.h>
+
 #include
 #include
@@ -26,23 +29,26 @@ void *__wrap_rzalloc(uint32_t flags, size_t bytes)
 
 ZTEST(objpool_suite, test_objpool_wrong_size)
 {
-	struct list_item head = LIST_INIT(head);
+	struct objpool_head head = {.list = LIST_INIT(head.list)};
 
 	/* new object pool of 2 blocks */
-	uint8_t *block1 = objpool_alloc(&head, DATA_SIZE);
+	uint8_t *block1 = objpool_alloc(&head, DATA_SIZE, 0);
 	/* should fail because of a different size */
-	uint8_t *block2 = objpool_alloc(&head, DATA_SIZE + 1);
+	uint8_t *block2 = objpool_alloc(&head, DATA_SIZE + 1, 0);
 	/* second block in the first object pool */
-	uint8_t *block3 = objpool_alloc(&head, DATA_SIZE);
+	uint8_t *block3 = objpool_alloc(&head, DATA_SIZE, 0);
 	/* new object pool of 4 blocks */
-	uint8_t *block4 = objpool_alloc(&head, DATA_SIZE);
+	uint8_t *block4 = objpool_alloc(&head, DATA_SIZE, 0);
 	/* should fail because of a different size */
-	uint8_t *block5 = objpool_alloc(&head, DATA_SIZE * 2);
+	uint8_t *block5 = objpool_alloc(&head, DATA_SIZE * 2, 0);
+	/* should fail because of different flags */
+	uint8_t *block6 = objpool_alloc(&head, DATA_SIZE * 2, SOF_MEM_FLAG_COHERENT);
 
 	zassert_not_null(block1);
 	zassert_is_null(block2);
 	zassert_not_null(block3);
 	zassert_not_null(block4);
 	zassert_is_null(block5);
+	zassert_is_null(block6);
 
 	zassert_not_ok(objpool_free(&head, block1 + 1));
 	zassert_ok(objpool_free(&head, block1));
@@ -54,7 +60,7 @@ ZTEST(objpool_suite, test_objpool_wrong_size)
 
 ZTEST(objpool_suite, test_objpool)
 {
-	struct list_item head = LIST_INIT(head);
+	struct objpool_head head = {.list = LIST_INIT(head.list)};
 	void *blocks[62];	/* 2 + 4 + 8 + 16 + 32 */
 	unsigned int k = 0;
 
@@ -64,7 +70,7 @@ ZTEST(objpool_suite, test_objpool)
 		uint8_t *start;
 
 		for (unsigned int j = 0; j < n; j++) {
-			uint8_t *block = objpool_alloc(&head, DATA_SIZE);
+			uint8_t *block = objpool_alloc(&head, DATA_SIZE, 0);
 
 			zassert_not_null(block, "allocation failed loop %u iter %u", i, j);