diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c
index c0306f0f599e..5b842490a499 100644
--- a/src/schedule/zephyr_dp_schedule.c
+++ b/src/schedule/zephyr_dp_schedule.c
@@ -33,6 +33,14 @@ SOF_DEFINE_REG_UUID(dp_sched);
 
 DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
 
+#if CONFIG_SOF_USERSPACE_APPLICATION
+struct dp_sem_buf {
+	struct sys_sem sem[CONFIG_CORE_COUNT];
+	uint8_t reserved[CONFIG_MM_DRV_PAGE_SIZE - sizeof(struct sys_sem) * CONFIG_CORE_COUNT];
+};
+
+static struct dp_sem_buf __aligned(4096) dp_sched_sem;
+#else
 #define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
 #define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
 
@@ -42,6 +50,7 @@ DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
  */
 static
 STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
+#endif
 
 /* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
  *
@@ -49,22 +58,48 @@ STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOC
  */
 unsigned int scheduler_dp_lock(uint16_t core)
 {
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	sys_sem_take(&dp_sched_sem.sem[core], K_FOREVER);
+#else
 	k_sem_take(&dp_lock[core], K_FOREVER);
+#endif
+
 	return core;
 }
 
 void scheduler_dp_unlock(unsigned int key)
 {
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	sys_sem_give(&dp_sched_sem.sem[key]);
+#else
 	k_sem_give(&dp_lock[key]);
+#endif
 }
 
-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
+#if CONFIG_SOF_USERSPACE_APPLICATION
+int scheduler_dp_add_domain(struct k_mem_domain *domain)
 {
-#if CONFIG_USERSPACE
-	k_thread_access_grant(thread_id, &dp_lock[core]);
-#endif
+	struct k_mem_partition part = {
+		.start = (uintptr_t)&dp_sched_sem,
+		.size = sizeof(dp_sched_sem),
+		.attr = K_MEM_PARTITION_P_RW_U_RW,
+	};
+
+	return k_mem_domain_add_partition(domain, &part);
 }
 
+int scheduler_dp_rm_domain(struct k_mem_domain *domain)
+{
+	struct k_mem_partition part = {
+		.start = (uintptr_t)&dp_sched_sem,
+		.size = sizeof(dp_sched_sem),
+		.attr = K_MEM_PARTITION_P_RW_U_RW,
+	};
+
+	return k_mem_domain_remove_partition(domain, &part);
+}
+#endif
+
 /* dummy LL task - to start LL on secondary cores */
 static enum task_state scheduler_dp_ll_tick_dummy(void *data)
 {
@@ -370,6 +405,11 @@ int scheduler_dp_init(void)
 
 	scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
 
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	for (unsigned int i = 0; i < ARRAY_SIZE(dp_sched_sem.sem); i++)
+		sys_sem_init(dp_sched_sem.sem + i, 1, 1);
+#endif
+
 	/* init src of DP tick */
 	ret = schedule_task_init_ll(&dp_sch->ll_tick_src,
 				    SOF_UUID(dp_sched_uuid),
diff --git a/src/schedule/zephyr_dp_schedule.h b/src/schedule/zephyr_dp_schedule.h
index 2ef6bde805c3..fed42860e710 100644
--- a/src/schedule/zephyr_dp_schedule.h
+++ b/src/schedule/zephyr_dp_schedule.h
@@ -55,11 +55,12 @@ void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_
 void dp_thread_fn(void *p1, void *p2, void *p3);
 unsigned int scheduler_dp_lock(uint16_t core);
 void scheduler_dp_unlock(unsigned int key);
-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core);
 int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			   const struct task_ops *ops, struct processing_module *mod,
 			   uint16_t core, size_t stack_size, uint32_t options);
 #if CONFIG_SOF_USERSPACE_APPLICATION
+int scheduler_dp_add_domain(struct k_mem_domain *domain);
+int scheduler_dp_rm_domain(struct k_mem_domain *domain);
 void scheduler_dp_domain_free(struct processing_module *pmod);
 int scheduler_dp_domain_init(void);
 #else
diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c
index 65f4c1ee9de1..5a00274eb8e0 100644
--- a/src/schedule/zephyr_dp_schedule_application.c
+++ b/src/schedule/zephyr_dp_schedule_application.c
@@ -396,6 +396,7 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
 
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
+	scheduler_dp_rm_domain(dp_mdom + core);
 #endif
 }
 
@@ -505,7 +506,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 #if CONFIG_USERSPACE
 	k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
-	scheduler_dp_grant(pdata->thread_id, core);
 
 	unsigned int pidx;
 	size_t size;
@@ -531,6 +531,12 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		goto e_dom;
 	}
 
+	ret = scheduler_dp_add_domain(dp_mdom + core);
+	if (ret < 0) {
+		tr_err(&dp_tr, "failed to add DP lock domain %d", ret);
+		goto e_dom;
+	}
+
 	ret = llext_manager_add_domain(mod->dev->ipc_config.id, dp_mdom + core);
 	if (ret < 0) {
 		tr_err(&dp_tr, "failed to add LLEXT to domain %d", ret);
diff --git a/src/schedule/zephyr_dp_schedule_thread.c b/src/schedule/zephyr_dp_schedule_thread.c
index c9874155893c..d371e13bc281 100644
--- a/src/schedule/zephyr_dp_schedule_thread.c
+++ b/src/schedule/zephyr_dp_schedule_thread.c
@@ -270,7 +270,6 @@ int scheduler_dp_task_init(struct task **task,
 			     CONFIG_DP_THREAD_PRIORITY, (*task)->flags, K_FOREVER);
 
 	k_thread_access_grant(pdata->thread_id, pdata->event);
-	scheduler_dp_grant(pdata->thread_id, cpu_get_id());
 
 	/* pin the thread to specific core */
 	ret = k_thread_cpu_pin(pdata->thread_id, core);
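
A minimal usage sketch of the new locking path, assuming CONFIG_SOF_USERSPACE_APPLICATION=y; the helper my_dp_work() and the include line are illustrative, not part of the patch. The point is that sys_sem operates on plain user-RW memory, so once scheduler_dp_add_domain() has added the dp_sched_sem page to the thread's memory domain (as done in scheduler_dp_task_init() above), no per-object k_thread_access_grant() is needed, which is why scheduler_dp_grant() can be dropped:

/*
 * Illustrative sketch only, not part of this patch.
 * Assumes CONFIG_SOF_USERSPACE_APPLICATION=y and that the calling
 * user-mode thread's memory domain already contains the page holding
 * dp_sched_sem, i.e. scheduler_dp_add_domain() ran during task init.
 */
#include "zephyr_dp_schedule.h"	/* hypothetical include for this sketch */

static void my_dp_work(uint16_t core)	/* hypothetical helper */
{
	/* Take the per-core DP lock; this resolves to sys_sem_take()
	 * on dp_sched_sem.sem[core], which is legal from user mode.
	 */
	unsigned int key = scheduler_dp_lock(core);

	/* ... touch per-core DP scheduler state here ... */

	scheduler_dp_unlock(key);
}

On the data layout: struct dp_sem_buf pads the semaphore array out to CONFIG_MM_DRV_PAGE_SIZE and the instance is page-aligned, so the k_mem_partition built from &dp_sched_sem/sizeof(dp_sched_sem) covers exactly one page and exposes no unrelated kernel data to user threads sharing that partition.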