48 changes: 44 additions & 4 deletions src/schedule/zephyr_dp_schedule.c
@@ -33,6 +33,14 @@ SOF_DEFINE_REG_UUID(dp_sched);

 DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
 
+#if CONFIG_SOF_USERSPACE_APPLICATION
+struct dp_sem_buf {
+	struct sys_sem sem[CONFIG_CORE_COUNT];
+	uint8_t reserved[CONFIG_MM_DRV_PAGE_SIZE - sizeof(struct sys_sem) * CONFIG_CORE_COUNT];
+};
+
+static struct dp_sem_buf __aligned(4096) dp_sched_sem;
+#else
 #define DP_LOCK_INIT(i, _) Z_SEM_INITIALIZER(dp_lock[i], 1, 1)
 #define DP_LOCK_INIT_LIST LISTIFY(CONFIG_MP_MAX_NUM_CPUS, DP_LOCK_INIT, (,))
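Note: the semaphore array is padded to exactly one page and page-aligned because a Zephyr memory-domain partition must be page-granular; that is what lets the whole buffer be mapped user-RW as a single partition (see scheduler_dp_add_domain below). A compile-time guard could pin the invariant down; this is a hypothetical sketch, not part of the diff, and assumes CONFIG_MM_DRV_PAGE_SIZE is the MMU page size behind the hard-coded __aligned(4096):

	/* Hypothetical guards, not in this PR: keep dp_sem_buf exactly one page. */
	BUILD_ASSERT(sizeof(struct dp_sem_buf) == CONFIG_MM_DRV_PAGE_SIZE,
		     "dp_sem_buf must span exactly one page");
	BUILD_ASSERT((CONFIG_MM_DRV_PAGE_SIZE % 4096) == 0,
		     "__aligned(4096) must match the page size");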

@@ -42,29 +50,56 @@ DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
  */
 static
 STRUCT_SECTION_ITERABLE_ARRAY(k_sem, dp_lock, CONFIG_MP_MAX_NUM_CPUS) = { DP_LOCK_INIT_LIST };
+#endif
 
 /* Each per-core instance of DP scheduler has separate structures; hence, locks are per-core.
  *
  * TODO: consider using cpu_get_id() instead of supplying core as a parameter.
  */
 unsigned int scheduler_dp_lock(uint16_t core)
 {
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	sys_sem_take(&dp_sched_sem.sem[core], K_FOREVER);
+#else
 	k_sem_take(&dp_lock[core], K_FOREVER);
+#endif
 
 	return core;
 }
 
 void scheduler_dp_unlock(unsigned int key)
 {
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	sys_sem_give(&dp_sched_sem.sem[key]);
+#else
 	k_sem_give(&dp_lock[key]);
+#endif
 }
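Note: a sys_sem lives in ordinary memory and is usable from user mode without k_thread_access_grant(), which is what makes the scheduler_dp_grant() removal below possible. Caller-side sketch (illustrative only, not from this PR; assumes SOF's cpu_get_id()):

	static void dp_critical_section(void)
	{
		/* the returned key is just the core index; unlock must reuse it */
		unsigned int key = scheduler_dp_lock(cpu_get_id());

		/* ... modify this core's DP scheduler state ... */

		scheduler_dp_unlock(key);
	}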

-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core)
+#if CONFIG_SOF_USERSPACE_APPLICATION
+int scheduler_dp_add_domain(struct k_mem_domain *domain)
 {
-#if CONFIG_USERSPACE
-	k_thread_access_grant(thread_id, &dp_lock[core]);
-#endif
+	struct k_mem_partition part = {
+		.start = (uintptr_t)&dp_sched_sem,
+		.size = sizeof(dp_sched_sem),
+		.attr = K_MEM_PARTITION_P_RW_U_RW,
+	};
+
+	return k_mem_domain_add_partition(domain, &part);
 }
+
+int scheduler_dp_rm_domain(struct k_mem_domain *domain)
+{
+	struct k_mem_partition part = {
+		.start = (uintptr_t)&dp_sched_sem,
+		.size = sizeof(dp_sched_sem),
+		.attr = K_MEM_PARTITION_P_RW_U_RW,
+	};
+
+	return k_mem_domain_remove_partition(domain, &part);
+}
+#endif
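Note: both helpers describe the same one-page partition over dp_sched_sem; adding it to a core's memory domain is what makes the semaphores reachable from that core's user-mode DP threads. A hypothetical caller, assuming a fresh domain and an existing thread (dp_grant_lock_page, dom and thread_id are invented names for illustration):

	static int dp_grant_lock_page(struct k_mem_domain *dom, k_tid_t thread_id)
	{
		int ret = k_mem_domain_init(dom, 0, NULL);	/* start empty */

		if (ret == 0)
			ret = scheduler_dp_add_domain(dom);	/* map the dp_sched_sem page */
		if (ret == 0)
			ret = k_mem_domain_add_thread(dom, thread_id);

		return ret;
	}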

 /* dummy LL task - to start LL on secondary cores */
 static enum task_state scheduler_dp_ll_tick_dummy(void *data)
 {
@@ -370,6 +405,11 @@ int scheduler_dp_init(void)

 	scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
 
+#if CONFIG_SOF_USERSPACE_APPLICATION
+	for (unsigned int i = 0; i < ARRAY_SIZE(dp_sched_sem.sem); i++)
+		sys_sem_init(dp_sched_sem.sem + i, 1, 1);
+#endif
+
 	/* init src of DP tick */
 	ret = schedule_task_init_ll(&dp_sch->ll_tick_src,
 				    SOF_UUID(dp_sched_uuid),
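Note: sys_sem_init(sem, 1, 1) (initial count 1, limit 1) makes each entry a binary semaphore, matching the Z_SEM_INITIALIZER(dp_lock[i], 1, 1) it replaces; the only behavioral difference is that initialization now happens at runtime in scheduler_dp_init(). Minimal standalone sketch of the primitive (illustrative only):

	#include <zephyr/sys/sem.h>

	static struct sys_sem lock;

	static void demo(void)
	{
		sys_sem_init(&lock, 1, 1);	/* count 1, limit 1: binary semaphore */
		sys_sem_take(&lock, K_FOREVER);
		/* critical section */
		sys_sem_give(&lock);
	}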
3 changes: 2 additions & 1 deletion src/schedule/zephyr_dp_schedule.h
@@ -55,11 +55,12 @@ void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_
 void dp_thread_fn(void *p1, void *p2, void *p3);
 unsigned int scheduler_dp_lock(uint16_t core);
 void scheduler_dp_unlock(unsigned int key);
-void scheduler_dp_grant(k_tid_t thread_id, uint16_t core);
 int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			   const struct task_ops *ops, struct processing_module *mod,
 			   uint16_t core, size_t stack_size, uint32_t options);
 #if CONFIG_SOF_USERSPACE_APPLICATION
+int scheduler_dp_add_domain(struct k_mem_domain *domain);
+int scheduler_dp_rm_domain(struct k_mem_domain *domain);
 void scheduler_dp_domain_free(struct processing_module *pmod);
 int scheduler_dp_domain_init(void);
 #else
8 changes: 7 additions & 1 deletion src/schedule/zephyr_dp_schedule_application.c
@@ -396,6 +396,7 @@ void scheduler_dp_domain_free(struct processing_module *pmod)

 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
+	scheduler_dp_rm_domain(dp_mdom + core);
 #endif
 }

@@ -505,7 +506,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,

 #if CONFIG_USERSPACE
 	k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
-	scheduler_dp_grant(pdata->thread_id, core);
 
 	unsigned int pidx;
 	size_t size;
@@ -531,6 +531,12 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		goto e_dom;
 	}
 
+	ret = scheduler_dp_add_domain(dp_mdom + core);
+	if (ret < 0) {
+		tr_err(&dp_tr, "failed to add DP lock domain %d", ret);
+		goto e_dom;
+	}
+
 	ret = llext_manager_add_domain(mod->dev->ipc_config.id, dp_mdom + core);
 	if (ret < 0) {
 		tr_err(&dp_tr, "failed to add LLEXT to domain %d", ret);
1 change: 0 additions & 1 deletion src/schedule/zephyr_dp_schedule_thread.c
@@ -270,7 +270,6 @@ int scheduler_dp_task_init(struct task **task,
 			CONFIG_DP_THREAD_PRIORITY, (*task)->flags, K_FOREVER);
 
 	k_thread_access_grant(pdata->thread_id, pdata->event);
-	scheduler_dp_grant(pdata->thread_id, cpu_get_id());
 
 	/* pin the thread to specific core */
 	ret = k_thread_cpu_pin(pdata->thread_id, core);