
Commit 7be6321

marcinszkudlinski authored and kv2019i committed
dp: wait till dp thread stops in thread cancel
There's a race possibility in DP when stopping a pipeline:
- DP starts processing
- an incoming IPC pauses the pipeline; IPC has higher priority than DP, so DP is preempted
- the pipeline is stopping and the module "reset" is called; some of its resources may be freed here
- when the IPC finishes, the DP thread continues processing

Solution: wait for DP to finish processing and terminate the DP thread before calling the module's "reset" method.

To do this:
1) call "thread cancel" before calling "reset"
2) modify "thread cancel" to mark the thread for termination and execute k_thread_join()
3) a terminated thread cannot be restarted, so thread creation must be moved from "init" to "schedule". There's no need to reallocate memory; Zephyr guarantees that resources may be re-used once a thread is terminated.

Signed-off-by: Marcin Szkudlinski <[email protected]>
(cherry picked from commit 34957e7)
Signed-off-by: Kai Vehmanen <[email protected]>
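For illustration, the cancel-and-join pattern described above boils down to the following minimal Zephyr sketch. Names (struct dp_task, dp_thread_fn, the cancelled flag) are illustrative stand-ins only; the real SOF code keys off the task state and lives in src/schedule/zephyr_dp_schedule.c below.

/*
 * Minimal sketch of the cancel-and-join pattern, not the SOF implementation.
 * Assumes the semaphore was initialized with k_sem_init(&t->sem, 0, 1) and
 * the thread was created with dp_thread_fn as its entry point.
 */
#include <stdbool.h>
#include <zephyr/kernel.h>

struct dp_task {
	struct k_thread thread;	/* thread object, reusable after the thread terminates */
	k_tid_t thread_id;	/* returned by k_thread_create() */
	struct k_sem sem;	/* the DP thread parks here between processing cycles */
	bool cancelled;
};

static void dp_thread_fn(void *p1, void *p2, void *p3)
{
	struct dp_task *t = p1;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		k_sem_take(&t->sem, K_FOREVER);	/* wait for the next tick */
		if (t->cancelled)
			break;			/* self-terminate */
		/* ... process one cycle of audio data ... */
	}
}

static void dp_task_cancel(struct dp_task *t)
{
	t->cancelled = true;
	/* if the thread is parked on the semaphore, let it run and exit */
	k_sem_give(&t->sem);
	/* block until the thread has really finished; only after this point
	 * is it safe for the module "reset" handler to free resources
	 */
	k_thread_join(t->thread_id, K_FOREVER);
}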
1 parent d2ec0c6 commit 7be6321

4 files changed: +64 additions, −48 deletions

src/audio/module_adapter/module/generic.c
Lines changed: 3 additions & 0 deletions

@@ -336,6 +336,9 @@ int module_reset(struct processing_module *mod)
 	if (md->state < MODULE_IDLE)
 		return 0;
 #endif
+	/* cancel task if DP task*/
+	if (mod->dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP && mod->dev->task)
+		schedule_task_cancel(mod->dev->task);
 	if (ops->reset) {
 		ret = ops->reset(mod);
 		if (ret) {

src/audio/pipeline/pipeline-schedule.c
Lines changed: 1 addition & 7 deletions

@@ -45,11 +45,6 @@ DECLARE_SOF_UUID("dp-task", dp_task_uuid, 0xee755917, 0x96b9, 0x4130,
  */
 #define TASK_DP_STACK_SIZE 8192
 
-/**
- * \brief a priority of the DP threads in the system.
- */
-#define ZEPHYR_DP_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 2)
-
 #endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
 
 static void pipeline_schedule_cancel(struct pipeline *p)
@@ -403,8 +398,7 @@ int pipeline_comp_dp_task_init(struct comp_dev *comp)
					     &ops,
					     mod,
					     comp->ipc_config.core,
-					     TASK_DP_STACK_SIZE,
-					     ZEPHYR_DP_THREAD_PRIORITY);
+					     TASK_DP_STACK_SIZE);
		if (ret < 0)
			return ret;
	}

src/include/sof/schedule/dp_schedule.h
Lines changed: 1 addition & 3 deletions

@@ -67,15 +67,13 @@ int scheduler_dp_init(void);
  * \param[in] mod pointer to the module to be run
  * \param[in] core CPU the thread should run on
  * \param[in] stack_size size of stack for a zephyr task
- * \param[in] task_priority priority of the zephyr task
  */
 int scheduler_dp_task_init(struct task **task,
			    const struct sof_uuid_entry *uid,
			    const struct task_ops *ops,
			    struct processing_module *mod,
			    uint16_t core,
-			    size_t stack_size,
-			    uint32_t task_priority);
+			    size_t stack_size);
 
 /**
  * \brief Extract information about scheduler's tasks

src/schedule/zephyr_dp_schedule.c
Lines changed: 59 additions & 38 deletions

@@ -29,15 +29,22 @@ DECLARE_SOF_UUID("dp-schedule", dp_sched_uuid, 0x87858bc2, 0xbaa9, 0x40b6,
 
 DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
 
+/**
+ * \brief a priority of the DP threads in the system.
+ */
+#define ZEPHYR_DP_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 2)
+
 struct scheduler_dp_data {
 	struct list_item tasks; /* list of active dp tasks */
 	struct task ll_tick_src; /* LL task - source of DP tick */
 };
 
 struct task_dp_pdata {
 	k_tid_t thread_id; /* zephyr thread ID */
+	struct k_thread thread; /* memory space for a thread */
 	uint32_t deadline_clock_ticks; /* dp module deadline in Zephyr ticks */
 	k_thread_stack_t __sparse_cache *p_stack; /* pointer to thread stack */
+	size_t stack_size; /* size of the stack in bytes */
 	struct k_sem sem; /* semaphore for task scheduling */
 	struct processing_module *mod; /* the module to be scheduled */
 	uint32_t ll_cycles_to_start; /* current number of LL cycles till delayed start */
@@ -267,6 +274,8 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 {
 	unsigned int lock_key;
 	struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
+	struct task_dp_pdata *pdata = task->priv_data;
+
 
 	/* this is asyn cancel - mark the task as canceled and remove it from scheduling */
 	lock_key = scheduler_dp_lock();
@@ -278,8 +287,14 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 	if (list_is_empty(&dp_sch->tasks))
 		schedule_task_cancel(&dp_sch->ll_tick_src);
 
+	/* if the task is waiting on a semaphore - let it run and self-terminate */
+	k_sem_give(&pdata->sem);
 	scheduler_dp_unlock(lock_key);
 
+	/* wait till the task has finished, if there was any task created */
+	if (pdata->thread_id)
+		k_thread_join(pdata->thread_id, K_FOREVER);
+
 	return 0;
 }
 
@@ -289,10 +304,17 @@ static int scheduler_dp_task_free(void *data, struct task *task)
 
 	scheduler_dp_task_cancel(data, task);
 
-	/* abort the execution of the thread */
-	k_thread_abort(pdata->thread_id);
+	/* the thread should be terminated at this moment,
+	 * abort is safe and will ensure no use after free
+	 */
+	if (pdata->thread_id) {
+		k_thread_abort(pdata->thread_id);
+		pdata->thread_id = NULL;
+	}
+
 	/* free task stack */
 	rfree((__sparse_force void *)pdata->p_stack);
+	pdata->p_stack = NULL;
 
 	/* all other memory has been allocated as a single malloc, will be freed later by caller */
 	return 0;
@@ -345,17 +367,17 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
 			}
 		}
 
-		/* call task_complete */
-		if (task->state == SOF_TASK_STATE_COMPLETED) {
-			/* call task_complete out of lock, it may eventually call schedule again */
-			scheduler_dp_unlock(lock_key);
-			task_complete(task);
-		} else {
-			scheduler_dp_unlock(lock_key);
-		}
-	};
+		if (task->state == SOF_TASK_STATE_COMPLETED ||
+		    task->state == SOF_TASK_STATE_CANCEL)
+			break;	/* exit the while loop, terminate the thread */
 
-	/* never be here */
+		scheduler_dp_unlock(lock_key);
+	}
+
+	scheduler_dp_unlock(lock_key);
+	/* call task_complete */
+	if (task->state == SOF_TASK_STATE_COMPLETED)
+		task_complete(task);
 }
 
 static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
@@ -365,6 +387,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	struct task_dp_pdata *pdata = task->priv_data;
 	unsigned int lock_key;
 	uint64_t deadline_clock_ticks;
+	int ret;
 
 	lock_key = scheduler_dp_lock();
 
@@ -375,6 +398,22 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 		return -EINVAL;
 	}
 
+	/* create a zephyr thread for the task */
+	pdata->thread_id = k_thread_create(&pdata->thread, (__sparse_force void *)pdata->p_stack,
+					   pdata->stack_size, dp_thread_fn, task, NULL, NULL,
+					   ZEPHYR_DP_THREAD_PRIORITY, K_USER, K_FOREVER);
+
+	/* pin the thread to specific core */
+	ret = k_thread_cpu_pin(pdata->thread_id, task->core);
+	if (ret < 0) {
+		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
+		goto err;
+	}
+
+	/* start the thread, it should immediately stop at a semaphore, so clean it */
+	k_sem_reset(&pdata->sem);
+	k_thread_start(pdata->thread_id);
+
 	/* if there's no DP tasks scheduled yet, run ll tick source task */
 	if (list_is_empty(&dp_sch->tasks))
 		schedule_task(&dp_sch->ll_tick_src, 0, 0);
@@ -396,6 +435,12 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 
 	tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
 	return 0;
+
+err:
+	/* cleanup - unlock and free all allocated resources */
+	scheduler_dp_unlock(lock_key);
+	k_thread_abort(pdata->thread_id);
+	return ret;
 }
 
 static struct scheduler_ops schedule_dp_ops = {
@@ -436,19 +481,16 @@ int scheduler_dp_task_init(struct task **task,
			    const struct task_ops *ops,
			    struct processing_module *mod,
			    uint16_t core,
-			    size_t stack_size,
-			    uint32_t task_priority)
+			    size_t stack_size)
 {
 	void __sparse_cache *p_stack = NULL;
 
 	/* memory allocation helper structure */
 	struct {
 		struct task task;
 		struct task_dp_pdata pdata;
-		struct k_thread thread;
 	} *task_memory;
 
-	k_tid_t thread_id = NULL;
 	int ret;
 
 	/* must be called on the same core the task will be binded to */
@@ -478,23 +520,6 @@ int scheduler_dp_task_init(struct task **task,
 		goto err;
 	}
 
-	/* create a zephyr thread for the task */
-	thread_id = k_thread_create(&task_memory->thread, (__sparse_force void *)p_stack,
-				    stack_size, dp_thread_fn, &task_memory->task, NULL, NULL,
-				    task_priority, K_USER, K_FOREVER);
-	if (!thread_id) {
-		ret = -EFAULT;
-		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr thread create failed");
-		goto err;
-	}
-	/* pin the thread to specific core */
-	ret = k_thread_cpu_pin(thread_id, core);
-	if (ret < 0) {
-		ret = -EFAULT;
-		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
-		goto err;
-	}
-
 	/* internal SOF task init */
 	ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run,
 				 mod, core, 0);
@@ -514,19 +539,15 @@ int scheduler_dp_task_init(struct task **task,
 
 	/* success, fill the structures */
 	task_memory->task.priv_data = &task_memory->pdata;
-	task_memory->pdata.thread_id = thread_id;
 	task_memory->pdata.p_stack = p_stack;
+	task_memory->pdata.stack_size = stack_size;
 	task_memory->pdata.mod = mod;
 	*task = &task_memory->task;
 
-	/* start the thread - it will immediately stop at a semaphore */
-	k_thread_start(thread_id);
 
 	return 0;
 err:
 	/* cleanup - free all allocated resources */
-	if (thread_id)
-		k_thread_abort(thread_id);
 	rfree((__sparse_force void *)p_stack);
 	rfree(task_memory);
 	return ret;