@@ -29,15 +29,22 @@ DECLARE_SOF_UUID("dp-schedule", dp_sched_uuid, 0x87858bc2, 0xbaa9, 0x40b6,

 DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);

+/**
+ * \brief a priority of the DP threads in the system.
+ */
+#define ZEPHYR_DP_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 2)
+
 struct scheduler_dp_data {
         struct list_item tasks;         /* list of active dp tasks */
         struct task ll_tick_src;        /* LL task - source of DP tick */
 };

 struct task_dp_pdata {
         k_tid_t thread_id;              /* zephyr thread ID */
+        struct k_thread thread;         /* memory space for a thread */
         uint32_t deadline_clock_ticks;  /* dp module deadline in Zephyr ticks */
         k_thread_stack_t __sparse_cache *p_stack;       /* pointer to thread stack */
+        size_t stack_size;              /* size of the stack in bytes */
         struct k_sem sem;               /* semaphore for task scheduling */
         struct processing_module *mod;  /* the module to be scheduled */
         uint32_t ll_cycles_to_start;    /* current number of LL cycles till delayed start */
@@ -267,6 +274,8 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
 {
         unsigned int lock_key;
         struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
+        struct task_dp_pdata *pdata = task->priv_data;
+

         /* this is async cancel - mark the task as canceled and remove it from scheduling */
         lock_key = scheduler_dp_lock();
@@ -278,8 +287,14 @@ static int scheduler_dp_task_cancel(void *data, struct task *task)
         if (list_is_empty(&dp_sch->tasks))
                 schedule_task_cancel(&dp_sch->ll_tick_src);

+        /* if the task is waiting on a semaphore - let it run and self-terminate */
+        k_sem_give(&pdata->sem);
         scheduler_dp_unlock(lock_key);

+        /* wait till the task has finished, if a thread was ever created */
+        if (pdata->thread_id)
+                k_thread_join(pdata->thread_id, K_FOREVER);
+
         return 0;
 }

@@ -289,10 +304,17 @@ static int scheduler_dp_task_free(void *data, struct task *task)

         scheduler_dp_task_cancel(data, task);

-        /* abort the execution of the thread */
-        k_thread_abort(pdata->thread_id);
+        /* the thread should be terminated at this moment,
+         * abort is safe and will ensure no use after free
+         */
+        if (pdata->thread_id) {
+                k_thread_abort(pdata->thread_id);
+                pdata->thread_id = NULL;
+        }
+
         /* free task stack */
         rfree((__sparse_force void *)pdata->p_stack);
+        pdata->p_stack = NULL;

         /* all other memory has been allocated as a single malloc, will be freed later by caller */
         return 0;
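
The two hunks above change the teardown order: cancel wakes a possibly parked thread and joins it, so that free can treat k_thread_abort() as a harmless safeguard. Below is a minimal standalone sketch of that handshake, assuming a plain Zephyr environment; worker(), worker_sem, cancelled and cancel_and_join() are illustrative names, not part of this patch.

#include <zephyr/kernel.h>
#include <stdbool.h>

K_SEM_DEFINE(worker_sem, 0, 1);
static volatile bool cancelled;

/* thread entry: parks on the semaphore, returns once cancelled */
static void worker(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        while (!cancelled)
                k_sem_take(&worker_sem, K_FOREVER);
        /* returning from the entry function terminates the thread */
}

/* tid is whatever k_thread_create() returned for worker() */
static void cancel_and_join(k_tid_t tid)
{
        cancelled = true;
        k_sem_give(&worker_sem);        /* if the thread waits on the semaphore, let it run */
        k_thread_join(tid, K_FOREVER);  /* block until it has really finished */
        k_thread_abort(tid);            /* thread already exited: abort is a safe no-op */
}

Joining before freeing the stack is what guarantees the thread can no longer touch memory that is about to be released.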
@@ -345,17 +367,17 @@ static void dp_thread_fn(void *p1, void *p2, void *p3)
                         }
                 }

-                /* call task_complete */
-                if (task->state == SOF_TASK_STATE_COMPLETED) {
-                        /* call task_complete out of lock, it may eventually call schedule again */
-                        scheduler_dp_unlock(lock_key);
-                        task_complete(task);
-                } else {
-                        scheduler_dp_unlock(lock_key);
-                }
-        };
+                if (task->state == SOF_TASK_STATE_COMPLETED ||
+                    task->state == SOF_TASK_STATE_CANCEL)
+                        break;  /* exit the while loop, terminate the thread */

-        /* never be here */
+                scheduler_dp_unlock(lock_key);
+        }
+
+        scheduler_dp_unlock(lock_key);
+        /* call task_complete */
+        if (task->state == SOF_TASK_STATE_COMPLETED)
+                task_complete(task);
 }

 static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
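
The reshaped loop above exits on COMPLETED or CANCEL and only calls task_complete() after the scheduler lock has been dropped, because the completion hook may re-enter the scheduler. A minimal sketch of that callback-outside-the-lock pattern using a generic Zephyr spinlock; work_state, lock and on_completed() are illustrative names, not the SOF API.

#include <zephyr/kernel.h>

enum work_state { WORK_RUNNING, WORK_COMPLETED, WORK_CANCELLED };

static struct k_spinlock lock;
static enum work_state state;

static void on_completed(void)
{
        /* may take the lock again, e.g. to queue follow-up work */
}

static void worker_iteration(void)
{
        k_spinlock_key_t key = k_spin_lock(&lock);

        /* ... do one unit of work and update the state under the lock ... */
        state = WORK_COMPLETED;

        k_spin_unlock(&lock, key);

        /* the completion callback runs with the lock released */
        if (state == WORK_COMPLETED)
                on_completed();
}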
@@ -365,6 +387,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
         struct task_dp_pdata *pdata = task->priv_data;
         unsigned int lock_key;
         uint64_t deadline_clock_ticks;
+        int ret;

         lock_key = scheduler_dp_lock();

@@ -375,6 +398,22 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,
                 return -EINVAL;
         }

+        /* create a zephyr thread for the task */
+        pdata->thread_id = k_thread_create(&pdata->thread, (__sparse_force void *)pdata->p_stack,
+                                           pdata->stack_size, dp_thread_fn, task, NULL, NULL,
+                                           ZEPHYR_DP_THREAD_PRIORITY, K_USER, K_FOREVER);
+
+        /* pin the thread to specific core */
+        ret = k_thread_cpu_pin(pdata->thread_id, task->core);
+        if (ret < 0) {
+                tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
+                goto err;
+        }
+
+        /* start the thread, it should immediately stop at a semaphore, so clean it */
+        k_sem_reset(&pdata->sem);
+        k_thread_start(pdata->thread_id);
+
         /* if there's no DP tasks scheduled yet, run ll tick source task */
         if (list_is_empty(&dp_sch->tasks))
                 schedule_task(&dp_sch->ll_tick_src, 0, 0);
@@ -396,6 +435,12 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t start,

         tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
         return 0;
+
+err:
+        /* cleanup - unlock and free all allocated resources */
+        scheduler_dp_unlock(lock_key);
+        k_thread_abort(pdata->thread_id);
+        return ret;
 }

 static struct scheduler_ops schedule_dp_ops = {
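
With the hunks above, the schedule hook owns the whole thread start-up: create the thread suspended (K_FOREVER), pin it to the task's core, reset the semaphore and start it, aborting the never-started thread on failure. A minimal sketch of that sequence in plain Zephyr terms, assuming CONFIG_SCHED_CPU_MASK is enabled for k_thread_cpu_pin(); dp_stack, dp_entry() and start_dp_thread() are illustrative names.

#include <zephyr/kernel.h>

#define DP_STACK_SIZE   2048
/* one of the lowest preemptible priorities, like ZEPHYR_DP_THREAD_PRIORITY above */
#define DP_PRIORITY     (CONFIG_NUM_PREEMPT_PRIORITIES - 2)

K_THREAD_STACK_DEFINE(dp_stack, DP_STACK_SIZE);
static struct k_thread dp_thread;
static struct k_sem dp_sem;

static void dp_entry(void *p1, void *p2, void *p3)
{
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);

        /* block until the scheduler gives the semaphore */
        k_sem_take(&dp_sem, K_FOREVER);
}

static int start_dp_thread(int core)
{
        k_tid_t tid;
        int ret;

        k_sem_init(&dp_sem, 0, 1);

        /* K_FOREVER delay: the thread exists but does not run yet */
        tid = k_thread_create(&dp_thread, dp_stack, K_THREAD_STACK_SIZEOF(dp_stack),
                              dp_entry, NULL, NULL, NULL,
                              DP_PRIORITY, K_USER, K_FOREVER);

        ret = k_thread_cpu_pin(tid, core);      /* must run on the module's core */
        if (ret < 0) {
                k_thread_abort(tid);            /* never started, abort is the cleanup */
                return ret;
        }

        k_sem_reset(&dp_sem);                   /* drop any stale give */
        k_thread_start(tid);                    /* it parks in dp_entry() right away */
        return 0;
}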
@@ -436,19 +481,16 @@ int scheduler_dp_task_init(struct task **task,
                            const struct task_ops *ops,
                            struct processing_module *mod,
                            uint16_t core,
-                           size_t stack_size,
-                           uint32_t task_priority)
+                           size_t stack_size)
 {
         void __sparse_cache *p_stack = NULL;

         /* memory allocation helper structure */
         struct {
                 struct task task;
                 struct task_dp_pdata pdata;
-                struct k_thread thread;
         } *task_memory;

-        k_tid_t thread_id = NULL;
         int ret;

         /* must be called on the same core the task will be bound to */
@@ -478,23 +520,6 @@ int scheduler_dp_task_init(struct task **task,
                 goto err;
         }

-        /* create a zephyr thread for the task */
-        thread_id = k_thread_create(&task_memory->thread, (__sparse_force void *)p_stack,
-                                    stack_size, dp_thread_fn, &task_memory->task, NULL, NULL,
-                                    task_priority, K_USER, K_FOREVER);
-        if (!thread_id) {
-                ret = -EFAULT;
-                tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr thread create failed");
-                goto err;
-        }
-        /* pin the thread to specific core */
-        ret = k_thread_cpu_pin(thread_id, core);
-        if (ret < 0) {
-                ret = -EFAULT;
-                tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
-                goto err;
-        }
-
         /* internal SOF task init */
         ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run,
                                  mod, core, 0);
@@ -514,19 +539,15 @@ int scheduler_dp_task_init(struct task **task,

         /* success, fill the structures */
         task_memory->task.priv_data = &task_memory->pdata;
-        task_memory->pdata.thread_id = thread_id;
         task_memory->pdata.p_stack = p_stack;
+        task_memory->pdata.stack_size = stack_size;
         task_memory->pdata.mod = mod;
         *task = &task_memory->task;

-        /* start the thread - it will immediately stop at a semaphore */
-        k_thread_start(thread_id);

         return 0;
 err:
         /* cleanup - free all allocated resources */
-        if (thread_id)
-                k_thread_abort(thread_id);
         rfree((__sparse_force void *)p_stack);
         rfree(task_memory);
         return ret;