Commit c8918d4

cpuload.cpp: Add PID hash function to find task structure
Adds a hash function from the process PID to store and find the corresponding task info structure, instead of looping through the task list. This reduces complexity from O(n) to O(1) when calling the sched_note_* functions, greatly reducing CPU load when there are many processes and frequent context switches.
1 parent c46cd22 commit c8918d4
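
For reference, the core idea is that with a power-of-two table size the slot for a PID is just pid & (size - 1), and whenever a new PID would land in an occupied slot the table is doubled and rehashed until the insert succeeds, so lookups stay O(1). A minimal standalone C sketch of that scheme (illustrative names only, not the committed code; the actual implementation is in the diff below):

#include <stdlib.h>

#define SLOT(pid, size) ((pid) & ((size) - 1))	/* valid because size is a power of two */

struct entry { int pid; void *info; };

static struct entry **table;
static int table_size = 4;

static int init_table(void)
{
	table = calloc(table_size, sizeof(*table));
	return table ? 0 : -1;
}

/* O(1) lookup: index directly instead of scanning a task list */
static struct entry *lookup(int pid)
{
	return table ? table[SLOT(pid, table_size)] : NULL;
}

/* Insert; on a collision, double the table, rehash the survivors, retry */
static int insert(struct entry *e)
{
	for (;;) {
		int slot = SLOT(e->pid, table_size);

		if (table[slot] == NULL) {
			table[slot] = e;
			return 0;
		}

		struct entry **bigger = calloc(2 * table_size, sizeof(*bigger));

		if (bigger == NULL) {
			return -1;	/* out of memory */
		}

		/* Entries whose slots differed under the old mask still differ under
		 * the wider mask, so rehashing the existing entries cannot collide. */
		for (int i = 0; i < table_size; i++) {
			if (table[i]) {
				bigger[SLOT(table[i]->pid, 2 * table_size)] = table[i];
			}
		}

		free(table);
		table = bigger;
		table_size *= 2;
	}
}

The committed hash_task_info() below uses the same grow-on-collision approach, trading some memory for constant-time lookups when live PIDs happen to share low bits.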

File tree

1 file changed: +115 −10 lines changed

platforms/nuttx/src/px4/common/cpuload.cpp

Lines changed: 115 additions & 10 deletions
@@ -51,6 +51,102 @@ __BEGIN_DECLS
 
 __EXPORT struct system_load_s system_load;
 
+/* Simple hashing via PID; shamelessly ripped from NuttX scheduler. All rights
+ * and credit belong to whomever authored this logic.
+ */
+
+#define HASH(i) ((i) & (hashtab_size - 1))
+
+struct system_load_taskinfo_s **hashtab;
+volatile int hashtab_size;
+
+void init_task_hash(void)
+{
+	hashtab_size = 4;
+	hashtab = (struct system_load_taskinfo_s **)kmm_zalloc(sizeof(*hashtab) * hashtab_size);
+}
+
+static struct system_load_taskinfo_s *get_task_info(pid_t pid)
+{
+	struct system_load_taskinfo_s *ret = NULL;
+	irqstate_t flags = enter_critical_section();
+
+	if (hashtab) {
+		ret = hashtab[HASH(pid)];
+	}
+
+	leave_critical_section(flags);
+	return ret;
+}
+
+static void drop_task_info(pid_t pid)
+{
+	irqstate_t flags = enter_critical_section();
+	hashtab[HASH(pid)] = NULL;
+	leave_critical_section(flags);
+}
+
+static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
+{
+	struct system_load_taskinfo_s **newtab;
+	void *temp;
+	int hash;
+	int i;
+
+	/* Use critical section to protect the hash table */
+
+	irqstate_t flags = enter_critical_section();
+
+	/* Keep trying until we get it or run out of memory */
+
+retry:
+
+	/* Calculate hash */
+
+	hash = HASH(pid);
+
+	/* Check if the entry is available */
+
+	if (hashtab[hash] == NULL) {
+		hashtab[hash] = task_info;
+		leave_critical_section(flags);
+		return OK;
+	}
+
+	/* No can do, double the size of the hash table */
+
+	newtab = (struct system_load_taskinfo_s **)kmm_zalloc(hashtab_size * 2 * sizeof(*newtab));
+
+	if (newtab == NULL) {
+		leave_critical_section(flags);
+		return -ENOMEM;
+	}
+
+	hashtab_size *= 2;
+
+	/* Start using the new hash table */
+
+	for (i = 0; i < hashtab_size / 2; i++) {
+		struct system_load_taskinfo_s *info = hashtab[i];
+
+		if (info && info->tcb) {
+			hash = HASH(info->tcb->pid);
+			newtab[hash] = hashtab[i];
+
+		} else {
+			newtab[i] = NULL;
+		}
+	}
+
+	temp = hashtab;
+	hashtab = newtab;
+	kmm_free(temp);
+
+	/* Try again */
+
+	goto retry;
+}
+
 #if defined(CONFIG_SEGGER_SYSVIEW)
 # include <nuttx/note/note_sysview.h>
 # ifndef CONFIG_SEGGER_SYSVIEW_PREFIX
@@ -87,6 +183,10 @@ void cpuload_monitor_stop()
 
 void cpuload_initialize_once()
 {
+	/* Initialize hashing */
+
+	init_task_hash();
+
 	for (auto &task : system_load.tasks) {
 		task.valid = false;
 	}
@@ -127,6 +227,8 @@ void sched_note_start(FAR struct tcb_s *tcb)
 			task.tcb = tcb;
 			task.valid = true;
 			system_load.total_count++;
+			// add to the hashlist
+			hash_task_info(&task, tcb->pid);
 			break;
 		}
 	}
@@ -148,6 +250,8 @@ void sched_note_stop(FAR struct tcb_s *tcb)
 			task.curr_start_time = 0;
 			task.tcb = nullptr;
 			system_load.total_count--;
+			// drop from the tasklist
+			drop_task_info(tcb->pid);
 			break;
 		}
 	}
@@ -171,13 +275,13 @@ void sched_note_suspend(FAR struct tcb_s *tcb)
 		}
 	}
 
-	for (auto &task : system_load.tasks) {
-		// Task ending its current scheduling run
-		if (task.valid && (task.curr_start_time > 0)
-		    && task.tcb && task.tcb->pid == tcb->pid) {
+	struct system_load_taskinfo_s *task = get_task_info(tcb->pid);
 
-			task.total_runtime += hrt_elapsed_time(&task.curr_start_time);
-			break;
+	if (task) {
+		// Task ending its current scheduling run
+		if (task->valid && (task->curr_start_time > 0)
+		    && task->tcb && task->tcb->pid == tcb->pid) {
+			task->total_runtime += hrt_elapsed_time(&task->curr_start_time);
 		}
 	}
 }
@@ -200,12 +304,13 @@ void sched_note_resume(FAR struct tcb_s *tcb)
 		}
 	}
 
-	for (auto &task : system_load.tasks) {
-		if (task.valid && task.tcb && task.tcb->pid == tcb->pid) {
+	struct system_load_taskinfo_s *task = get_task_info(tcb->pid);
+
+	if (task) {
+		if (task->valid && task->tcb && task->tcb->pid == tcb->pid) {
 			// curr_start_time is accessed from an IRQ handler (in logger), so we need
 			// to make the update atomic
-			hrt_store_absolute_time(&task.curr_start_time);
-			break;
+			hrt_store_absolute_time(&task->curr_start_time);
 		}
 	}
 }

0 commit comments
