@@ -51,6 +51,102 @@ __BEGIN_DECLS
 
 __EXPORT struct system_load_s system_load;
 
+/* Simple hashing via PID; shamelessly ripped from NuttX scheduler. All rights
+ * and credit belong to whomever authored this logic.
+ */
+
+#define HASH(i) ((i) & (hashtab_size - 1))
+
+struct system_load_taskinfo_s **hashtab;
+volatile int hashtab_size;
+
+void init_task_hash(void)
+{
+	hashtab_size = 4;
+	hashtab = (struct system_load_taskinfo_s **)kmm_zalloc(sizeof(*hashtab) * hashtab_size);
+}
+
+static struct system_load_taskinfo_s *get_task_info(pid_t pid)
+{
+	struct system_load_taskinfo_s *ret = NULL;
+	irqstate_t flags = enter_critical_section();
+
+	if (hashtab) {
+		ret = hashtab[HASH(pid)];
+	}
+
+	leave_critical_section(flags);
+	return ret;
+}
+
+static void drop_task_info(pid_t pid)
+{
+	irqstate_t flags = enter_critical_section();
+	hashtab[HASH(pid)] = NULL;
+	leave_critical_section(flags);
+}
+
+static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
+{
+	struct system_load_taskinfo_s **newtab;
+	void *temp;
+	int hash;
+	int i;
+
+	/* Use critical section to protect the hash table */
+
+	irqstate_t flags = enter_critical_section();
+
+	/* Keep trying until we get it or run out of memory */
+
+retry:
+
+	/* Calculate hash */
+
+	hash = HASH(pid);
+
+	/* Check if the entry is available */
+
+	if (hashtab[hash] == NULL) {
+		hashtab[hash] = task_info;
+		leave_critical_section(flags);
+		return OK;
+	}
+
+	/* No can do, double the size of the hash table */
+
+	newtab = (struct system_load_taskinfo_s **)kmm_zalloc(hashtab_size * 2 * sizeof(*newtab));
+
+	if (newtab == NULL) {
+		leave_critical_section(flags);
+		return -ENOMEM;
+	}
+
+	hashtab_size *= 2;
+
+	/* Start using the new hash table */
+
+	for (i = 0; i < hashtab_size / 2; i++) {
+		struct system_load_taskinfo_s *info = hashtab[i];
+
+		if (info && info->tcb) {
+			hash = HASH(info->tcb->pid);
+			newtab[hash] = hashtab[i];
+
+		} else {
+			newtab[i] = NULL;
+		}
+	}
+
+	temp = hashtab;
+	hashtab = newtab;
+	kmm_free(temp);
+
+	/* Try again */
+
+	goto retry;
+}
+
 #if defined(CONFIG_SEGGER_SYSVIEW)
 # include <nuttx/note/note_sysview.h>
 # ifndef CONFIG_SEGGER_SYSVIEW_PREFIX
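For readers unfamiliar with the NuttX-style table added above: it is a direct-indexed, power-of-two table that doubles in size and rehashes whenever the slot for a PID is already taken. A minimal standalone sketch of that grow-on-collision behaviour follows; it is an illustration only (not part of this commit), with libc calloc()/free() standing in for kmm_zalloc()/kmm_free() and the critical sections omitted.

/* Illustration only: the same PID-keyed, grow-on-collision table, with libc
 * allocators in place of kmm_zalloc()/kmm_free() and no interrupt locking. */
#include <stdio.h>
#include <stdlib.h>

#define HASH(pid, size) ((pid) & ((size) - 1))

static int **tab;		/* toy payload: one int per "task", holding its pid */
static int tab_size;

static int put(int *item, int pid)
{
	for (;;) {
		if (tab[HASH(pid, tab_size)] == NULL) {
			tab[HASH(pid, tab_size)] = item;
			return 0;
		}

		/* Collision: double the table and rehash, as hash_task_info() does */
		int **newtab = calloc(tab_size * 2, sizeof(*newtab));

		if (newtab == NULL) {
			return -1;
		}

		for (int i = 0; i < tab_size; i++) {
			if (tab[i]) {
				newtab[HASH(*tab[i], tab_size * 2)] = tab[i];
			}
		}

		free(tab);
		tab = newtab;
		tab_size *= 2;
	}
}

int main(void)
{
	static int pids[] = {3, 7, 11, 19};	/* all collide in a 4-slot table */

	tab_size = 4;
	tab = calloc(tab_size, sizeof(*tab));

	for (int i = 0; i < 4; i++) {
		put(&pids[i], pids[i]);
	}

	printf("table grew to %d slots\n", tab_size);	/* prints 32 */
	return 0;
}

The table never shrinks and lookups stay O(1), which is what lets sched_note_suspend()/sched_note_resume() below drop their linear scans over system_load.tasks.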
@@ -87,6 +183,10 @@ void cpuload_monitor_stop()
 
 void cpuload_initialize_once()
 {
+	/* Initialize hashing */
+
+	init_task_hash();
+
 	for (auto &task : system_load.tasks) {
 		task.valid = false;
 	}
@@ -127,6 +227,8 @@ void sched_note_start(FAR struct tcb_s *tcb)
 			task.tcb = tcb;
 			task.valid = true;
 			system_load.total_count++;
+			// add to the hashlist
+			hash_task_info(&task, tcb->pid);
 			break;
 		}
 	}
@@ -148,6 +250,8 @@ void sched_note_stop(FAR struct tcb_s *tcb)
 			task.curr_start_time = 0;
 			task.tcb = nullptr;
 			system_load.total_count--;
+			// drop from the tasklist
+			drop_task_info(tcb->pid);
 			break;
 		}
 	}
@@ -171,13 +275,13 @@ void sched_note_suspend(FAR struct tcb_s *tcb)
 		}
 	}
 
-	for (auto &task : system_load.tasks) {
-		// Task ending its current scheduling run
-		if (task.valid && (task.curr_start_time > 0)
-		    && task.tcb && task.tcb->pid == tcb->pid) {
+	struct system_load_taskinfo_s *task = get_task_info(tcb->pid);
 
-			task.total_runtime += hrt_elapsed_time(&task.curr_start_time);
-			break;
+	if (task) {
+		// Task ending its current scheduling run
+		if (task->valid && (task->curr_start_time > 0)
+		    && task->tcb && task->tcb->pid == tcb->pid) {
+			task->total_runtime += hrt_elapsed_time(&task->curr_start_time);
 		}
 	}
 }
@@ -200,12 +304,13 @@ void sched_note_resume(FAR struct tcb_s *tcb)
 		}
 	}
 
-	for (auto &task : system_load.tasks) {
-		if (task.valid && task.tcb && task.tcb->pid == tcb->pid) {
+	struct system_load_taskinfo_s *task = get_task_info(tcb->pid);
+
+	if (task) {
+		if (task->valid && task->tcb && task->tcb->pid == tcb->pid) {
 			// curr_start_time is accessed from an IRQ handler (in logger), so we need
 			// to make the update atomic
-			hrt_store_absolute_time(&task.curr_start_time);
-			break;
+			hrt_store_absolute_time(&task->curr_start_time);
 		}
 	}
 }