@@ -51,13 +51,13 @@
  *
  */
 struct buf_ring {
-	volatile uint32_t	br_prod_head;
-	volatile uint32_t	br_prod_tail;
+	uint32_t		br_prod_head;
+	uint32_t		br_prod_tail;
 	int			br_prod_size;
 	int			br_prod_mask;
 	uint64_t		br_drops;
-	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
-	volatile uint32_t	br_cons_tail;
+	uint32_t		br_cons_head __aligned(CACHE_LINE_SIZE);
+	uint32_t		br_cons_tail;
 	int			br_cons_size;
 	int			br_cons_mask;
 #if defined(DEBUG_BUFRING) && defined(_KERNEL)
@@ -83,10 +83,12 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
 	 * via drbr_peek(), and then re-added via drbr_putback() and
 	 * trigger a spurious panic.
 	 */
-	for (uint32_t i = br->br_cons_head; i != br->br_prod_head; i++)
+	for (uint32_t i = atomic_load_32(&br->br_cons_head);
+	    i != atomic_load_32(&br->br_prod_head); i++)
 		if (br->br_ring[i & mask] == buf)
 			panic("buf=%p already enqueue at %d prod=%d cons=%d",
-			    buf, i, br->br_prod_tail, br->br_cons_tail);
+			    buf, i, atomic_load_32(&br->br_prod_tail),
+			    atomic_load_32(&br->br_cons_tail));
 #endif
 	critical_enter();
 	do {
@@ -106,8 +108,8 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
 
 		if ((int32_t)(cons_tail + br->br_prod_size - prod_next) < 1) {
 			rmb();
-			if (prod_head == br->br_prod_head &&
-			    cons_tail == br->br_cons_tail) {
+			if (prod_head == atomic_load_32(&br->br_prod_head) &&
+			    cons_tail == atomic_load_32(&br->br_cons_tail)) {
 				br->br_drops++;
 				critical_exit();
 				return (ENOBUFS);
@@ -127,7 +129,7 @@ buf_ring_enqueue(struct buf_ring *br, void *buf)
 	 * that preceded us, we need to wait for them
 	 * to complete
 	 */
-	while (br->br_prod_tail != prod_head)
+	while (atomic_load_32(&br->br_prod_tail) != prod_head)
 		cpu_spinwait();
 	atomic_store_rel_32(&br->br_prod_tail, prod_next);
 	critical_exit();
@@ -173,7 +175,7 @@ buf_ring_dequeue_mc(struct buf_ring *br)
 	 * that preceded us, we need to wait for them
 	 * to complete
 	 */
-	while (br->br_cons_tail != cons_head)
+	while (atomic_load_32(&br->br_cons_tail) != cons_head)
 		cpu_spinwait();
 
 	atomic_store_rel_32(&br->br_cons_tail, cons_next);
@@ -195,7 +197,7 @@ buf_ring_dequeue_sc(struct buf_ring *br)
 	void *buf;
 
 	mask = br->br_cons_mask;
-	cons_head = br->br_cons_head;
+	cons_head = atomic_load_32(&br->br_cons_head);
 	prod_tail = atomic_load_acq_32(&br->br_prod_tail);
 
 	cons_next = cons_head + 1;
@@ -204,7 +206,7 @@ buf_ring_dequeue_sc(struct buf_ring *br)
 		return (NULL);
 
 	cons_idx = cons_head & mask;
-	br->br_cons_head = cons_next;
+	atomic_store_32(&br->br_cons_head, cons_next);
 	buf = br->br_ring[cons_idx];
 
 #ifdef DEBUG_BUFRING
@@ -213,9 +215,9 @@ buf_ring_dequeue_sc(struct buf_ring *br)
 	if (!mtx_owned(br->br_lock))
 		panic("lock not held on single consumer dequeue");
 #endif
-	if (br->br_cons_tail != cons_head)
+	if (atomic_load_32(&br->br_cons_tail) != cons_head)
 		panic("inconsistent list cons_tail=%d cons_head=%d",
-		    br->br_cons_tail, cons_head);
+		    atomic_load_32(&br->br_cons_tail), cons_head);
 #endif
 	atomic_store_rel_32(&br->br_cons_tail, cons_next);
 	return (buf);
@@ -235,13 +237,13 @@ buf_ring_advance_sc(struct buf_ring *br)
 
 	mask = br->br_cons_mask;
 #endif
-	cons_head = br->br_cons_head;
-	prod_tail = br->br_prod_tail;
+	cons_head = atomic_load_32(&br->br_cons_head);
+	prod_tail = atomic_load_32(&br->br_prod_tail);
 
 	cons_next = cons_head + 1;
 	if (cons_head == prod_tail)
 		return;
-	br->br_cons_head = cons_next;
+	atomic_store_32(&br->br_cons_head, cons_next);
 #ifdef DEBUG_BUFRING
 	br->br_ring[cons_head & mask] = NULL;
 #endif
@@ -267,12 +269,13 @@ buf_ring_advance_sc(struct buf_ring *br)
 static __inline void
 buf_ring_putback_sc(struct buf_ring *br, void *new)
 {
-	uint32_t mask;
+	uint32_t cons_idx, mask;
 
 	mask = br->br_cons_mask;
-	KASSERT((br->br_cons_head & mask) != (br->br_prod_tail & mask),
+	cons_idx = atomic_load_32(&br->br_cons_head) & mask;
+	KASSERT(cons_idx != (atomic_load_32(&br->br_prod_tail) & mask),
 	    ("Buf-Ring has none in putback"));
-	br->br_ring[br->br_cons_head & mask] = new;
+	br->br_ring[cons_idx] = new;
 }
 
 /*
@@ -291,7 +294,7 @@ buf_ring_peek(struct buf_ring *br)
 #endif
 	mask = br->br_cons_mask;
 	prod_tail = atomic_load_acq_32(&br->br_prod_tail);
-	cons_head = br->br_cons_head;
+	cons_head = atomic_load_32(&br->br_cons_head);
 
 	if (cons_head == prod_tail)
 		return (NULL);
@@ -312,7 +315,7 @@ buf_ring_peek_clear_sc(struct buf_ring *br)
 
 	mask = br->br_cons_mask;
 	prod_tail = atomic_load_acq_32(&br->br_prod_tail);
-	cons_head = br->br_cons_head;
+	cons_head = atomic_load_32(&br->br_cons_head);
 
 	if (cons_head == prod_tail)
 		return (NULL);
@@ -332,22 +335,26 @@ static __inline int
 buf_ring_full(struct buf_ring *br)
 {
 
-	return (br->br_prod_head == br->br_cons_tail + br->br_cons_size - 1);
+	return (atomic_load_32(&br->br_prod_head) ==
+	    atomic_load_32(&br->br_cons_tail) + br->br_cons_size - 1);
 }
 
 static __inline int
 buf_ring_empty(struct buf_ring *br)
 {
 
-	return (br->br_cons_head == br->br_prod_tail);
+	return (atomic_load_32(&br->br_cons_head) ==
+	    atomic_load_32(&br->br_prod_tail));
 }
 
 static __inline int
 buf_ring_count(struct buf_ring *br)
 {
+	uint32_t cons_tail, prod_tail;
 
-	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
-	    & br->br_prod_mask);
+	cons_tail = atomic_load_32(&br->br_cons_tail);
+	prod_tail = atomic_load_32(&br->br_prod_tail);
+	return ((br->br_prod_size + prod_tail - cons_tail) & br->br_prod_mask);
 }
 
 #ifdef _KERNEL
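
Note on the conversion: the diff replaces volatile-qualified index fields with plain uint32_t and makes every cross-thread read or write of them an explicit atomic_load_32()/atomic_store_32(); where ordering matters, the pre-existing _acq/_rel variants (atomic_load_acq_32(), atomic_store_rel_32()) are kept. As a rough illustration of why the relaxed loads are enough for the index checks, below is a minimal userland sketch of the same single-producer/single-consumer index protocol written against C11 <stdatomic.h> rather than atomic(9); the ring size, variable names, and helper functions are assumptions for the example, not part of buf_ring(9).

/*
 * Minimal userland sketch (not buf_ring(9) itself): a single-producer/
 * single-consumer ring using C11 <stdatomic.h>, where a relaxed
 * atomic_load_explicit() stands in for the kernel's atomic_load_32()
 * and the release store stands in for atomic_store_rel_32().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

static void *ring[RING_SIZE];
static _Atomic uint32_t prod_tail;		/* next slot to publish */
static _Atomic uint32_t cons_head;		/* next slot to consume */

static int
ring_enqueue(void *buf)
{
	/* Only the producer writes prod_tail, so a relaxed load suffices. */
	uint32_t tail = atomic_load_explicit(&prod_tail, memory_order_relaxed);
	/* Acquire pairs with the consumer's release store of cons_head. */
	uint32_t head = atomic_load_explicit(&cons_head, memory_order_acquire);

	if (tail - head == RING_SIZE)
		return (-1);			/* ring is full */
	ring[tail & RING_MASK] = buf;
	/* Release publishes the slot contents to the consumer. */
	atomic_store_explicit(&prod_tail, tail + 1, memory_order_release);
	return (0);
}

static void *
ring_dequeue(void)
{
	uint32_t head = atomic_load_explicit(&cons_head, memory_order_relaxed);
	/* Acquire pairs with the producer's release store of prod_tail. */
	uint32_t tail = atomic_load_explicit(&prod_tail, memory_order_acquire);
	void *buf;

	if (head == tail)
		return (NULL);			/* ring is empty */
	buf = ring[head & RING_MASK];
	/* Release lets the producer safely reuse the freed slot. */
	atomic_store_explicit(&cons_head, head + 1, memory_order_release);
	return (buf);
}

int
main(void)
{
	int x = 42;

	ring_enqueue(&x);
	printf("%d\n", *(int *)ring_dequeue());
	return (0);
}

In the sketch, as in the patch, a relaxed load only guarantees an untorn 32-bit read of the index; it is the release store of prod_tail (mirroring atomic_store_rel_32()) paired with the consumer's acquire load that makes the enqueued pointer itself visible.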