diff --git a/src/native/eventpipe/ep-block.c b/src/native/eventpipe/ep-block.c index 1d7d707d7cd4ed..57be3069ed5556 100644 --- a/src/native/eventpipe/ep-block.c +++ b/src/native/eventpipe/ep-block.c @@ -906,10 +906,10 @@ ep_sequence_point_block_init ( const uint32_t thread_count = dn_umap_size (map); ep_write_buffer_uint32_t (&sequence_point_block->block.write_pointer, thread_count); - DN_UMAP_FOREACH_BEGIN (const EventPipeThreadSessionState *, key, uint32_t, sequence_number, map) { + DN_UMAP_FOREACH_BEGIN (EventPipeThread *, thread, uint32_t, sequence_number, map) { ep_write_buffer_uint64_t ( &sequence_point_block->block.write_pointer, - ep_thread_get_os_thread_id (ep_thread_session_state_get_thread (key))); + ep_thread_get_os_thread_id (thread)); ep_write_buffer_uint32_t ( &sequence_point_block->block.write_pointer, diff --git a/src/native/eventpipe/ep-buffer-manager.c b/src/native/eventpipe/ep-buffer-manager.c index e37ff32fe131c7..829471cf8541d2 100644 --- a/src/native/eventpipe/ep-buffer-manager.c +++ b/src/native/eventpipe/ep-buffer-manager.c @@ -72,61 +72,71 @@ buffer_manager_deallocate_buffer ( // Attempt to reserve space for a buffer static bool -buffer_manager_try_reserve_buffer( +buffer_manager_try_reserve_buffer ( EventPipeBufferManager *buffer_manager, uint32_t request_size); // Release a reserved buffer budget static void -buffer_manager_release_buffer( +buffer_manager_release_buffer ( EventPipeBufferManager *buffer_manager, uint32_t size); // An iterator that can enumerate all the events which have been written into this buffer manager. -// Initially the iterator starts uninitialized and get_current_event () returns NULL. Calling move_next_xxx () -// attempts to advance the cursor to the next event. If there is no event prior to stop_timestamp then -// the get_current_event () again returns NULL, otherwise it returns that event. The event pointer returned -// by get_current_event() is valid until move_next_xxx() is called again. 
Once all events in a buffer have -// been read the iterator will delete that buffer from the pool. - -// Moves to the next oldest event searching across all threads. If there is no event older than -// stop_timestamp then get_current_event() will return NULL. +// Initially the iterator starts uninitialized and get_current_event () returns NULL. The iterator +// operates on the buffer_manager's thread_session_state_list_snapshot, and calling move_next_xxx () +// attempts to advance the cursor to the next event. If the current thread has no event prior to +// the snapshot_timestamp, the current_thread_session_state is removed from the snapshot list and +// get_current_event () returns NULL. The event pointer returned by get_current_event() is valid +// until move_next_xxx() is called again. Once all events in a buffer have been read the iterator +// will delete that buffer from the pool. Once all buffers from a thread have been read and the +// thread has been unregistered, the EventPipeThreadSessionState is cleaned up and removed. + static void -buffer_manager_move_next_event_any_thread ( +buffer_manager_snapshot_threads ( EventPipeBufferManager *buffer_manager, - ep_timestamp_t stop_timestamp); + ep_timestamp_t snapshot_timestamp); + +// Moves to the next oldest event searching across all snapshot threads. If there is no event older than +// the buffer_manager->snapshot_timestamp then get_current_event() will return NULL. +static +void +buffer_manager_move_next_event_any_snapshot_thread (EventPipeBufferManager *buffer_manager); // Moves to the next oldest event from the same thread as the current event. If there is no event -// older than stopTimeStamp then GetCurrentEvent() will return NULL. This should only be called +// older than snapshot_timestamp then GetCurrentEvent() will return NULL. 
This should only be called // when GetCurrentEvent() is non-null (because we need to know what thread's events to iterate) static void -buffer_manager_move_next_event_same_thread ( - EventPipeBufferManager *buffer_manager, - ep_timestamp_t stop_timestamp); +buffer_manager_move_next_event_same_thread (EventPipeBufferManager *buffer_manager); -// Finds the first buffer in EventPipeBufferList that has a readable event prior to before_timestamp, -// starting with pBuffer +// Finds the first buffer in an EventPipeThreadSessionState's buffer list with an event prior to before_timestamp. +// Should any empty buffers be encountered along the way, they are actively removed, and callers are notified +// if the EventPipeThreadSessionState is exhausted. static EventPipeBuffer * buffer_manager_advance_to_non_empty_buffer ( EventPipeBufferManager *buffer_manager, - EventPipeBufferList *buffer_list, - EventPipeBuffer *buffer, - ep_timestamp_t before_timestamp); + EventPipeThreadSessionState *thread_session_state, + ep_timestamp_t before_timestamp, + bool *thread_session_state_exhausted); // Detaches this buffer from an active writer thread and marks it read-only so that the reader -// thread can use it. If the writer thread has not yet stored the buffer into its thread-local -// slot it will not be converted, but such buffers have no events in them so there is no reason -// to read them. +// thread can use it. static -bool -buffer_manager_try_convert_buffer_to_read_only ( +void +buffer_manager_convert_buffer_to_read_only ( EventPipeBufferManager *buffer_manager, EventPipeBuffer *new_read_buffer); +static +void +buffer_manager_remove_and_delete_thread_session_state ( + EventPipeBufferManager *buffer_manager, + EventPipeThreadSessionState *thread_session_state); + /* * EventPipeBufferList. 
*/ @@ -136,17 +146,14 @@ void buffer_list_fini (EventPipeBufferList *buffer_list) { EP_ASSERT (buffer_list != NULL); - ep_thread_holder_fini (&buffer_list->thread_holder); } EventPipeBufferList * -ep_buffer_list_alloc ( - EventPipeBufferManager *manager, - EventPipeThread *thread) +ep_buffer_list_alloc (EventPipeBufferManager *manager) { EventPipeBufferList *instance = ep_rt_object_alloc (EventPipeBufferList); ep_raise_error_if_nok (instance != NULL); - ep_raise_error_if_nok (ep_buffer_list_init (instance, manager, thread) != NULL); + ep_raise_error_if_nok (ep_buffer_list_init (instance, manager) != NULL); ep_on_exit: return instance; @@ -160,20 +167,15 @@ ep_buffer_list_alloc ( EventPipeBufferList * ep_buffer_list_init ( EventPipeBufferList *buffer_list, - EventPipeBufferManager *manager, - EventPipeThread *thread) + EventPipeBufferManager *manager) { EP_ASSERT (buffer_list != NULL); EP_ASSERT (manager != NULL); - EP_ASSERT (thread != NULL); - - ep_thread_holder_init (&buffer_list->thread_holder, thread); buffer_list->manager = manager; buffer_list->head_buffer = NULL; buffer_list->tail_buffer = NULL; buffer_list->buffer_count = 0; - buffer_list->last_read_sequence_number = 0; return buffer_list; } @@ -263,7 +265,7 @@ ep_buffer_list_get_and_remove_head (EventPipeBufferList *buffer_list) } bool -buffer_manager_try_reserve_buffer( +buffer_manager_try_reserve_buffer ( EventPipeBufferManager *buffer_manager, uint32_t request_size) { @@ -283,7 +285,7 @@ buffer_manager_try_reserve_buffer( } void -buffer_manager_release_buffer( +buffer_manager_release_buffer ( EventPipeBufferManager *buffer_manager, uint32_t size) { @@ -389,6 +391,8 @@ buffer_manager_init_sequence_point_thread_list ( ep_buffer_manager_requires_lock_held (buffer_manager); DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, buffer_manager->thread_session_state_list) { + EventPipeThread *thread = ep_thread_session_state_get_thread (thread_session_state); + // The sequence number 
captured here is not guaranteed to be the most recent sequence number, nor // is it guaranteed to match the number of events we would observe in the thread's write buffer // memory. This is only used as a lower bound on the number of events the thread has attempted to @@ -399,8 +403,8 @@ buffer_manager_init_sequence_point_thread_list ( // underflow. uint32_t sequence_number = ep_thread_session_state_get_volatile_sequence_number (thread_session_state) - 1; - dn_umap_ptr_uint32_insert (ep_sequence_point_get_thread_sequence_numbers (sequence_point), thread_session_state, sequence_number); - ep_thread_addref (ep_thread_holder_get_thread (ep_thread_session_state_get_thread_holder_ref (thread_session_state))); + dn_umap_ptr_uint32_insert (ep_sequence_point_get_thread_sequence_numbers (sequence_point), thread, sequence_number); + ep_thread_addref (thread); } DN_LIST_FOREACH_END; // This needs to come after querying the thread sequence numbers to ensure that any recorded @@ -455,7 +459,6 @@ buffer_manager_allocate_buffer_for_thread ( EP_ASSERT (request_size > 0); EventPipeBuffer *new_buffer = NULL; - EventPipeBufferList *thread_buffer_list = NULL; EventPipeSequencePoint* sequence_point = NULL; uint32_t sequence_number = 0; @@ -489,25 +492,16 @@ buffer_manager_allocate_buffer_for_thread ( // Attempt to reserve the necessary buffer size EP_ASSERT(buffer_size > 0); - ep_return_null_if_nok(buffer_manager_try_reserve_buffer(buffer_manager, buffer_size)); + ep_return_null_if_nok(buffer_manager_try_reserve_buffer (buffer_manager, buffer_size)); // The sequence counter is exclusively mutated on this thread so this is a thread-local read. 
sequence_number = ep_thread_session_state_get_volatile_sequence_number (thread_session_state); new_buffer = ep_buffer_alloc (buffer_size, ep_thread_session_state_get_thread (thread_session_state), sequence_number); ep_raise_error_if_nok (new_buffer != NULL); + // Allocating a new EventPipeSequencePoint and adding to the sequence_point list requires the buffer manager lock. // Adding a buffer to the buffer list requires us to take the lock. EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) - thread_buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state); - if (thread_buffer_list == NULL) { - thread_buffer_list = ep_buffer_list_alloc (buffer_manager, ep_thread_session_state_get_thread (thread_session_state)); - ep_raise_error_if_nok_holding_spin_lock (thread_buffer_list != NULL, section1); - - ep_raise_error_if_nok_holding_spin_lock (dn_list_push_back (buffer_manager->thread_session_state_list, thread_session_state), section1); - ep_thread_session_state_set_buffer_list (thread_session_state, thread_buffer_list); - thread_buffer_list = NULL; - } - if (buffer_manager->sequence_point_alloc_budget != 0) { // sequence point bookkeeping if (buffer_size >= buffer_manager->remaining_sequence_point_alloc_budget) { @@ -527,9 +521,10 @@ buffer_manager_allocate_buffer_for_thread ( #endif // EP_CHECKED_BUILD // Set the buffer on the thread. 
- if (new_buffer != NULL) + if (new_buffer != NULL) { ep_buffer_list_insert_tail (ep_thread_session_state_get_buffer_list (thread_session_state), new_buffer); - + ep_thread_session_state_set_write_buffer (thread_session_state, new_buffer); + } EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) ep_on_exit: @@ -540,13 +535,10 @@ buffer_manager_allocate_buffer_for_thread ( ep_sequence_point_free (sequence_point); sequence_point = NULL; - ep_buffer_list_free (thread_buffer_list); - thread_buffer_list = NULL; - ep_buffer_free (new_buffer); new_buffer = NULL; - buffer_manager_release_buffer(buffer_manager, buffer_size); + buffer_manager_release_buffer (buffer_manager, buffer_size); ep_exit_error_handler (); } @@ -560,7 +552,7 @@ buffer_manager_deallocate_buffer ( EP_ASSERT (buffer_manager != NULL); if (buffer) { - buffer_manager_release_buffer(buffer_manager, ep_buffer_get_size (buffer)); + buffer_manager_release_buffer (buffer_manager, ep_buffer_get_size (buffer)); ep_buffer_free (buffer); #ifdef EP_CHECKED_BUILD buffer_manager->num_buffers_allocated--; @@ -570,11 +562,37 @@ buffer_manager_deallocate_buffer ( static void -buffer_manager_move_next_event_any_thread ( +buffer_manager_snapshot_threads ( EventPipeBufferManager *buffer_manager, - ep_timestamp_t stop_timestamp) + ep_timestamp_t snapshot_timestamp) { EP_ASSERT (buffer_manager != NULL); + EP_ASSERT (dn_list_empty (buffer_manager->thread_session_state_list_snapshot)); + EP_ASSERT (snapshot_timestamp != 0); + + ep_buffer_manager_requires_lock_held (buffer_manager); + + buffer_manager->snapshot_timestamp = snapshot_timestamp; + + DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, buffer_manager->thread_session_state_list) { + EventPipeBufferList *buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state); + EventPipeBuffer *buffer = buffer_list->head_buffer; + if (buffer && ep_buffer_get_creation_timestamp (buffer) < snapshot_timestamp) { + // Ownership is not 
transferred, the thread_session_state_list still owns the EventPipeThreadSessionState + dn_list_push_back (buffer_manager->thread_session_state_list_snapshot, thread_session_state); + } + } DN_LIST_FOREACH_END; + + ep_buffer_manager_requires_lock_held (buffer_manager); +} + +static +void +buffer_manager_move_next_event_any_snapshot_thread (EventPipeBufferManager *buffer_manager) +{ + EP_ASSERT (buffer_manager != NULL); + EP_ASSERT (buffer_manager->thread_session_state_list_snapshot != NULL); + EP_ASSERT (buffer_manager->snapshot_timestamp != 0); ep_buffer_manager_requires_lock_not_held (buffer_manager); @@ -583,69 +601,60 @@ buffer_manager_move_next_event_any_thread ( buffer_manager->current_event = NULL; buffer_manager->current_buffer = NULL; - buffer_manager->current_buffer_list = NULL; - - // We need to do this in two steps because we can't hold m_lock and EventPipeThread::m_lock - // at the same time. - - // Step 1 - while holding m_lock get the oldest buffer from each thread - DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size * 2); + buffer_manager->current_thread_session_state = NULL; - dn_vector_ptr_t buffer_array; - dn_vector_ptr_t buffer_list_array; + // Iterate the snapshot list to find the one with the oldest event. This may require + // converting some of the live buffers from writable to readable. We may need to wait outside the + // buffer_manager lock for a writer thread to finish an in progress write to the buffer. 
+ ep_timestamp_t stop_timestamp = buffer_manager->snapshot_timestamp; + ep_timestamp_t oldest_timestamp = stop_timestamp; - dn_vector_ptr_custom_init_params_t params = {0, }; - params.allocator = (dn_allocator_t *)&allocator; - params.capacity = dn_vector_ptr_default_local_allocator_capacity_size; + EventPipeBuffer *buffer; + for (dn_list_it_t it = dn_list_begin (buffer_manager->thread_session_state_list_snapshot); !dn_list_it_end (it); ) { + EventPipeThreadSessionState *thread_session_state = *dn_list_it_data_t (it, EventPipeThreadSessionState *); + EP_ASSERT (thread_session_state != NULL); - ep_raise_error_if_nok (dn_vector_ptr_custom_init (&buffer_array, ¶ms)); - ep_raise_error_if_nok (dn_vector_ptr_custom_init (&buffer_list_array, ¶ms)); + dn_list_it_t next_it = dn_list_it_next (it); - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) - EventPipeBufferList *buffer_list; - EventPipeBuffer *buffer; - DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, buffer_manager->thread_session_state_list) { - buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state); - buffer = buffer_list->head_buffer; - if (buffer && ep_buffer_get_creation_timestamp (buffer) < stop_timestamp) { - dn_vector_ptr_push_back (&buffer_list_array, buffer_list); - dn_vector_ptr_push_back (&buffer_array, buffer); + bool live_thread_session_state_exhausted = false; + bool snapshot_thread_session_state_exhausted = true; + buffer = buffer_manager_advance_to_non_empty_buffer (buffer_manager, thread_session_state, stop_timestamp, &live_thread_session_state_exhausted); + if (buffer) { + EventPipeEventInstance *next_event = ep_buffer_get_current_read_event (buffer); + if (next_event) { + ep_timestamp_t next_event_timestamp = ep_event_instance_get_timestamp (next_event); + if (next_event_timestamp < stop_timestamp) + snapshot_thread_session_state_exhausted = false; + if (next_event_timestamp < oldest_timestamp) { + buffer_manager->current_event = next_event; 
+ buffer_manager->current_buffer = buffer; + buffer_manager->current_thread_session_state = thread_session_state; + oldest_timestamp = next_event_timestamp; + } } - } DN_LIST_FOREACH_END; - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) - - // Step 2 - iterate the cached list to find the one with the oldest event. This may require - // converting some of the buffers from writable to readable, and that in turn requires - // taking the associated EventPipeThread lock for thread that was writing to that buffer. - ep_timestamp_t oldest_timestamp; - oldest_timestamp = stop_timestamp; + } - EventPipeBufferList *buffer_list; - EventPipeBuffer *head_buffer; - EventPipeBuffer *buffer; - EventPipeEventInstance *next_event; + if (snapshot_thread_session_state_exhausted) + dn_list_erase (it); - for (uint32_t i = 0; i < dn_vector_ptr_size (&buffer_array) && i < dn_vector_ptr_size (&buffer_list_array); ++i) { - buffer_list = (EventPipeBufferList *)*dn_vector_ptr_index (&buffer_list_array, i); - head_buffer = (EventPipeBuffer *)*dn_vector_ptr_index (&buffer_array, i); - buffer = buffer_manager_advance_to_non_empty_buffer (buffer_manager, buffer_list, head_buffer, stop_timestamp); - if (buffer) { - // Peek the next event out of the buffer. - next_event = ep_buffer_get_current_read_event (buffer); - // If it's the oldest event we've seen, then save it. 
- if (next_event && ep_event_instance_get_timestamp (next_event) < oldest_timestamp) { - buffer_manager->current_event = next_event; - buffer_manager->current_buffer = buffer; - buffer_manager->current_buffer_list = buffer_list; - oldest_timestamp = ep_event_instance_get_timestamp (buffer_manager->current_event); - } + if (live_thread_session_state_exhausted) { + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1); + buffer_manager_remove_and_delete_thread_session_state (buffer_manager, thread_session_state); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1); } + + it = next_it; + } + + // The current event will be read, so update the last read sequence number for that thread session state. + if (buffer_manager->current_event != NULL) + { + uint32_t sequence_number = ep_buffer_get_current_sequence_number (buffer_manager->current_buffer); + ep_thread_session_state_set_last_read_sequence_number (buffer_manager->current_thread_session_state, sequence_number); } ep_on_exit: ep_buffer_manager_requires_lock_not_held (buffer_manager); - dn_vector_ptr_dispose (&buffer_list_array); - dn_vector_ptr_dispose (&buffer_array); return; ep_on_error: @@ -654,14 +663,12 @@ buffer_manager_move_next_event_any_thread ( static void -buffer_manager_move_next_event_same_thread ( - EventPipeBufferManager *buffer_manager, - ep_timestamp_t stop_timestamp) +buffer_manager_move_next_event_same_thread (EventPipeBufferManager *buffer_manager) { EP_ASSERT (buffer_manager != NULL); EP_ASSERT (buffer_manager->current_event != NULL); EP_ASSERT (buffer_manager->current_buffer != NULL); - EP_ASSERT (buffer_manager->current_buffer_list != NULL); + EP_ASSERT (buffer_manager->current_thread_session_state != NULL); ep_buffer_manager_requires_lock_not_held (buffer_manager); @@ -670,11 +677,11 @@ buffer_manager_move_next_event_same_thread ( ep_buffer_move_next_read_event (buffer_manager->current_buffer); // Find the first buffer in the list, if any, which has an event in it - 
buffer_manager->current_buffer = buffer_manager_advance_to_non_empty_buffer ( - buffer_manager, - buffer_manager->current_buffer_list, - buffer_manager->current_buffer, - stop_timestamp); + EventPipeThreadSessionState *current_thread_session_state = buffer_manager->current_thread_session_state; + ep_timestamp_t stop_timestamp = buffer_manager->snapshot_timestamp; + bool live_thread_session_state_exhausted = false; + bool snapshot_thread_session_state_exhausted = true; + buffer_manager->current_buffer = buffer_manager_advance_to_non_empty_buffer (buffer_manager, current_thread_session_state, stop_timestamp, &live_thread_session_state_exhausted); if (buffer_manager->current_buffer) { // get the event from that buffer @@ -684,47 +691,62 @@ buffer_manager_move_next_event_same_thread ( // event exists, but isn't early enough buffer_manager->current_event = NULL; buffer_manager->current_buffer = NULL; - buffer_manager->current_buffer_list = NULL; + buffer_manager->current_thread_session_state = NULL; } else { // event is early enough, set the new cursor buffer_manager->current_event = next_event; EP_ASSERT (buffer_manager->current_buffer != NULL); - EP_ASSERT (buffer_manager->current_buffer_list != NULL); + EP_ASSERT (buffer_manager->current_thread_session_state != NULL); + snapshot_thread_session_state_exhausted = false; + // The current event will be read, so update the last read sequence number for that thread session state. 
+ uint32_t sequence_number = ep_buffer_get_current_sequence_number (buffer_manager->current_buffer); + ep_thread_session_state_set_last_read_sequence_number (current_thread_session_state, sequence_number); } } else { // no more buffers prior to before_timestamp EP_ASSERT (buffer_manager->current_event == NULL); EP_ASSERT (buffer_manager->current_buffer == NULL); - buffer_manager->current_buffer_list = NULL; + buffer_manager->current_thread_session_state = NULL; + } + + if (snapshot_thread_session_state_exhausted) + dn_list_remove (buffer_manager->thread_session_state_list_snapshot, current_thread_session_state); + + if (live_thread_session_state_exhausted) { + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + buffer_manager_remove_and_delete_thread_session_state (buffer_manager, current_thread_session_state); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) } + +ep_on_exit: + return; + +ep_on_error: + ep_exit_error_handler (); } static EventPipeBuffer * buffer_manager_advance_to_non_empty_buffer ( EventPipeBufferManager *buffer_manager, - EventPipeBufferList *buffer_list, - EventPipeBuffer *buffer, - ep_timestamp_t before_timestamp) + EventPipeThreadSessionState *thread_session_state, + ep_timestamp_t before_timestamp, + bool *thread_session_state_exhausted) { EP_ASSERT (buffer_manager != NULL); - EP_ASSERT (buffer_list != NULL); - EP_ASSERT (buffer != NULL); - EP_ASSERT (buffer_list->head_buffer == buffer); + EP_ASSERT (thread_session_state != NULL); ep_buffer_manager_requires_lock_not_held (buffer_manager); - EventPipeBuffer *current_buffer = buffer; + EventPipeBufferList *buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state); + EP_ASSERT (buffer_list != NULL); + EventPipeBuffer *current_buffer = buffer_list->head_buffer; + EP_ASSERT (current_buffer != NULL); + *thread_session_state_exhausted = false; bool done = false; while (!done) { - if (!buffer_manager_try_convert_buffer_to_read_only (buffer_manager, current_buffer)) { 
- // the writer thread hasn't yet stored this buffer into the m_pWriteBuffer - // field (there is a small time window after allocation in this state). - // This should be the only buffer remaining in the list and it has no - // events written into it so we are done iterating. - current_buffer = NULL; - done = true; - } else if (ep_buffer_get_current_read_event (current_buffer) != NULL) { + buffer_manager_convert_buffer_to_read_only (buffer_manager, current_buffer); + if (ep_buffer_get_current_read_event (current_buffer) != NULL) { // found a non-empty buffer done = true; } else { @@ -736,6 +758,10 @@ buffer_manager_advance_to_non_empty_buffer ( // get the next buffer current_buffer = buffer_list->head_buffer; + + if (!current_buffer && (ep_rt_volatile_load_uint32_t_without_barrier (ep_thread_get_unregistered_ref (ep_thread_session_state_get_thread (thread_session_state))) > 0)) + *thread_session_state_exhausted = true; + if (!current_buffer || ep_buffer_get_creation_timestamp (current_buffer) >= before_timestamp) { // no more buffers in the list before this timestamp, we're done current_buffer = NULL; @@ -755,8 +781,8 @@ buffer_manager_advance_to_non_empty_buffer ( } static -bool -buffer_manager_try_convert_buffer_to_read_only ( +void +buffer_manager_convert_buffer_to_read_only ( EventPipeBufferManager *buffer_manager, EventPipeBuffer *new_read_buffer) { @@ -765,40 +791,47 @@ buffer_manager_try_convert_buffer_to_read_only ( ep_buffer_manager_requires_lock_not_held (buffer_manager); - bool result = false; - // if already readable, nothing to do if (ep_buffer_get_volatile_state (new_read_buffer) == EP_BUFFER_STATE_READ_ONLY) - return true; + return; - // if not yet readable, disable the thread from writing to it which causes - // it to become readable + bool wait_for_writer_thread = false; + EventPipeThreadSessionState *thread_session_state = NULL; + + // if not yet readable, unpublish this buffer from the writer thread EventPipeThread *thread = 
ep_buffer_get_writer_thread (new_read_buffer); - EP_SPIN_LOCK_ENTER (ep_thread_get_rt_lock_ref (thread), section1); - EventPipeThreadSessionState *thread_session_state = ep_thread_get_session_state (thread, buffer_manager->session); + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + thread_session_state = ep_thread_get_session_state (thread, buffer_manager->session); EP_ASSERT(thread_session_state != NULL); if (ep_thread_session_state_get_write_buffer (thread_session_state) == new_read_buffer) { ep_thread_session_state_set_write_buffer (thread_session_state, NULL); - EP_ASSERT (ep_buffer_get_volatile_state (new_read_buffer) == EP_BUFFER_STATE_READ_ONLY); - result = true; + wait_for_writer_thread = true; } - EP_SPIN_LOCK_EXIT (ep_thread_get_rt_lock_ref (thread), section1); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) + + if (wait_for_writer_thread) { + uint32_t index = ep_session_get_index (buffer_manager->session); + // Wait for the writer thread to finish any pending writes to this buffer and release any + // cached pointers it may have. The writer thread might need the buffer manager lock to make progress so we can't hold it while waiting. + // Both session_use_in_progress and write_buffer are accessed via atomic operations. By setting the write_buffer to NULL above while holding + // the buffer manager lock, the wait is correct only because the writer thread sets session_use_in_progress before caching the write_buffer + // and resets it after it's done using the cached write_buffer. + EP_YIELD_WHILE (ep_thread_get_session_use_in_progress (thread) == index && + ep_thread_session_state_get_volatile_write_buffer (thread_session_state) == NULL); + } - // It is possible that EventPipeBufferList returns a writable buffer - // yet it is not returned as ep_thread_get_write_buffer (). 
This is because - // ep_buffer_manager_allocate_buffer_for_thread () insert the new writable buffer into - // the EventPipeBufferList first, and then it is added to the writable buffer hash table - // by ep_thread_set_write_buffer () next. The two operations are not atomic so it is possible - // to observe this partial state. - if (!result) - result = (ep_buffer_get_volatile_state (new_read_buffer) == EP_BUFFER_STATE_READ_ONLY); + // Now that the writer thread has released its cached pointer to this buffer, we can safely + // mark it as read-only. + // For buffer_manager's coordination of event writing and reading operations, defensively transition the buffer state under the buffer manager lock. + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section2) + ep_buffer_convert_to_read_only (new_read_buffer); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section2) ep_on_exit: ep_buffer_manager_requires_lock_not_held (buffer_manager); - return result; + return; ep_on_error: - EP_ASSERT (!result); ep_exit_error_handler (); } @@ -823,6 +856,10 @@ ep_buffer_manager_alloc ( ep_rt_wait_event_alloc (&instance->rt_wait_event, false, true); ep_raise_error_if_nok (ep_rt_wait_event_is_valid (&instance->rt_wait_event)); + instance->thread_session_state_list_snapshot = dn_list_alloc (); + ep_raise_error_if_nok (instance->thread_session_state_list_snapshot != NULL); + instance->snapshot_timestamp = 0; + instance->session = session; instance->size_of_all_buffers = 0; instance->num_oversized_events_dropped = 0; @@ -838,7 +875,7 @@ ep_buffer_manager_alloc ( instance->current_event = NULL; instance->current_buffer = NULL; - instance->current_buffer_list = NULL; + instance->current_thread_session_state = NULL; instance->max_size_of_all_buffers = EP_CLAMP ((size_t)100 * 1024, max_size_of_all_buffers, (size_t)UINT32_MAX); @@ -865,10 +902,14 @@ ep_buffer_manager_free (EventPipeBufferManager * buffer_manager) { ep_return_void_if_nok (buffer_manager != NULL); - 
ep_buffer_manager_deallocate_buffers (buffer_manager); - dn_list_free (buffer_manager->sequence_points); + EP_ASSERT (dn_list_empty (buffer_manager->thread_session_state_list_snapshot)); + + dn_list_free (buffer_manager->thread_session_state_list_snapshot); + + EP_ASSERT (dn_list_empty (buffer_manager->thread_session_state_list)); + dn_list_free (buffer_manager->thread_session_state_list); ep_rt_wait_event_free (&buffer_manager->rt_wait_event); @@ -942,52 +983,54 @@ ep_buffer_manager_write_event ( // Before we pick a buffer, make sure the event is enabled. ep_return_false_if_nok (ep_event_is_enabled (ep_event)); + // Check to see if an event thread was specified. If not, then use the current thread. + if (event_thread == NULL) + event_thread = thread; + + EventPipeThread *current_thread; + current_thread = ep_thread_get (); + ep_raise_error_if_nok (current_thread != NULL); + + // session_state won't be freed if use_in_progress is set. + EP_ASSERT (ep_thread_get_session_use_in_progress (current_thread) == ep_session_get_index (session)); + session_state = ep_thread_get_volatile_session_state (current_thread, session); + if (session_state == NULL) { + // slow path should only happen once per thread per session + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + // No need to re-check session_state inside the lock, this thread is the only one allowed to create it. 
+ session_state = ep_thread_session_state_alloc (current_thread, session, buffer_manager); + ep_raise_error_if_nok_holding_spin_lock (session_state != NULL, section1); + ep_raise_error_if_nok_holding_spin_lock (dn_list_push_back (buffer_manager->thread_session_state_list, session_state), section1); + ep_thread_set_session_state (current_thread, session, session_state); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) + } + // Check that the payload size is less than 64 KB (max size for ETW events) if (ep_event_payload_get_size (payload) > 64 * 1024) { ep_rt_atomic_inc_int64_t (&buffer_manager->num_oversized_events_dropped); - EventPipeThread *current_thread = ep_thread_get(); - ep_rt_spin_lock_handle_t *thread_lock = ep_thread_get_rt_lock_ref (current_thread); - EP_SPIN_LOCK_ENTER (thread_lock, section1) - session_state = ep_thread_get_or_create_session_state (current_thread, session); - ep_thread_session_state_increment_sequence_number (session_state); - EP_SPIN_LOCK_EXIT (thread_lock, section1) + ep_thread_session_state_increment_sequence_number (session_state); return false; } - // Check to see an event thread was specified. If not, then use the current thread. - if (event_thread == NULL) - event_thread = thread; - current_stack_contents = ep_stack_contents_init (&stack_contents); if (stack == NULL && ep_session_get_enable_stackwalk (session) && ep_event_get_need_stack (ep_event) && !ep_session_get_rundown_enabled (session)) { ep_walk_managed_stack_for_current_thread (current_stack_contents); stack = current_stack_contents; } - // See if the thread already has a buffer to try. 
- EventPipeThread *current_thread; - current_thread = ep_thread_get (); - ep_raise_error_if_nok (current_thread != NULL); - - ep_rt_spin_lock_handle_t *thread_lock; - thread_lock = ep_thread_get_rt_lock_ref (current_thread); - - EP_SPIN_LOCK_ENTER (thread_lock, section2) - session_state = ep_thread_get_or_create_session_state (current_thread, session); - ep_raise_error_if_nok_holding_spin_lock (session_state != NULL, section2); - - buffer = ep_thread_session_state_get_write_buffer (session_state); - if (!buffer) { + // buffer won't be converted to read-only if use_in_progress is set + EP_ASSERT (ep_thread_get_session_use_in_progress (current_thread) == ep_session_get_index(session)); + buffer = ep_thread_session_state_get_volatile_write_buffer (session_state); + if (!buffer) { + alloc_new_buffer = true; + } else { + // Attempt to write the event to the buffer. If this fails, we should allocate a new buffer. + if (ep_buffer_write_event (buffer, event_thread, session, ep_event, payload, activity_id, related_activity_id, stack)) + ep_thread_session_state_increment_sequence_number (session_state); + else alloc_new_buffer = true; - } else { - // Attempt to write the event to the buffer. If this fails, we should allocate a new buffer. - if (ep_buffer_write_event (buffer, event_thread, session, ep_event, payload, activity_id, related_activity_id, stack)) - ep_thread_session_state_increment_sequence_number (session_state); - else - alloc_new_buffer = true; - } - EP_SPIN_LOCK_EXIT (thread_lock, section2) + } // alloc_new_buffer is reused below to detect if overflow happened, so cache it here to see if we should // signal the reader thread @@ -1003,27 +1046,17 @@ ep_buffer_manager_write_event ( // We treat this as the write_event call occurring after this session stopped listening for events, effectively the // same as if ep_event_is_enabled test above returned false. 
ep_raise_error_if_nok (!write_suspended); - - // This lock looks unnecessary for the sequence number, but didn't want to - // do a broader refactoring to take it out. If it shows up as a perf - // problem then we should. - EP_SPIN_LOCK_ENTER (thread_lock, section3) - ep_thread_session_state_increment_sequence_number (session_state); - EP_SPIN_LOCK_EXIT (thread_lock, section3) + ep_thread_session_state_increment_sequence_number (session_state); } else { current_thread = ep_thread_get (); EP_ASSERT (current_thread != NULL); - thread_lock = ep_thread_get_rt_lock_ref (current_thread); - EP_SPIN_LOCK_ENTER (thread_lock, section4) - ep_thread_session_state_set_write_buffer (session_state, buffer); - // Try to write the event after we allocated a buffer. - // This is the first time if the thread had no buffers before the call to this function. - // This is the second time if this thread did have one or more buffers, but they were full. - alloc_new_buffer = !ep_buffer_write_event (buffer, event_thread, session, ep_event, payload, activity_id, related_activity_id, stack); - EP_ASSERT(!alloc_new_buffer); - ep_thread_session_state_increment_sequence_number (session_state); - EP_SPIN_LOCK_EXIT (thread_lock, section4) + // Try to write the event after we allocated a buffer. + // This is the first time if the thread had no buffers before the call to this function. + // This is the second time if this thread did have one or more buffers, but they were full. 
+ alloc_new_buffer = !ep_buffer_write_event (buffer, event_thread, session, ep_event, payload, activity_id, related_activity_id, stack); + EP_ASSERT(!alloc_new_buffer); + ep_thread_session_state_increment_sequence_number (session_state); } } @@ -1048,52 +1081,6 @@ ep_buffer_manager_write_event ( ep_exit_error_handler (); } -void -ep_buffer_manager_suspend_write_event ( - EventPipeBufferManager *buffer_manager, - uint32_t session_index) -{ - EP_ASSERT (buffer_manager != NULL); - - // All calls to this method must be synchronized by our caller - ep_requires_lock_held (); - - DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size); - - dn_vector_ptr_custom_init_params_t params = {0, }; - params.allocator = (dn_allocator_t *)&allocator; - params.capacity = dn_vector_ptr_default_local_allocator_capacity_size; - - dn_vector_ptr_t thread_vector; - ep_raise_error_if_nok (dn_vector_ptr_custom_init (&thread_vector, ¶ms)); - - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1); - EP_ASSERT (ep_buffer_manager_ensure_consistency (buffer_manager)); - // Find all threads that have used this buffer manager. - DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, buffer_manager->thread_session_state_list) { - dn_vector_ptr_push_back (&thread_vector, ep_thread_session_state_get_thread (thread_session_state)); - } DN_LIST_FOREACH_END; - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1); - - // Iterate through all the threads, forcing them to relinquish any buffers stored in - // EventPipeThread's write buffer and prevent storing new ones. 
- DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThread *, thread, &thread_vector) { - EP_SPIN_LOCK_ENTER (ep_thread_get_rt_lock_ref (thread), section2) - EventPipeThreadSessionState *thread_session_state = ep_thread_get_session_state (thread, buffer_manager->session); - EP_ASSERT(thread_session_state != NULL); - ep_thread_session_state_set_write_buffer (thread_session_state, NULL); - EP_SPIN_LOCK_EXIT (ep_thread_get_rt_lock_ref (thread), section2) - } DN_VECTOR_PTR_FOREACH_END; - -ep_on_exit: - ep_requires_lock_held (); - dn_vector_ptr_dispose (&thread_vector); - return; - -ep_on_error: - ep_exit_error_handler (); -} - void ep_buffer_manager_write_all_buffers_to_file ( EventPipeBufferManager *buffer_manager, @@ -1122,19 +1109,32 @@ ep_buffer_manager_write_all_buffers_to_file_v3 ( { EP_ASSERT (buffer_manager != NULL); EP_ASSERT (file != NULL); + EP_ASSERT (buffer_manager->thread_session_state_list_snapshot != NULL); EP_ASSERT (buffer_manager->current_event == NULL); EP_ASSERT (events_written != NULL); *events_written = false; + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + buffer_manager_snapshot_threads (buffer_manager, stop_timestamp); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1); + // Naively walk the circular buffer, writing the event stream in timestamp order. 
- buffer_manager_move_next_event_any_thread (buffer_manager, stop_timestamp); + buffer_manager_move_next_event_any_snapshot_thread (buffer_manager); while (buffer_manager->current_event != NULL) { *events_written = true; ep_file_write_event (file, buffer_manager->current_event, /*CaptureThreadId=*/0, /*sequenceNumber=*/0, /*IsSorted=*/true); - buffer_manager_move_next_event_any_thread (buffer_manager, stop_timestamp); + buffer_manager_move_next_event_any_snapshot_thread (buffer_manager); } ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS); + + EP_ASSERT (dn_list_empty (buffer_manager->thread_session_state_list_snapshot)); + +ep_on_exit: + return; + +ep_on_error: + ep_exit_error_handler (); } void @@ -1201,7 +1201,6 @@ ep_buffer_manager_write_all_buffers_to_file_v4 ( *events_written = false; EventPipeSequencePoint *sequence_point = NULL; - ep_timestamp_t current_timestamp_boundary = stop_timestamp; DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size); @@ -1212,24 +1211,25 @@ ep_buffer_manager_write_all_buffers_to_file_v4 ( dn_vector_ptr_t session_states_to_delete; ep_raise_error_if_nok (dn_vector_ptr_custom_init (&session_states_to_delete, ¶ms)); - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) - if (buffer_manager_try_peek_sequence_point (buffer_manager, &sequence_point)) - current_timestamp_boundary = EP_MIN (current_timestamp_boundary, ep_sequence_point_get_timestamp (sequence_point)); - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) - // loop across sequence points - while(true) { - // loop across events within a sequence point boundary + while (true) { + ep_timestamp_t current_timestamp_boundary = stop_timestamp; + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + if (buffer_manager_try_peek_sequence_point (buffer_manager, &sequence_point)) + current_timestamp_boundary = EP_MIN (current_timestamp_boundary, ep_sequence_point_get_timestamp (sequence_point)); + + buffer_manager_snapshot_threads 
(buffer_manager, current_timestamp_boundary); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1); + + // loop across events within a sequence point boundary while (true) { // pick the thread that has the oldest event - buffer_manager_move_next_event_any_thread (buffer_manager, current_timestamp_boundary); + buffer_manager_move_next_event_any_snapshot_thread (buffer_manager); if (buffer_manager->current_event == NULL) break; uint64_t capture_thread_id = ep_thread_get_os_thread_id (ep_buffer_get_writer_thread (buffer_manager->current_buffer)); - EventPipeBufferList *buffer_list = buffer_manager->current_buffer_list; - // loop across events on this thread bool events_written_for_thread = false; @@ -1237,117 +1237,60 @@ ep_buffer_manager_write_all_buffers_to_file_v4 ( while (buffer_manager->current_event != NULL) { // The first event emitted on each thread (detected by !events_written_for_thread) is guaranteed to - // be the oldest event cached in our buffers so we mark it. This implements mechanism #2 + // be the oldest event cached in our buffers so we mark it. This implements mechanism #2 // in the big comment above. sequence_number = ep_buffer_get_current_sequence_number (buffer_manager->current_buffer); ep_file_write_event (file, buffer_manager->current_event, capture_thread_id, sequence_number, !events_written_for_thread); events_written_for_thread = true; - buffer_manager_move_next_event_same_thread (buffer_manager, current_timestamp_boundary); + buffer_manager_move_next_event_same_thread (buffer_manager); } - buffer_list->last_read_sequence_number = sequence_number; + // Have we written events in any sequence point? 
*events_written = events_written_for_thread || *events_written; } + EP_ASSERT (dn_list_empty (buffer_manager->thread_session_state_list_snapshot)); // This finishes any current partially filled EventPipeBlock, and flushes it to the stream ep_file_flush (file, EP_FILE_FLUSH_FLAGS_ALL_BLOCKS); - // there are no more events prior to current_timestamp_boundary - if (current_timestamp_boundary == stop_timestamp) { - // We are done + // there are no more events prior to snapshot_timestamp + if (buffer_manager->snapshot_timestamp == stop_timestamp) break; - } else { - // stopped at sequence point case - - // the sequence point captured a lower bound for sequence number on each thread, but iterating - // through the events we may have observed that a higher numbered event was recorded. If so we - // should adjust the sequence numbers upwards to ensure the data in the stream is consistent. - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section2) - for (dn_list_it_t it = dn_list_begin (buffer_manager->thread_session_state_list); !dn_list_it_end (it); ) { - EventPipeThreadSessionState *session_state = *dn_list_it_data_t (it, EventPipeThreadSessionState *); - dn_umap_it_t found = dn_umap_ptr_uint32_find (ep_sequence_point_get_thread_sequence_numbers (sequence_point), session_state); - uint32_t thread_sequence_number = !dn_umap_it_end (found) ? dn_umap_it_value_uint32_t (found) : 0; - uint32_t last_read_sequence_number = ep_thread_session_state_get_buffer_list (session_state)->last_read_sequence_number; - // Sequence numbers can overflow so we can't use a direct last_read > sequence_number comparison - // If a thread is able to drop more than 0x80000000 events in between sequence points then we will - // miscategorize it, but that seems unlikely. 
- uint32_t last_read_delta = last_read_sequence_number - thread_sequence_number; - if (0 < last_read_delta && last_read_delta < 0x80000000) { - if (!dn_umap_it_end (found)) { - dn_umap_erase (found); - } else { - ep_thread_addref (ep_thread_holder_get_thread (ep_thread_session_state_get_thread_holder_ref (session_state))); - } - dn_umap_ptr_uint32_insert (ep_sequence_point_get_thread_sequence_numbers (sequence_point), session_state, last_read_sequence_number); - } - - it = dn_list_it_next (it); - - // if a session_state was exhausted during this sequence point, mark it for deletion - if (ep_thread_session_state_get_buffer_list (session_state)->head_buffer == NULL) { - - // We don't hold the thread lock here, so it technically races with a thread getting unregistered. This is okay, - // because we will either not have passed the above if statement (there were events still in the buffers) or we - // will catch it at the next sequence point. - if (ep_rt_volatile_load_uint32_t_without_barrier (ep_thread_get_unregistered_ref (ep_thread_session_state_get_thread (session_state))) > 0) { - dn_vector_ptr_push_back (&session_states_to_delete, session_state); - dn_list_remove (buffer_manager->thread_session_state_list, session_state); - } - } + + // stopped at sequence point case + + // the sequence point captured a lower bound for sequence number on each thread, but iterating + // through the events we may have observed that a higher numbered event was recorded. If so we + // should adjust the sequence numbers upwards to ensure the data in the stream is consistent. + // EventPipeThreadSessionStates removed during move_next_event_any|same_thread, will already + // have had their sequence numbers updated. 
+ EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section2) + DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, session_state, buffer_manager->thread_session_state_list) { + EventPipeThread *thread = ep_thread_session_state_get_thread (session_state); + dn_umap_it_t found = dn_umap_ptr_uint32_find (ep_sequence_point_get_thread_sequence_numbers (sequence_point), thread); + uint32_t thread_sequence_number = !dn_umap_it_end (found) ? dn_umap_it_value_uint32_t (found) : 0; + uint32_t last_read_sequence_number = ep_thread_session_state_get_last_read_sequence_number (session_state); + // Sequence numbers can overflow so we can't use a direct last_read > sequence_number comparison + // If a thread is able to drop more than 0x80000000 events in between sequence points then we will + // miscategorize it, but that seems unlikely. + uint32_t last_read_delta = last_read_sequence_number - thread_sequence_number; + if (0 < last_read_delta && last_read_delta < 0x80000000) { + dn_umap_ptr_uint32_insert_or_assign (ep_sequence_point_get_thread_sequence_numbers (sequence_point), thread, last_read_sequence_number); + if (dn_umap_it_end (found)) + ep_thread_addref (thread); } - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section2) - - // emit the sequence point into the file - ep_file_write_sequence_point (file, sequence_point); - - // move to the next sequence point if any - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section3) - // advance to the next sequence point, if any - buffer_manager_dequeue_sequence_point (buffer_manager); - current_timestamp_boundary = stop_timestamp; - if (buffer_manager_try_peek_sequence_point (buffer_manager, &sequence_point)) - current_timestamp_boundary = EP_MIN (current_timestamp_boundary, ep_sequence_point_get_timestamp (sequence_point)); - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section3) - } - } + } DN_LIST_FOREACH_END; + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section2) - // There are sequence points created during this flush and 
we've marked session states for deletion. - // We need to remove these from the internal maps of the subsequent Sequence Points - if (dn_vector_ptr_size (&session_states_to_delete) > 0) { - EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section4) - if (buffer_manager_try_peek_sequence_point (buffer_manager, &sequence_point)) { - DN_LIST_FOREACH_BEGIN (EventPipeSequencePoint *, current_sequence_point, buffer_manager->sequence_points) { - // foreach (session_state in session_states_to_delete) - DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, &session_states_to_delete) { - dn_umap_it_t found = dn_umap_ptr_uint32_find (ep_sequence_point_get_thread_sequence_numbers (current_sequence_point), thread_session_state); - if (!dn_umap_it_end (found)) { - dn_umap_erase (found); - // every entry of this map was holding an extra ref to the thread (see: ep-event-instance.{h|c}) - ep_thread_release (ep_thread_session_state_get_thread (thread_session_state)); - } - } DN_VECTOR_PTR_FOREACH_END; - } DN_LIST_FOREACH_END; - } + // emit the sequence point into the file + ep_file_write_sequence_point (file, sequence_point); - EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section4) + // we are done with the sequence point + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section3) + buffer_manager_dequeue_sequence_point (buffer_manager); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section3) } - // foreach (session_state in session_states_to_delete) - DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, &session_states_to_delete) { - EP_ASSERT (thread_session_state != NULL); - - // This may be the last reference to a given EventPipeThread, so make a ref to keep it around till we're done - EventPipeThreadHolder thread_holder; - if (ep_thread_holder_init (&thread_holder, ep_thread_session_state_get_thread (thread_session_state))) { - ep_rt_spin_lock_handle_t *thread_lock = ep_thread_get_rt_lock_ref (ep_thread_holder_get_thread 
(&thread_holder)); - EP_SPIN_LOCK_ENTER (thread_lock, section5) - EP_ASSERT(ep_rt_volatile_load_uint32_t_without_barrier (ep_thread_get_unregistered_ref (ep_thread_session_state_get_thread (thread_session_state))) > 0); - ep_thread_delete_session_state (ep_thread_session_state_get_thread (thread_session_state), ep_thread_session_state_get_session (thread_session_state)); - EP_SPIN_LOCK_EXIT (thread_lock, section5) - ep_thread_holder_fini (&thread_holder); - } - } DN_VECTOR_PTR_FOREACH_END; - ep_on_exit: dn_vector_ptr_dispose (&session_states_to_delete); return; @@ -1355,40 +1298,52 @@ ep_buffer_manager_write_all_buffers_to_file_v4 ( ep_exit_error_handler (); } +// Former `buffer_manager_move_next_event_any_thread (buffer_manager, ep_perf_timestamp_get ())` perf comment +// PERF: This may be too aggressive? If this method is being called frequently enough to keep pace with the +// writing threads we could be in a state of high lock contention and lots of churning buffers. Each writer +// would take several locks, allocate a new buffer, write one event into it, then the reader would take the +// lock, convert the buffer to read-only and read the single event out of it. Allowing more events to accumulate +// in the buffers before converting between writable and read-only amortizes a lot of the overhead. One way +// to achieve that would be picking a stop_timestamp that was Xms in the past. This would let Xms of events +// to accumulate in the write buffer before we converted it and forced the writer to allocate another. Other more +// sophisticated approaches would probably build a low overhead synchronization mechanism to read and write the +// buffer at the same time. 
+// +// As a slight optimization for many writer threads concurrently writing events while the reader thread attempts +// to convert all head buffers to read-only, we take a snapshot of known thread session states and iterate +// until all events before the snapshot timestamp have been read before taking a new snapshot. EventPipeEventInstance * ep_buffer_manager_get_next_event (EventPipeBufferManager *buffer_manager) { EP_ASSERT (buffer_manager != NULL); + EP_ASSERT (buffer_manager->thread_session_state_list_snapshot != NULL); ep_requires_lock_not_held (); - // PERF: This may be too aggressive? If this method is being called frequently enough to keep pace with the - // writing threads we could be in a state of high lock contention and lots of churning buffers. Each writer - // would take several locks, allocate a new buffer, write one event into it, then the reader would take the - // lock, convert the buffer to read-only and read the single event out of it. Allowing more events to accumulate - // in the buffers before converting between writable and read-only amortizes a lot of the overhead. One way - // to achieve that would be picking a stop_timestamp that was Xms in the past. This would let Xms of events - // to accumulate in the write buffer before we converted it and forced the writer to allocate another. Other more - // sophisticated approaches would probably build a low overhead synchronization mechanism to read and write the - // buffer at the same time. 
- ep_timestamp_t stop_timestamp = ep_perf_timestamp_get (); - buffer_manager_move_next_event_any_thread (buffer_manager, stop_timestamp); + if (dn_list_empty (buffer_manager->thread_session_state_list_snapshot)) { + ep_timestamp_t snapshot_timestamp = ep_perf_timestamp_get (); + + EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) + buffer_manager_snapshot_threads (buffer_manager, snapshot_timestamp); + EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) + } + + buffer_manager_move_next_event_any_snapshot_thread (buffer_manager); + +ep_on_exit: return buffer_manager->current_event; + +ep_on_error: + buffer_manager->current_event = NULL; + ep_exit_error_handler (); } void -ep_buffer_manager_deallocate_buffers (EventPipeBufferManager *buffer_manager) +ep_buffer_manager_close (EventPipeBufferManager *buffer_manager) { EP_ASSERT (buffer_manager != NULL); - DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size); - - dn_vector_ptr_custom_init_params_t params = {0, }; - params.allocator = (dn_allocator_t *)&allocator; - params.capacity = dn_vector_ptr_default_local_allocator_capacity_size; - - dn_vector_ptr_t thread_session_states_to_remove; - ep_raise_error_if_nok (dn_vector_ptr_custom_init (&thread_session_states_to_remove, ¶ms)); + dn_list_clear (buffer_manager->thread_session_state_list_snapshot); // Take the buffer manager manipulation lock EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1) @@ -1397,21 +1352,19 @@ ep_buffer_manager_deallocate_buffers (EventPipeBufferManager *buffer_manager) DN_LIST_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, buffer_manager->thread_session_state_list) { // Get the list and determine if we can free it. EventPipeBufferList *buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state); - ep_thread_session_state_set_buffer_list (thread_session_state, NULL); // Iterate over all nodes in the buffer list and deallocate them. 
EventPipeBuffer *buffer = ep_buffer_list_get_and_remove_head (buffer_list); while (buffer) { - buffer_manager_deallocate_buffer (buffer_manager, buffer); + buffer_manager_deallocate_buffer(buffer_manager, buffer); buffer = ep_buffer_list_get_and_remove_head (buffer_list); } - // Now that all the buffer list elements have been freed, free the list itself. - ep_buffer_list_free (buffer_list); - buffer_list = NULL; + // clear the thread session state slot + ep_thread_set_session_state (ep_thread_session_state_get_thread (thread_session_state), ep_thread_session_state_get_session (thread_session_state), NULL); + // delete the thread session state and transitively the buffer list + ep_thread_session_state_free (thread_session_state); - // And finally queue the removal of the SessionState from the thread - dn_vector_ptr_push_back (&thread_session_states_to_remove, thread_session_state); } DN_LIST_FOREACH_END; // Clear thread session state list. @@ -1419,23 +1372,7 @@ ep_buffer_manager_deallocate_buffers (EventPipeBufferManager *buffer_manager) EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1) - // remove and delete the session state - DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThreadSessionState *, thread_session_state, &thread_session_states_to_remove) { - // The strong reference from session state -> thread might be the very last reference - // We need to ensure the thread doesn't die until we can release the lock - EP_ASSERT (thread_session_state != NULL); - EventPipeThreadHolder thread_holder; - if (ep_thread_holder_init (&thread_holder, ep_thread_session_state_get_thread (thread_session_state))) { - ep_rt_spin_lock_handle_t *thread_lock = ep_thread_get_rt_lock_ref (ep_thread_session_state_get_thread (thread_session_state)); - EP_SPIN_LOCK_ENTER (thread_lock, section2) - ep_thread_delete_session_state (ep_thread_session_state_get_thread (thread_session_state), ep_thread_session_state_get_session (thread_session_state)); - EP_SPIN_LOCK_EXIT (thread_lock, section2) - 
ep_thread_holder_fini (&thread_holder);
- }
- } DN_VECTOR_PTR_FOREACH_END;
-
ep_on_exit:
- dn_vector_ptr_dispose (&thread_session_states_to_remove);
return;

ep_on_error:
@@ -1456,6 +1393,53 @@ ep_buffer_manager_ensure_consistency (EventPipeBufferManager *buffer_manager)
}
#endif

+// While deleting a thread session state, this is the last chance to update sequence points
+// mapped thread sequence numbers. The current sequence point needs to have the latest read
+// sequence number, whereas future sequence points can drop the thread entry.
+static
+void
+buffer_manager_remove_and_delete_thread_session_state (
+ EventPipeBufferManager *buffer_manager,
+ EventPipeThreadSessionState *thread_session_state)
+{
+ EP_ASSERT (buffer_manager != NULL);
+ EP_ASSERT (thread_session_state != NULL);
+
+ ep_buffer_manager_requires_lock_held (buffer_manager);
+
+ EventPipeSequencePoint *current_sequence_point = NULL;
+ if (buffer_manager_try_peek_sequence_point (buffer_manager, &current_sequence_point)) {
+ DN_LIST_FOREACH_BEGIN (EventPipeSequencePoint *, sequence_point, buffer_manager->sequence_points) {
+ EventPipeThread *thread = ep_thread_session_state_get_thread (thread_session_state);
+ dn_umap_t *thread_sequence_numbers = ep_sequence_point_get_thread_sequence_numbers (sequence_point);
+ dn_umap_it_t found = dn_umap_ptr_uint32_find (thread_sequence_numbers, thread);
+ if (current_sequence_point != sequence_point) {
+ if (!dn_umap_it_end (found)) {
+ ep_thread_release (thread);
+ dn_umap_erase_key (thread_sequence_numbers, thread);
+ }
+ continue;
+ }
+
+ uint32_t thread_sequence_number = !dn_umap_it_end (found) ? dn_umap_it_value_uint32_t (found) : 0;
+ // Sequence numbers can overflow so we can't use a direct last_read > sequence_number comparison
+ // If a thread is able to drop more than 0x80000000 events in between sequence points then we will
+ // miscategorize it, but that seems unlikely. 
+ uint32_t sequence_number = ep_thread_session_state_get_last_read_sequence_number (thread_session_state); + uint32_t last_read_delta = sequence_number - thread_sequence_number; + if (0 < last_read_delta && last_read_delta < 0x80000000) { + dn_umap_ptr_uint32_insert_or_assign (thread_sequence_numbers, thread, sequence_number); + if (dn_umap_it_end (found)) + ep_thread_addref (thread); + } + } DN_LIST_FOREACH_END; + } + + dn_list_remove (buffer_manager->thread_session_state_list, thread_session_state); + ep_thread_set_session_state (ep_thread_session_state_get_thread (thread_session_state), ep_thread_session_state_get_session (thread_session_state), NULL); + ep_thread_session_state_free (thread_session_state); +} + #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ diff --git a/src/native/eventpipe/ep-buffer-manager.h b/src/native/eventpipe/ep-buffer-manager.h index 99be8024acbdca..8bb522aaed7ca9 100644 --- a/src/native/eventpipe/ep-buffer-manager.h +++ b/src/native/eventpipe/ep-buffer-manager.h @@ -21,8 +21,6 @@ struct _EventPipeBufferList { #else struct _EventPipeBufferList_Internal { #endif - // The thread which writes to the buffers in this list. - EventPipeThreadHolder thread_holder; // The buffer manager that owns this list. EventPipeBufferManager *manager; // Buffers are stored in an intrusive linked-list from oldest to newest. @@ -31,9 +29,6 @@ struct _EventPipeBufferList_Internal { EventPipeBuffer *tail_buffer; // The number of buffers in the list. uint32_t buffer_count; - // The sequence number of the last event that was read, only - // updated/read by the reader thread. 
- uint32_t last_read_sequence_number;
};

#if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_BUFFER_MANAGER_GETTER_SETTER)
@@ -42,26 +37,13 @@ struct _EventPipeBufferList {
};
#endif

-EP_DEFINE_GETTER_REF(EventPipeBufferList *, buffer_list, EventPipeThreadHolder *, thread_holder);
-
-static
-inline
-EventPipeThread *
-ep_buffer_list_get_thread (EventPipeBufferList *buffer_list)
-{
- return ep_thread_holder_get_thread (ep_buffer_list_get_thread_holder_cref (buffer_list));
-}
-
EventPipeBufferList *
-ep_buffer_list_alloc (
- EventPipeBufferManager *manager,
- EventPipeThread *thread);
+ep_buffer_list_alloc (EventPipeBufferManager *manager);

EventPipeBufferList *
ep_buffer_list_init (
EventPipeBufferList *buffer_list,
- EventPipeBufferManager *manager,
- EventPipeThread *thread);
+ EventPipeBufferManager *manager);

void
ep_buffer_list_fini (EventPipeBufferList *buffer_list);

@@ -108,11 +90,22 @@ struct _EventPipeBufferManager_Internal {
 ep_rt_spin_lock_handle_t rt_lock;
 // The session this buffer manager belongs to.
 EventPipeSession *session;
- // Iterator state for reader thread.
+ // ---------------------------- READER-THREAD ONLY BEGIN ----------------------------
 // These are not protected by rt_lock and expected to only be used on the reader thread.
+ // Iterator state for reader thread.
 EventPipeEventInstance *current_event;
 EventPipeBuffer *current_buffer;
- EventPipeBufferList *current_buffer_list;
+ EventPipeThreadSessionState *current_thread_session_state;
+ // A snapshot of the thread_session_state_list only used by the reader thread to optimize event iteration.
+ // As existing and new threads write events, this represents the subset of EventPipeThreadSessionStates
+ // containing events before the snapshot timestamp. As buffer_manager_move_next_event_any_snapshot_thread
+ // and buffer_manager_move_next_event_same_thread iterate over events, entries are removed from the list
+ // when no more events are available before the snapshot timestamp. 
So once there are no more events + // before the snapshot timestamp, the list will be empty. + dn_list_t *thread_session_state_list_snapshot; + // The timestamp representing the cut-off for the snapshot list. + ep_timestamp_t snapshot_timestamp; + // ---------------------------- READER-THREAD ONLY END ------------------------------ // The total allocation size of buffers under management. volatile size_t size_of_all_buffers; // The maximum allowable size of buffers under management. @@ -190,20 +183,6 @@ ep_buffer_manager_write_event ( ep_rt_thread_handle_t event_thread, EventPipeStackContents *stack); -// READ_ONLY state and no new EventPipeBuffers or EventPipeBufferLists can be created. Calls to -// write_event that start during the suspension period or were in progress but hadn't yet recorded -// their event into a buffer before the start of the suspension period will return false and the -// event will not be recorded. Any events that not recorded as a result of this suspension will be -// treated the same as events that were not recorded due to configuration. -// EXPECTED USAGE: First the caller will disable all events via configuration, then call -// suspend_write_event () to force any write_event calls that may still be in progress to either -// finish or cancel. After that all BufferLists and Buffers can be safely drained and/or deleted. -// _Requires_lock_held (ep) -void -ep_buffer_manager_suspend_write_event ( - EventPipeBufferManager *buffer_manager, - uint32_t session_index); - // Write the contents of the managed buffers to the specified file. // The stop_timeStamp is used to determine when tracing was stopped to ensure that we // skip any events that might be partially written due to races when tracing is stopped. 
@@ -236,7 +215,7 @@ ep_buffer_manager_get_next_event (EventPipeBufferManager *buffer_manager); // threads can be in the middle of a write operation and get blocked, and we may not get an opportunity // to free their buffer for a very long time. void -ep_buffer_manager_deallocate_buffers (EventPipeBufferManager *buffer_manager); +ep_buffer_manager_close (EventPipeBufferManager *buffer_manager); #ifdef EP_CHECKED_BUILD bool diff --git a/src/native/eventpipe/ep-buffer.c b/src/native/eventpipe/ep-buffer.c index 0bf815077f6113..49979357323955 100644 --- a/src/native/eventpipe/ep-buffer.c +++ b/src/native/eventpipe/ep-buffer.c @@ -56,8 +56,9 @@ ep_buffer_free (EventPipeBuffer *buffer) { ep_return_void_if_nok (buffer != NULL); - // We should never be deleting a buffer that a writer thread might still try to write to - EP_ASSERT (ep_rt_volatile_load_uint32_t (&buffer->state) == (uint32_t)EP_BUFFER_STATE_READ_ONLY); + // buffers are not guaranteed to be read-only at this point as allocation may have failed, + // a Listener EventPipe Session may not have finished reading all events by disable, + // or an EventPipe session was abruptly disconnected. ep_rt_vfree (buffer->buffer, buffer->limit - buffer->buffer); ep_rt_object_free (buffer); @@ -191,8 +192,6 @@ ep_buffer_convert_to_read_only (EventPipeBuffer *buffer) EP_ASSERT (buffer != NULL); EP_ASSERT (buffer->current_read_event == NULL); - ep_thread_requires_lock_held (buffer->writer_thread); - ep_rt_volatile_store_uint32_t (&buffer->state, (uint32_t)EP_BUFFER_STATE_READ_ONLY); // If this buffer contains an event, select it. diff --git a/src/native/eventpipe/ep-buffer.h b/src/native/eventpipe/ep-buffer.h index e7c1c9072eebcd..a952f7ba7e6a78 100644 --- a/src/native/eventpipe/ep-buffer.h +++ b/src/native/eventpipe/ep-buffer.h @@ -24,9 +24,9 @@ // isn't writable and read-related methods will assert if it isn't readable. 
Methods that have no asserts should have immutable results that // can be used at any point during the buffer's lifetime. The buffer has no internal locks so it is the caller's responsibility to synchronize // their usage. -// Writing into the buffer and calling convert_to_read_only() is always done with EventPipeThread rt_lock held. The eventual reader thread can do +// Calling convert_to_read_only() is always done with EventPipeBufferManager lock held, and it yields to concurrent writes. The eventual reader thread can do // a few different things to ensure it sees a consistent state: -// 1) Take the writer's EventPipeThread rt_lock at least once after the last time the writer writes events +// 1) Take the buffer manager's lock at least once after the last time the writer writes events // 2) Use a memory barrier that prevents reader loads from being re-ordered earlier, such as the one that will occur implicitly by evaluating // ep_buffer_get_volatile_state () @@ -57,7 +57,7 @@ struct _EventPipeBuffer_Internal { // The linked list is invasive, thus we declare the pointers here. 
EventPipeBuffer *prev_buffer; EventPipeBuffer *next_buffer; - // State transition WRITABLE -> READ_ONLY only occurs while holding the writer_thread->rt_lock; + // State transition WRITABLE -> READ_ONLY only occurs while holding the buffer_manager lock; // It can be read at any time volatile uint32_t state; // The sequence number corresponding to current_read_event @@ -74,7 +74,6 @@ struct _EventPipeBuffer { EP_DEFINE_GETTER(EventPipeBuffer *, buffer, ep_timestamp_t, creation_timestamp) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, uint8_t *, limit) -EP_DEFINE_GETTER_REF(EventPipeBuffer *, buffer, volatile uint32_t *, state) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_SETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, prev_buffer) EP_DEFINE_GETTER(EventPipeBuffer *, buffer, EventPipeBuffer *, next_buffer) @@ -146,7 +145,7 @@ EventPipeBufferState ep_buffer_get_volatile_state (const EventPipeBuffer *buffer); // Convert the buffer writable to readable. -// _Requires_lock_held (thread) +// Should only be called while holding the buffer manager lock. 
void ep_buffer_convert_to_read_only (EventPipeBuffer *buffer); diff --git a/src/native/eventpipe/ep-event-instance.c b/src/native/eventpipe/ep-event-instance.c index 2d1c2d2fa09fe6..aee474d6d0b57d 100644 --- a/src/native/eventpipe/ep-event-instance.c +++ b/src/native/eventpipe/ep-event-instance.c @@ -231,8 +231,8 @@ sequence_point_fini (EventPipeSequencePoint *sequence_point) // Each entry in the map owns a ref-count on the corresponding thread if (dn_umap_size (sequence_point->thread_sequence_numbers) != 0) { - DN_UMAP_FOREACH_KEY_BEGIN (EventPipeThreadSessionState *, key, sequence_point->thread_sequence_numbers) { - ep_thread_release (ep_thread_session_state_get_thread (key)); + DN_UMAP_FOREACH_KEY_BEGIN (EventPipeThread *, key, sequence_point->thread_sequence_numbers) { + ep_thread_release (key); } DN_UMAP_FOREACH_END; } diff --git a/src/native/eventpipe/ep-session.c b/src/native/eventpipe/ep-session.c index 56786d0e6f2681..ba86a00bbca738 100644 --- a/src/native/eventpipe/ep-session.c +++ b/src/native/eventpipe/ep-session.c @@ -40,10 +40,6 @@ static void session_create_streaming_thread (EventPipeSession *session); -static -void -ep_session_remove_dangling_session_states (EventPipeSession *session); - static bool session_user_events_tracepoints_init ( @@ -380,48 +376,6 @@ ep_session_alloc ( ep_exit_error_handler (); } -void -ep_session_remove_dangling_session_states (EventPipeSession *session) -{ - ep_return_void_if_nok (session != NULL); - - DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size); - - dn_vector_ptr_custom_init_params_t params = {0, }; - params.allocator = (dn_allocator_t *)&allocator; - params.capacity = dn_vector_ptr_default_local_allocator_capacity_size; - - dn_vector_ptr_t threads; - - if (dn_vector_ptr_custom_init (&threads, ¶ms)) { - ep_thread_get_threads (&threads); - DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThread *, thread, &threads) { - if (thread) { - EP_SPIN_LOCK_ENTER (ep_thread_get_rt_lock_ref (thread), 
section1); - EventPipeThreadSessionState *session_state = ep_thread_get_session_state(thread, session); - if (session_state) { - // If a buffer tries to write event(s) but never gets a buffer because the maximum total buffer size - // has been exceeded, we can leak the EventPipeThreadSessionState* and crash later trying to access - // the session from the thread session state. Whenever we terminate a session we check to make sure - // we haven't leaked any thread session states. - ep_thread_delete_session_state(thread, session); - } - EP_SPIN_LOCK_EXIT (ep_thread_get_rt_lock_ref (thread), section1); - - ep_thread_release (thread); - } - } DN_VECTOR_PTR_FOREACH_END; - - dn_vector_ptr_dispose (&threads); - } - -ep_on_exit: - return; - -ep_on_error: - ep_exit_error_handler (); -} - void ep_session_inc_ref (EventPipeSession *session) { @@ -445,11 +399,18 @@ ep_session_dec_ref (EventPipeSession *session) ep_buffer_manager_free (session->buffer_manager); ep_file_free (session->file); - ep_session_remove_dangling_session_states (session); - ep_rt_object_free (session); } +void +ep_session_close (EventPipeSession *session) +{ + EP_ASSERT (session != NULL); + + if (ep_session_type_uses_buffer_manager (session->session_type)) + ep_buffer_manager_close (session->buffer_manager); +} + EventPipeSessionProvider * ep_session_get_session_provider ( const EventPipeSession *session, @@ -524,14 +485,23 @@ ep_session_execute_rundown ( ep_rt_execute_rundown (execution_checkpoints); } +// Coordinates an EventPipeSession being freed in disable_holding_lock with +// threads operating on session pointer slots in the _ep_sessions session array. 
+// It assumes 1) callers have already cleared the session pointer slot from the +// _ep_sessions so all threads that already loaded the session pointer slot will +// be properly awaited and 2) the config lock is held to prevent a new session +// being allocated and inheriting the session pointer slot index, which would +// cause threads to mistakenly operate on the wrong session. void -ep_session_suspend_write_event (EventPipeSession *session) +ep_session_wait_for_inflight_thread_ops (EventPipeSession *session) { EP_ASSERT (session != NULL); // Need to disable the session before calling this method. EP_ASSERT (!ep_is_session_enabled ((EventPipeSessionID)session)); + ep_requires_lock_held (); + DN_DEFAULT_LOCAL_ALLOCATOR (allocator, dn_vector_ptr_default_local_allocator_byte_size); dn_vector_ptr_custom_init_params_t params = {0, }; @@ -543,9 +513,13 @@ ep_session_suspend_write_event (EventPipeSession *session) if (dn_vector_ptr_custom_init (&threads, ¶ms)) { ep_thread_get_threads (&threads); DN_VECTOR_PTR_FOREACH_BEGIN (EventPipeThread *, thread, &threads) { + // The session slot must remain cleared from the _ep_sessions array from holding the config lock. + // Otherwise, if the config lock is removed, a newly allocated session could inherit the same slot + // and cause operating threads to mistake the new session for this one. + EP_ASSERT (!ep_is_session_enabled ((EventPipeSessionID)session)); if (thread) { - // Wait for the thread to finish any writes to this session - EP_YIELD_WHILE (ep_thread_get_session_write_in_progress (thread) == session->index); + // The session is disabled, so wait for any in-progress writes to complete. 
+ EP_YIELD_WHILE (ep_thread_get_session_use_in_progress (thread) == session->index); // Since we've already disabled the session, the thread won't call back in to this // session once its done with the current write @@ -556,9 +530,7 @@ ep_session_suspend_write_event (EventPipeSession *session) dn_vector_ptr_dispose (&threads); } - if (session->buffer_manager) - // Convert all buffers to read only to ensure they get flushed - ep_buffer_manager_suspend_write_event (session->buffer_manager, session->index); + ep_requires_lock_held (); } void diff --git a/src/native/eventpipe/ep-session.h b/src/native/eventpipe/ep-session.h index 0617a876287862..0a55da3829cdf2 100644 --- a/src/native/eventpipe/ep-session.h +++ b/src/native/eventpipe/ep-session.h @@ -86,6 +86,7 @@ EP_DEFINE_GETTER(EventPipeSession *, session, uint32_t, index) EP_DEFINE_GETTER(EventPipeSession *, session, EventPipeSessionProviderList *, providers) EP_DEFINE_GETTER(EventPipeSession *, session, EventPipeBufferManager *, buffer_manager) EP_DEFINE_GETTER_REF(EventPipeSession *, session, volatile uint32_t *, rundown_enabled) +EP_DEFINE_GETTER(EventPipeSession *, session, EventPipeSessionType, session_type) EP_DEFINE_GETTER(EventPipeSession *, session, uint64_t, rundown_keyword) EP_DEFINE_GETTER(EventPipeSession *, session, ep_timestamp_t, session_start_time) EP_DEFINE_GETTER(EventPipeSession *, session, ep_timestamp_t, session_start_timestamp) @@ -114,6 +115,9 @@ ep_session_inc_ref (EventPipeSession *session); void ep_session_dec_ref (EventPipeSession *session); +void +ep_session_close (EventPipeSession *session); + // _Requires_lock_held (ep) EventPipeSessionProvider * ep_session_get_session_provider ( @@ -130,11 +134,12 @@ ep_session_execute_rundown ( EventPipeSession *session, dn_vector_ptr_t *execution_checkpoints); -// Force all in-progress writes to either finish or cancel -// This is required to ensure we can safely flush and delete the buffers +// Wait for all in-progress threads that depend on this 
session's pointer slot +// in _ep_sessions remaining valid throughout its operation. +// This is required to ensure we can safely flush, close, and free the session. // _Requires_lock_held (ep) void -ep_session_suspend_write_event (EventPipeSession *session); +ep_session_wait_for_inflight_thread_ops (EventPipeSession *session); // Write a sequence point into the output stream synchronously. void diff --git a/src/native/eventpipe/ep-thread.c b/src/native/eventpipe/ep-thread.c index b720bf592d0c4d..aa2cea459d3f38 100644 --- a/src/native/eventpipe/ep-thread.c +++ b/src/native/eventpipe/ep-thread.c @@ -31,13 +31,10 @@ ep_thread_alloc (void) EventPipeThread *instance = ep_rt_object_alloc (EventPipeThread); ep_raise_error_if_nok (instance != NULL); - ep_rt_spin_lock_alloc (&instance->rt_lock); - ep_raise_error_if_nok (ep_rt_spin_lock_is_valid (&instance->rt_lock)); - instance->os_thread_id = ep_rt_thread_id_t_to_uint64_t (ep_rt_current_thread_get_id ()); - memset (instance->session_state, 0, sizeof (instance->session_state)); + memset ((void *)instance->session_state, 0, sizeof (instance->session_state)); - instance->writing_event_in_progress = UINT32_MAX; + instance->session_use_in_progress = UINT32_MAX; instance->unregistered = 0; ep_on_exit: @@ -62,7 +59,6 @@ ep_thread_free (EventPipeThread *thread) } #endif - ep_rt_spin_lock_free (&thread->rt_lock); ep_rt_object_free (thread); } @@ -124,6 +120,36 @@ ep_thread_unregister (EventPipeThread *thread) ep_return_false_if_nok (thread != NULL); bool found = false; + + // Thread unregistration is one condition for EventPipeThreadSessionStates to be cleaned up. + // Rather than coordinating cross-thread cleanup, restrict the work to each reader thread by + // signaling events available for reading. 
+ for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { + if ((ep_volatile_load_allow_write () & ((uint64_t)1 << i)) == 0) + continue; + + // Now that we know this session is probably live we pay the perf cost of the memory barriers + // Setting this flag lets a thread trying to do a concurrent disable know that it is not safe to delete + // session ID i. The if check above also ensures that once the session is unpublished this thread + // will eventually stop ever storing ID i into the session_use_in_progress flag. This is important to + // guarantee termination of the YIELD_WHILE loop in ep_session_wait_for_inflight_thread_ops. + ep_thread_set_session_use_in_progress (thread, i); + { + EventPipeSession *const session = ep_volatile_load_session (i); + // Disable is allowed to set the session to NULL at any time and that may have occurred in between + // the check and the load + if (session != NULL) { + EventPipeBufferManager *const buffer_manager = ep_session_get_buffer_manager (session); + if (buffer_manager != NULL) { + ep_rt_wait_event_set (ep_buffer_manager_get_rt_wait_event_ref (buffer_manager)); + } + } + } + // Do not reference session past this point, we are signaling disable_holding_lock that it is safe to + // delete it + ep_thread_set_session_use_in_progress (thread, UINT32_MAX); + } + EP_SPIN_LOCK_ENTER (&_ep_threads_lock, section1) // Remove ourselves from the global list DN_LIST_FOREACH_BEGIN (EventPipeThread *, current_thread, _ep_threads) { @@ -182,41 +208,37 @@ ep_thread_get_threads (dn_vector_ptr_t *threads) } void -ep_thread_set_session_write_in_progress ( +ep_thread_set_session_use_in_progress ( EventPipeThread *thread, uint32_t session_index) { EP_ASSERT (thread != NULL); EP_ASSERT (session_index < EP_MAX_NUMBER_OF_SESSIONS || session_index == UINT32_MAX); - ep_rt_volatile_store_uint32_t (&thread->writing_event_in_progress, session_index); + ep_rt_volatile_store_uint32_t (&thread->session_use_in_progress, session_index); } uint32_t 
-ep_thread_get_session_write_in_progress (const EventPipeThread *thread) +ep_thread_get_session_use_in_progress (const EventPipeThread *thread) { EP_ASSERT (thread != NULL); - return ep_rt_volatile_load_uint32_t (&thread->writing_event_in_progress); + return ep_rt_volatile_load_uint32_t (&thread->session_use_in_progress); } -EventPipeThreadSessionState * -ep_thread_get_or_create_session_state ( +void +ep_thread_set_session_state ( EventPipeThread *thread, - EventPipeSession *session) + EventPipeSession *session, + EventPipeThreadSessionState *thread_session_state) { EP_ASSERT (thread != NULL); EP_ASSERT (session != NULL); EP_ASSERT (ep_session_get_index (session) < EP_MAX_NUMBER_OF_SESSIONS); - ep_thread_requires_lock_held (thread); - - EventPipeThreadSessionState *state = thread->session_state [ep_session_get_index (session)]; - if (!state) { - state = ep_thread_session_state_alloc (thread, session, ep_session_get_buffer_manager (session)); - thread->session_state [ep_session_get_index (session)] = state; - } + ep_buffer_manager_requires_lock_held (ep_session_get_buffer_manager (session)); - return state; + uint32_t index = ep_session_get_index (session); + ep_rt_volatile_store_ptr ((volatile void **)(&thread->session_state [index]), thread_session_state); } EventPipeThreadSessionState * @@ -228,44 +250,26 @@ ep_thread_get_session_state ( EP_ASSERT (session != NULL); EP_ASSERT (ep_session_get_index (session) < EP_MAX_NUMBER_OF_SESSIONS); - ep_thread_requires_lock_held (thread); + ep_buffer_manager_requires_lock_held (ep_session_get_buffer_manager (session)); - return thread->session_state [ep_session_get_index (session)]; + uint32_t index = ep_session_get_index (session); + return (EventPipeThreadSessionState *)ep_rt_volatile_load_ptr_without_barrier ((volatile void **)(&thread->session_state [index])); } -void -ep_thread_delete_session_state ( - EventPipeThread *thread, +EventPipeThreadSessionState * +ep_thread_get_volatile_session_state ( + const EventPipeThread 
*thread, EventPipeSession *session) { EP_ASSERT (thread != NULL); EP_ASSERT (session != NULL); + EP_ASSERT (ep_session_get_index (session) < EP_MAX_NUMBER_OF_SESSIONS); + EP_ASSERT (ep_thread_get() == thread); + EP_ASSERT (thread->session_use_in_progress == ep_session_get_index (session)); - ep_thread_requires_lock_held (thread); - - uint32_t index = ep_session_get_index (session); - EP_ASSERT (index < EP_MAX_NUMBER_OF_SESSIONS); - - EP_ASSERT (thread->session_state [index] != NULL); - ep_thread_session_state_free (thread->session_state [index]); - thread->session_state [index] = NULL; -} - -#ifdef EP_CHECKED_BUILD -void -ep_thread_requires_lock_held (const EventPipeThread *thread) -{ - EP_ASSERT (thread != NULL); - ep_rt_spin_lock_requires_lock_held (&thread->rt_lock); -} - -void -ep_thread_requires_lock_not_held (const EventPipeThread *thread) -{ - EP_ASSERT (thread != NULL); - ep_rt_spin_lock_requires_lock_not_held (&thread->rt_lock); + size_t index = ep_session_get_index (session); + return (EventPipeThreadSessionState *)ep_rt_volatile_load_ptr ((volatile void **)(&thread->session_state [index])); } -#endif /* * EventPipeThreadHolder. 
@@ -343,10 +347,10 @@ ep_thread_session_state_alloc ( instance->session = session; instance->sequence_number = 1; + instance->last_read_sequence_number = 0; -#ifdef EP_CHECKED_BUILD - instance->buffer_manager = buffer_manager; -#endif + instance->buffer_list = ep_buffer_list_alloc (buffer_manager); + ep_raise_error_if_nok (instance->buffer_list != NULL); ep_on_exit: return instance; @@ -375,6 +379,7 @@ ep_thread_session_state_free (EventPipeThreadSessionState *thread_session_state) { ep_return_void_if_nok (thread_session_state != NULL); ep_thread_holder_fini (&thread_session_state->thread_holder); + ep_buffer_list_free (thread_session_state->buffer_list); ep_rt_object_free (thread_session_state); } @@ -389,66 +394,39 @@ EventPipeBuffer * ep_thread_session_state_get_write_buffer (const EventPipeThreadSessionState *thread_session_state) { EP_ASSERT (thread_session_state != NULL); - ep_thread_requires_lock_held (thread_session_state->thread_holder.thread); - - EP_ASSERT ((thread_session_state->write_buffer == NULL) || (ep_rt_volatile_load_uint32_t (ep_buffer_get_state_cref (thread_session_state->write_buffer)) == EP_BUFFER_STATE_WRITABLE)); - return thread_session_state->write_buffer; -} - -void -ep_thread_session_state_set_write_buffer ( - EventPipeThreadSessionState *thread_session_state, - EventPipeBuffer *new_buffer) -{ - EP_ASSERT (thread_session_state != NULL); - - ep_thread_requires_lock_held (thread_session_state->thread_holder.thread); - - EP_ASSERT ((new_buffer == NULL) || (ep_rt_volatile_load_uint32_t (ep_buffer_get_state_cref (new_buffer)) == EP_BUFFER_STATE_WRITABLE)); - EP_ASSERT ((thread_session_state->write_buffer == NULL) || (ep_rt_volatile_load_uint32_t (ep_buffer_get_state_cref (thread_session_state->write_buffer)) == EP_BUFFER_STATE_WRITABLE)); - - if (thread_session_state->write_buffer) - ep_buffer_convert_to_read_only (thread_session_state->write_buffer); + ep_buffer_manager_requires_lock_held (ep_session_get_buffer_manager 
(thread_session_state->session)); - thread_session_state->write_buffer = new_buffer; + EP_ASSERT ((thread_session_state->write_buffer == NULL) || (ep_buffer_get_volatile_state (thread_session_state->write_buffer) == EP_BUFFER_STATE_WRITABLE)); + return (EventPipeBuffer*) ep_rt_volatile_load_ptr_without_barrier ((volatile void **)&thread_session_state->write_buffer); } -EventPipeBufferList * -ep_thread_session_state_get_buffer_list (const EventPipeThreadSessionState *thread_session_state) +EventPipeBuffer * +ep_thread_session_state_get_volatile_write_buffer (const EventPipeThreadSessionState *thread_session_state) { EP_ASSERT (thread_session_state != NULL); - ep_buffer_manager_requires_lock_held (thread_session_state->buffer_manager); - - return thread_session_state->buffer_list; + return (EventPipeBuffer*) ep_rt_volatile_load_ptr ((volatile void **)&thread_session_state->write_buffer); } void -ep_thread_session_state_set_buffer_list ( +ep_thread_session_state_set_write_buffer ( EventPipeThreadSessionState *thread_session_state, - EventPipeBufferList *new_buffer_list) + EventPipeBuffer *new_buffer) { EP_ASSERT (thread_session_state != NULL); - ep_buffer_manager_requires_lock_held (thread_session_state->buffer_manager); + ep_buffer_manager_requires_lock_held (ep_session_get_buffer_manager (thread_session_state->session)); - thread_session_state->buffer_list = new_buffer_list; -} + EP_ASSERT ((new_buffer == NULL) || (ep_buffer_get_volatile_state (new_buffer) == EP_BUFFER_STATE_WRITABLE)); + EP_ASSERT ((thread_session_state->write_buffer == NULL) || (ep_buffer_get_volatile_state (thread_session_state->write_buffer) == EP_BUFFER_STATE_WRITABLE)); -uint32_t -ep_thread_session_state_get_volatile_sequence_number (const EventPipeThreadSessionState *thread_session_state) -{ - EP_ASSERT (thread_session_state != NULL); - return ep_rt_volatile_load_uint32_t_without_barrier (&thread_session_state->sequence_number); + thread_session_state->write_buffer = new_buffer; } uint32_t 
-ep_thread_session_state_get_sequence_number (const EventPipeThreadSessionState *thread_session_state) +ep_thread_session_state_get_volatile_sequence_number (const EventPipeThreadSessionState *thread_session_state) { EP_ASSERT (thread_session_state != NULL); - - ep_thread_requires_lock_held (thread_session_state->thread_holder.thread); - return ep_rt_volatile_load_uint32_t_without_barrier (&thread_session_state->sequence_number); } @@ -456,8 +434,7 @@ void ep_thread_session_state_increment_sequence_number (EventPipeThreadSessionState *thread_session_state) { EP_ASSERT (thread_session_state != NULL); - - ep_thread_requires_lock_held (thread_session_state->thread_holder.thread); + EP_ASSERT (ep_thread_get() == ep_thread_session_state_get_thread (thread_session_state)); ep_rt_volatile_store_uint32_t (&thread_session_state->sequence_number, ep_rt_volatile_load_uint32_t (&thread_session_state->sequence_number) + 1); } diff --git a/src/native/eventpipe/ep-thread.h b/src/native/eventpipe/ep-thread.h index 81093ec06827bd..658a1cf6f40a26 100644 --- a/src/native/eventpipe/ep-thread.h +++ b/src/native/eventpipe/ep-thread.h @@ -21,30 +21,47 @@ struct _EventPipeThread { #else struct _EventPipeThread_Internal { #endif - // Per-session state. - // The pointers in this array are only read/written under rt_lock - // Some of the data within the ThreadSessionState object can be accessed - // without rt_lock however, see the fields of that type for details. - EventPipeThreadSessionState *session_state [EP_MAX_NUMBER_OF_SESSIONS]; + // An array of slots to hold per-session per-thread state. + // A slot in this array should be non-NULL iff the same ThreadSessionState object is also contained in the + // session->buffer_manager->thread_session_state_list. + // + // Thread-safety notes: + // The pointers in this array are only modified under the buffer manager lock for whichever enabled session currently owns the slot. 
+ // (session->index == slot_index) + // Both the per-thread slots here and the global slots in _ep_sessions can only be non-NULL during the time period when the + // session is enabled. ep_disable() won't exit the global EP lock until all the slots for that session are NULL and + // threads have exited code regions where they may have been using locally cached pointers retrieved through + // the slots. This ensures threads neither retain stale pointers to a disabled session, nor will they be able to acquire a + // stale pointer from the slot later. It also means no thread will be trying to synchronize using a stale session lock across + // the transition when a new session takes over ownership of the slot. + // + // Writing a non-NULL pointer into the slot only occurs on the OS thread associated with this EventPipeThread object when + // trying to write the first event into a session. + // Writing a NULL pointer into the slot could occur on either: + // 1. The session event flushing thread when it has finished flushing all the events for a thread that already exited. + // 2. The thread which calls ep_disable() to disable the session. + // + // Reading from this slot can either be done by taking the buffer manager lock, or by doing a volatile read of the pointer. The volatile + // read should only occur when running on the OS thread associated with this EventPipeThread object. When reading under the lock the + // pointer should not be retained past the scope of the lock. When using the volatile read, the pointer should not be retained + // outside the period where session_use_in_progress == slot_number. + volatile EventPipeThreadSessionState *session_state [EP_MAX_NUMBER_OF_SESSIONS]; + #ifdef EP_THREAD_INCLUDE_ACTIVITY_ID uint8_t activity_id [EP_ACTIVITY_ID_SIZE]; #endif EventPipeSession *rundown_session; - // This lock is designed to have low contention. 
Normally it is only taken by this thread, - // but occasionally it may also be taken by another thread which is trying to collect and drain - // buffers from all threads. - ep_rt_spin_lock_handle_t rt_lock; // This is initialized when the Thread object is first constructed and remains // immutable afterwards. uint64_t os_thread_id; // The EventPipeThreadHolder maintains one count while the thread is alive - // and each session's EventPipeBufferList maintains one count while it - // exists. + // Each EventPipeThreadSessionState maintains one count throughout its lifetime + // Every SequencePoint tracking this thread also maintains one count int32_t ref_count; // If this is set to a valid id before the corresponding entry of sessions is set to null, // that pointer will be protected from deletion. See ep_disable () and // ep_write () for more detail. - volatile uint32_t writing_event_in_progress; + volatile uint32_t session_use_in_progress; // This is set to non-zero when the thread is unregistered from the global list of EventPipe threads. // This should happen when a physical thread is ending. // This is a convenience marker to prevent us from having to search the global list. 
@@ -72,7 +89,6 @@ ep_thread_get_activity_id_cref (ep_rt_thread_activity_id_handle_t activity_id_ha EP_DEFINE_GETTER(EventPipeThread *, thread, EventPipeSession *, rundown_session); EP_DEFINE_SETTER(EventPipeThread *, thread, EventPipeSession *, rundown_session); -EP_DEFINE_GETTER_REF(EventPipeThread *, thread, ep_rt_spin_lock_handle_t *, rt_lock); EP_DEFINE_GETTER(EventPipeThread *, thread, uint64_t, os_thread_id); EP_DEFINE_GETTER_REF(EventPipeThread *, thread, int32_t *, ref_count); EP_DEFINE_GETTER_REF(EventPipeThread *, thread, volatile uint32_t *, unregistered); @@ -170,42 +186,31 @@ ep_thread_is_rundown_thread (const EventPipeThread *thread) return (ep_thread_get_rundown_session (thread) != NULL); } -#ifdef EP_CHECKED_BUILD -void -ep_thread_requires_lock_held (const EventPipeThread *thread); - void -ep_thread_requires_lock_not_held (const EventPipeThread *thread); -#else -#define ep_thread_requires_lock_held(thread) -#define ep_thread_requires_lock_not_held(thread) -#endif - -void -ep_thread_set_session_write_in_progress ( +ep_thread_set_session_use_in_progress ( EventPipeThread *thread, uint32_t session_index); uint32_t -ep_thread_get_session_write_in_progress (const EventPipeThread *thread); +ep_thread_get_session_use_in_progress (const EventPipeThread *thread); -// _Requires_lock_held (thread) +// _Requires_lock_held (buffer_manager) EventPipeThreadSessionState * -ep_thread_get_or_create_session_state ( - EventPipeThread *thread, +ep_thread_get_session_state ( + const EventPipeThread *thread, EventPipeSession *session); -// _Requires_lock_held (thread) EventPipeThreadSessionState * -ep_thread_get_session_state ( +ep_thread_get_volatile_session_state ( const EventPipeThread *thread, EventPipeSession *session); -// _Requires_lock_held (thread) +// _Requires_lock_held (buffer_manager) void -ep_thread_delete_session_state ( +ep_thread_set_session_state ( EventPipeThread *thread, - EventPipeSession *session); + EventPipeSession *session, + 
EventPipeThreadSessionState *thread_session_state); /* * EventPipeThreadHolder. @@ -254,29 +259,25 @@ struct _EventPipeThreadSessionState_Internal { EventPipeThreadHolder thread_holder; // immutable. EventPipeSession *session; - // The buffer this thread is allowed to write to if non-null, it must - // match the tail of buffer_list - // protected by thread_holder->thread.rt_lock + // The buffer this thread is allowed to write to. If non-null, it must + // match the tail of buffer_list. + // Modifications always occur under the buffer manager lock. + // Non-null writes only occur on the thread this state belongs to. + // Null writes may occur on the buffer manager event flushing thread. + // Lock-free reads may occur only on the thread this state belongs to. EventPipeBuffer *write_buffer; - // The list of buffers that were written to by this thread. This - // is populated lazily the first time a thread tries to allocate - // a buffer for this session. It is set back to null when - // event writing is suspended during session disable. - // protected by the buffer manager lock. - // This field can be read outside the lock when - // the buffer allocation logic is estimating how many - // buffers a given thread has used (see: ep_thread_session_state_get_buffer_count_estimate and its uses). + // The list of buffers that were written to by this thread. + // immutable EventPipeBufferList *buffer_list; -#ifdef EP_CHECKED_BUILD - // protected by the buffer manager lock. - EventPipeBufferManager *buffer_manager; -#endif + // The sequence number of the last event that was read, only + // updated/read by the reader thread. + uint32_t last_read_sequence_number; // The number of events that were attempted to be written by this // thread. Each event was either successfully recorded in a buffer // or it was dropped. // - // Only updated by the current thread under thread_holder->thread.rt_lock. 
Other - // event writer threads are allowed to do unsynchronized reads when + // Only updated by the current thread. + // Other event writer threads are allowed to do unsynchronized reads when // capturing a sequence point but this does not provide any consistency // guarantee. In particular there is no promise that the other thread // is observing the most recent sequence number, nor is there a promise @@ -299,8 +300,10 @@ struct _EventPipeThreadSessionState { }; #endif -EP_DEFINE_GETTER_REF(EventPipeThreadSessionState *, thread_session_state, EventPipeThreadHolder *, thread_holder) +EP_DEFINE_GETTER(EventPipeThreadSessionState *, thread_session_state, EventPipeBufferList *, buffer_list) EP_DEFINE_GETTER(EventPipeThreadSessionState *, thread_session_state, EventPipeSession *, session) +EP_DEFINE_GETTER(EventPipeThreadSessionState *, thread_session_state, uint32_t, last_read_sequence_number); +EP_DEFINE_SETTER(EventPipeThreadSessionState *, thread_session_state, uint32_t, last_read_sequence_number); EventPipeThreadSessionState * ep_thread_session_state_alloc ( @@ -317,34 +320,22 @@ ep_thread_session_state_get_thread (const EventPipeThreadSessionState *thread_se uint32_t ep_thread_session_state_get_buffer_count_estimate(const EventPipeThreadSessionState *thread_session_state); -// _Requires_lock_held (thread) +// _Requires_lock_held (buffer_manager) EventPipeBuffer * ep_thread_session_state_get_write_buffer (const EventPipeThreadSessionState *thread_session_state); -// _Requires_lock_held (thread) -void -ep_thread_session_state_set_write_buffer ( - EventPipeThreadSessionState *thread_session_state, - EventPipeBuffer *new_buffer); - -// _Requires_lock_held (buffer_manager) -EventPipeBufferList * -ep_thread_session_state_get_buffer_list (const EventPipeThreadSessionState *thread_session_state); +EventPipeBuffer * +ep_thread_session_state_get_volatile_write_buffer (const EventPipeThreadSessionState *thread_session_state); // _Requires_lock_held (buffer_manager) void 
-ep_thread_session_state_set_buffer_list ( +ep_thread_session_state_set_write_buffer ( EventPipeThreadSessionState *thread_session_state, - EventPipeBufferList *new_buffer_list); + EventPipeBuffer *new_buffer); uint32_t ep_thread_session_state_get_volatile_sequence_number (const EventPipeThreadSessionState *thread_session_state); -// _Requires_lock_held (thread) -uint32_t -ep_thread_session_state_get_sequence_number (const EventPipeThreadSessionState *thread_session_state); - -// _Requires_lock_held (thread) void ep_thread_session_state_increment_sequence_number (EventPipeThreadSessionState *thread_session_state); diff --git a/src/native/eventpipe/ep.c b/src/native/eventpipe/ep.c index b67c825b982a64..0b45cf280d3fdc 100644 --- a/src/native/eventpipe/ep.c +++ b/src/native/eventpipe/ep.c @@ -674,13 +674,13 @@ disable_holding_lock ( ep_volatile_store_allow_write (ep_volatile_load_allow_write () & ~(ep_session_get_mask (session))); - // Remove the session from the array before calling ep_session_suspend_write_event. This way + // Remove the session from the array before calling ep_session_wait_for_inflight_thread_ops. This way // we can guarantee that either the event write got the pointer and will complete // the write successfully, or it gets NULL and will bail. EP_ASSERT (ep_volatile_load_session (ep_session_get_index (session)) == session); ep_volatile_store_session (ep_session_get_index (session), NULL); - ep_session_suspend_write_event (session); + ep_session_wait_for_inflight_thread_ops (session); bool ignored; ep_session_write_all_buffers_to_file (session, &ignored); // Flush the buffers to the stream/file @@ -691,6 +691,12 @@ disable_holding_lock ( // been emitted. ep_session_write_sequence_point_unbuffered (session); + // The session is disabled but might still be referenced elsewhere. 
+ // As a newly allocated session may inherit this session's index, close the session to + // ensure all buffers are freed and detach all threads EventPipeThreadSessionStates + // so threads won't write to the closed session mistaking it as the new session. + ep_session_close (session); + ep_session_dec_ref (session); // Providers can't be deleted during tracing because they may be needed when serializing the file. @@ -813,6 +819,7 @@ write_event_2 ( EP_ASSERT (rundown_session != NULL); EP_ASSERT (thread != NULL); + ep_thread_set_session_use_in_progress (current_thread, ep_session_get_index (rundown_session)); uint8_t *data = ep_event_payload_get_flat_data (payload); if (thread != NULL && rundown_session != NULL && data != NULL) { ep_session_write_event ( @@ -825,6 +832,7 @@ write_event_2 ( event_thread, stack); } + ep_thread_set_session_use_in_progress (current_thread, UINT32_MAX); } else { for (uint32_t i = 0; i < EP_MAX_NUMBER_OF_SESSIONS; ++i) { if ((ep_volatile_load_allow_write () & ((uint64_t)1 << i)) == 0) @@ -833,9 +841,9 @@ write_event_2 ( // Now that we know this session is probably live we pay the perf cost of the memory barriers // Setting this flag lets a thread trying to do a concurrent disable that it is not safe to delete // session ID i. The if check above also ensures that once the session is unpublished this thread - // will eventually stop ever storing ID i into the WriteInProgress flag. This is important to - // guarantee termination of the YIELD_WHILE loop in SuspendWriteEvents. - ep_thread_set_session_write_in_progress (current_thread, i); + // will eventually stop ever storing ID i into the session_use_in_progress flag. This is important to + // guarantee termination of the YIELD_WHILE loop in ep_session_wait_for_inflight_thread_ops. 
+ ep_thread_set_session_use_in_progress (current_thread, i); { EventPipeSession *const session = ep_volatile_load_session (i); // Disable is allowed to set s_pSessions[i] = NULL at any time and that may have occurred in between @@ -852,9 +860,9 @@ write_event_2 ( stack); } } - // Do not reference session past this point, we are signaling Disable() that it is safe to + // Do not reference session past this point, we are signaling disable_holding_lock that it is safe to // delete it - ep_thread_set_session_write_in_progress (current_thread, UINT32_MAX); + ep_thread_set_session_use_in_progress (current_thread, UINT32_MAX); } } }