Commit 537e8e9

Merge pull request #127 from pipeless-ai/add_redis_event_exporter
feat: Add redis event exporter
2 parents 9c09485 + ce1b28c commit 537e8e9

File tree: 10 files changed, +267 −57 lines changed

pipeless/Cargo.lock (+55 −2)
Generated lockfile; diff not rendered by default.

pipeless/Cargo.toml (+2 −1)

@@ -1,6 +1,6 @@
 [package]
 name = "pipeless-ai"
-version = "1.6.3"
+version = "1.7.0"
 edition = "2021"
 authors = ["Miguel A. Cabrera Minagorri"]
 description = "An open-source computer vision framework to build and deploy applications in minutes"
@@ -47,6 +47,7 @@ gstreamer-rtsp = "0.21.0"
 inquire = "0.6.2"
 tabled = "0.15.0"
 ctrlc = "3.4.2"
+redis = { version = "0.24.0", features = ["aio", "tokio-comp"] }
 
 [dependencies.uuid]
 version = "1.4.1"
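
The new redis dependency is pulled in with the "aio" and "tokio-comp" features, i.e. the asynchronous client driven by the Tokio runtime. As a rough, hedged sketch of what that API provides (not code from this PR; the URL is a placeholder), opening an async connection looks roughly like this:

// Minimal sketch, not part of this commit: open an async Redis connection
// using the redis crate with the "aio" + "tokio-comp" features enabled.
async fn connect_example() -> redis::RedisResult<redis::aio::Connection> {
    // Placeholder URL; the PR reads PIPELESS_REDIS_URL from the environment instead.
    let client = redis::Client::open("redis://127.0.0.1:6379")?;
    client.get_async_connection().await
}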

pipeless/src/cli/start.rs (+19 −3)

@@ -1,5 +1,5 @@
 use pyo3;
-use std::sync::Arc;
+use std::{env, sync::Arc};
 use tokio::sync::RwLock;
 use gstreamer as gst;
 use glib;
@@ -8,7 +8,7 @@ use ctrlc;
 
 use crate as pipeless;
 
-pub fn start_pipeless_node(stages_dir: &str) {
+pub fn start_pipeless_node(project_dir: &str, export_redis_events: bool) {
     ctrlc::set_handler(|| {
         println!("Exiting...");
         std::process::exit(0);
@@ -24,11 +24,27 @@ pub fn start_pipeless_node(stages_dir: &str) {
     // Initialize Gstreamer
     gst::init().expect("Unable to initialize gstreamer");
 
-    let frame_path_executor = Arc::new(RwLock::new(pipeless::stages::path::FramePathExecutor::new(stages_dir)));
+    let frame_path_executor = Arc::new(RwLock::new(pipeless::stages::path::FramePathExecutor::new(project_dir)));
 
     // Init Tokio runtime
     let tokio_rt = tokio::runtime::Runtime::new().expect("Unable to create Tokio runtime");
     tokio_rt.block_on(async {
+        // Create event exporter when enabled
+        let event_exporter =
+            if export_redis_events {
+                let redis_url = env::var("PIPELESS_REDIS_URL")
+                    .expect("Please export the PIPELESS_REDIS_URL environment variable in order to export events to Redis");
+                let redis_channel = env::var("PIPELESS_REDIS_CHANNEL")
+                    .expect("Please export the PIPELESS_REDIS_CHANNEL environment variable in order to export events to Redis");
+                pipeless::event_exporters::EventExporter::new_redis_exporter(&redis_url, &redis_channel).await
+            } else {
+                pipeless::event_exporters::EventExporter::new_none_exporter()
+            };
+        { // Context to lock the global event exporter in order to set it
+            let mut e_exp = pipeless::event_exporters::EVENT_EXPORTER.lock().await;
+            *e_exp = event_exporter;
+        }
+
         let streams_table = Arc::new(RwLock::new(pipeless::config::streams::StreamsTable::new()));
         let dispatcher = pipeless::dispatcher::Dispatcher::new(streams_table.clone());
         let dispatcher_sender = dispatcher.get_sender().clone();
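
The hunk above references a global EVENT_EXPORTER and the EventExporter::new_redis_exporter / new_none_exporter constructors, but the event_exporters module itself is not among the hunks shown in this view. Purely as an illustrative assumption, a shape that would satisfy those call sites (and the publish calls in the new events file further down) could look roughly like the sketch below; the once_cell crate for the global and every implementation detail here are guesses, not the PR's actual code:

// Hypothetical sketch only: one possible shape for the event_exporters module
// used by start.rs and the events helpers. The real implementation is not
// shown in this diff; `once_cell` is an assumed extra dependency.
use log::warn;
use once_cell::sync::Lazy;
use redis::AsyncCommands;
use tokio::sync::Mutex;

pub enum EventExporter {
    None,
    Redis { connection: redis::aio::Connection, channel: String },
}

impl EventExporter {
    pub fn new_none_exporter() -> Self {
        Self::None
    }

    pub async fn new_redis_exporter(redis_url: &str, channel: &str) -> Self {
        let client = redis::Client::open(redis_url)
            .expect("Invalid Redis URL");
        let connection = client.get_async_connection().await
            .expect("Unable to connect to Redis");
        Self::Redis { connection, channel: channel.to_string() }
    }

    // Publish a message through the configured exporter; a no-op when disabled.
    pub async fn publish(&mut self, message: &str) {
        if let Self::Redis { connection, channel } = self {
            let result: redis::RedisResult<i64> =
                connection.publish(channel.as_str(), message).await;
            if let Err(err) = result {
                warn!("Error publishing event to Redis: {}", err);
            }
        }
    }
}

// Global exporter, set once during node startup and locked wherever events are exported.
pub static EVENT_EXPORTER: Lazy<Mutex<EventExporter>> =
    Lazy::new(|| Mutex::new(EventExporter::new_none_exporter()));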

pipeless/src/dispatcher.rs (+55 −43)

@@ -27,7 +27,9 @@ pub struct Dispatcher {
     receiver: tokio_stream::wrappers::UnboundedReceiverStream<DispatcherEvent>,
 }
 impl Dispatcher {
-    pub fn new(streams_table: Arc<RwLock<pipeless::config::streams::StreamsTable>>) -> Self {
+    pub fn new(
+        streams_table: Arc<RwLock<pipeless::config::streams::StreamsTable>>,
+    ) -> Self {
         let (sender, receiver) = tokio::sync::mpsc::unbounded_channel::<DispatcherEvent>();
         Self {
             sender,
@@ -36,7 +38,6 @@ impl Dispatcher {
             ),
             streams_table
         }
-
     }
 
     pub fn get_sender(&self) -> tokio::sync::mpsc::UnboundedSender<DispatcherEvent> {
@@ -66,7 +67,7 @@ impl Dispatcher {
 
 pub fn start(
     dispatcher: Dispatcher,
-    frame_path_executor_arc: Arc<RwLock<pipeless::stages::path::FramePathExecutor>>
+    frame_path_executor_arc: Arc<RwLock<pipeless::stages::path::FramePathExecutor>>,
 ) {
     let running_managers: Arc<RwLock<HashMap<uuid::Uuid, pipeless::pipeline::Manager>>> = Arc::new(RwLock::new(HashMap::new()));
     let frame_path_executor_arc = frame_path_executor_arc.clone();
@@ -152,6 +153,7 @@ pub fn start(
                         new_manager.get_pipeline_id().await
                     ) {
                         error!("Error adding new stream to the streams config table: {}", err);
+                        pipeless::event_exporters::events::export_stream_start_error_event(entry.get_id()).await;
                     }
                     let mut managers_map_guard = running_managers.write().await;
                     managers_map_guard.insert(new_manager.get_pipeline_id().await, new_manager);
@@ -160,6 +162,7 @@ pub fn start(
                         error!("Unable to create new pipeline: {}. Rolling back streams configuration.", err.to_string());
                         let removed = streams_table_guard.remove(entry.get_id());
                         if removed.is_none() { warn!("Error rolling back table, entry not found.") };
+                        pipeless::event_exporters::events::export_stream_start_error_event(entry.get_id()).await;
                     }
                 }
             },
@@ -195,50 +198,59 @@ pub fn start(
                     }
                 }
                DispatcherEvent::PipelineFinished(pipeline_id, finish_state) => {
-                    let mut table_write_guard = streams_table.write().await;
-                    let stream_entry_option = table_write_guard.find_by_pipeline_id_mut(pipeline_id);
-                    if let Some(entry) = stream_entry_option {
-                        // Remove the pipeline from the stream entry since it finished
-                        entry.unassign_pipeline();
-
-                        // Update the target state of the stream based on the restart policy
-                        match entry.get_restart_policy() {
-                            pipeless::config::streams::RestartPolicy::Never => {
-                                match finish_state {
-                                    pipeless::pipeline::PipelineEndReason::Completed => entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed),
-                                    pipeless::pipeline::PipelineEndReason::Error => entry.set_target_state(pipeless::config::streams::StreamEntryState::Error),
-                                    pipeless::pipeline::PipelineEndReason::Updated => entry.set_target_state(pipeless::config::streams::StreamEntryState::Running),
-                                }
-                            },
-                            pipeless::config::streams::RestartPolicy::Always => {
-                                entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
-                            },
-                            pipeless::config::streams::RestartPolicy::OnError => {
-                                if finish_state == pipeless::pipeline::PipelineEndReason::Error {
-                                    entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
-                                } else {
-                                    entry.set_target_state(pipeless::config::streams::StreamEntryState::Error);
-                                }
-                            },
-                            pipeless::config::streams::RestartPolicy::OnEos => {
-                                if finish_state == pipeless::pipeline::PipelineEndReason::Completed {
+                    let mut stream_uuid: Option<uuid::Uuid> = None;
+                    { // context to release the write lock
+                        let mut table_write_guard = streams_table.write().await;
+                        let stream_entry_option = table_write_guard.find_by_pipeline_id_mut(pipeline_id);
+                        if let Some(entry) = stream_entry_option {
+                            stream_uuid = Some(entry.get_id());
+                            // Remove the pipeline from the stream entry since it finished
+                            entry.unassign_pipeline();
+
+                            // Update the target state of the stream based on the restart policy
+                            match entry.get_restart_policy() {
+                                pipeless::config::streams::RestartPolicy::Never => {
+                                    match finish_state {
+                                        pipeless::pipeline::PipelineEndReason::Completed => entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed),
+                                        pipeless::pipeline::PipelineEndReason::Error => entry.set_target_state(pipeless::config::streams::StreamEntryState::Error),
+                                        pipeless::pipeline::PipelineEndReason::Updated => entry.set_target_state(pipeless::config::streams::StreamEntryState::Running),
+                                    }
+                                },
+                                pipeless::config::streams::RestartPolicy::Always => {
                                     entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
-                                } else {
-                                    entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed);
-                                }
-                            },
-                        }
+                                },
+                                pipeless::config::streams::RestartPolicy::OnError => {
+                                    if finish_state == pipeless::pipeline::PipelineEndReason::Error {
+                                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
+                                    } else {
+                                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Error);
+                                    }
+                                },
+                                pipeless::config::streams::RestartPolicy::OnEos => {
+                                    if finish_state == pipeless::pipeline::PipelineEndReason::Completed {
+                                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Running);
+                                    } else {
+                                        entry.set_target_state(pipeless::config::streams::StreamEntryState::Completed);
+                                    }
+                                },
+                            }
 
-                        // Create new event since we have modified the streams config table
-                        if let Err(err) = dispatcher_sender.send(DispatcherEvent::TableChange) {
-                            warn!("Unable to send dispatcher event for streams table changed. Error: {}", err.to_string());
+                            // Create new event since we have modified the streams config table
+                            if let Err(err) = dispatcher_sender.send(DispatcherEvent::TableChange) {
+                                warn!("Unable to send dispatcher event for streams table changed. Error: {}", err.to_string());
+                            }
+                        } else {
+                            warn!("
+                                Unable to unassign pipeline for stream. Stream entry not found.
+                                Pipeline id: {}
+                            ", pipeline_id);
                         }
-                    } else {
-                        warn!("
-                            Unable to unassign pipeline for stream. Stream entry not found.
-                            Pipeline id: {}
-                        ", pipeline_id);
                     }
+
+                    pipeless::event_exporters::events::export_stream_finished_event(
+                        stream_uuid.unwrap_or_default(),
+                        finish_state.to_string().as_str()
+                    ).await;
                 }
             }
         }

New file (+49)

@@ -0,0 +1,49 @@
+use std::fmt;
+use log::warn;
+
+pub enum EventType {
+    StreamStartError,
+    StreamFinished,
+}
+impl fmt::Display for EventType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            EventType::StreamStartError => write!(f, "StreamStartError"),
+            EventType::StreamFinished => write!(f, "StreamFinished"),
+        }
+    }
+}
+
+/*
+ * Exports a stream finished event to the external event exporter when it is enabled
+ */
+pub async fn export_stream_finished_event(stream_uuid: uuid::Uuid, stream_end_state: &str) {
+    let ext_event: serde_json::Value = serde_json::json!({
+        "type": EventType::StreamFinished.to_string(),
+        "end_state": stream_end_state,
+        "stream_uuid": stream_uuid.to_string(),
+    });
+    let ext_event_json_str = serde_json::to_string(&ext_event);
+    if let Ok(json_str) = ext_event_json_str {
+        super::EVENT_EXPORTER.lock().await.publish(&json_str).await;
+    } else {
+        warn!("Error serializing event to JSON string, skipping external publishing");
+    }
+}
+
+/*
+ * Exports a stream start error event to the external event exporter when it is enabled
+ */
+pub async fn export_stream_start_error_event(stream_uuid: uuid::Uuid) {
+    let ext_event: serde_json::Value = serde_json::json!({
+        "type": EventType::StreamStartError.to_string(),
+        "end_state": "error",
+        "stream_uuid": stream_uuid.to_string(),
+    });
+    let ext_event_json_str = serde_json::to_string(&ext_event);
+    if let Ok(json_str) = ext_event_json_str {
+        super::EVENT_EXPORTER.lock().await.publish(&json_str).await;
+    } else {
+        warn!("Error serializing event to JSON string, skipping external publishing");
+    }
+}
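
These helpers publish small JSON payloads ("type", "end_state", "stream_uuid") to the configured channel. As a hedged sketch of the consumer side, which is not part of this commit, subscribing to that channel with the redis crate's async pub/sub could look roughly like this (URL and channel name are placeholders, and futures-util is an assumed extra dependency for the message stream):

// Illustrative consumer sketch (not part of this PR): subscribe to the
// exporter's channel and print each JSON event as it arrives.
use futures_util::StreamExt; // assumed dependency providing `.next()` on streams

async fn consume_events() -> redis::RedisResult<()> {
    // Placeholder values; a real consumer would use the same URL and channel
    // configured through PIPELESS_REDIS_URL and PIPELESS_REDIS_CHANNEL.
    let client = redis::Client::open("redis://127.0.0.1:6379")?;
    let mut pubsub = client.get_async_connection().await?.into_pubsub();
    pubsub.subscribe("pipeless-events").await?;

    let mut messages = pubsub.on_message();
    while let Some(msg) = messages.next().await {
        let payload: String = msg.get_payload()?;
        println!("pipeless event: {}", payload);
    }
    Ok(())
}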
