From 1c3c264706ad122fc9870aa40b931a6b14dfc55a Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 21:42:39 -0500 Subject: [PATCH 01/34] Added default icons --- .../static/appIcon.png | Bin 0 -> 854 bytes .../static/appIconAlt.png | Bin 0 -> 854 bytes .../static/appIconAlt_2x.png | Bin 0 -> 2190 bytes .../static/appIcon_2x.png | Bin 0 -> 2190 bytes 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 Supply Chain/App/supply_chain_soln_accelerator/static/appIcon.png create mode 100644 Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt.png create mode 100644 Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt_2x.png create mode 100644 Supply Chain/App/supply_chain_soln_accelerator/static/appIcon_2x.png diff --git a/Supply Chain/App/supply_chain_soln_accelerator/static/appIcon.png b/Supply Chain/App/supply_chain_soln_accelerator/static/appIcon.png new file mode 100644 index 0000000000000000000000000000000000000000..6c3e6b9a7cfc799897a3118202809aaad9481b96 GIT binary patch literal 854 zcmV-c1F8IpP)%88K2n)Nu_{fX>m%2MM)m7Cs8za(Z z73niP(%*pc`TYC8U~Fe+$0OzwL%)Fj$nk$O6pzQX&qyR9udlDtY&HYiFf>C00|TJUl$~ z%mAzbuv)G9t_ckYb`A~>ypHGR=LL{3z~OIiZ*qNoEtN{elW}crP1pEUAVA18F)<(+(9+V9JU>6HZ|r^it>KPDOLRaLBF*P$YBc)QQ zYqoHp$!*@rWHQP&ITjiai}Atm@UU(KJ9OJi5tKCJni=Vt&`^Mw7!sM$$1{yaL&5QP zB=IZvJ$D!|y2XL5?^!ZHAQfG@DQ%tm0dko%?;sg5(0bqX42T2G)P$Rjx0^+sK)EB1 z$w}6$qnggVL%JJF0ED6S^>uHr(17d{ zVs>^`Z*8(^Zf?$#3}C0Hr@FrxA7FTnJCK4UvdaRrmDNC|b{BI9*8trDWN>g$vx&)} z_$pXfSdgo$D`gI5@S9y7olF-RP&-qa=?#$kF$TMU?8Hpq9BS=|0|+^CghR+EezrJ3 z8nO$I0R+#P|NKTWG(+t+#ZyQJSf=hrKk|Kp=Nk|IDnsAE;Vu0I)Q!cC40T)h6VLnm g|3g2)K({kK0MC#mLult31ONa407*qoM6N<$f+882*Z=?k literal 0 HcmV?d00001 diff --git a/Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt.png b/Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt.png new file mode 100644 index 0000000000000000000000000000000000000000..6c3e6b9a7cfc799897a3118202809aaad9481b96 GIT binary patch literal 854 zcmV-c1F8IpP)%88K2n)Nu_{fX>m%2MM)m7Cs8za(Z z73niP(%*pc`TYC8U~Fe+$0OzwL%)Fj$nk$O6pzQX&qyR9udlDtY&HYiFf>C00|TJUl$~ z%mAzbuv)G9t_ckYb`A~>ypHGR=LL{3z~OIiZ*qNoEtN{elW}crP1pEUAVA18F)<(+(9+V9JU>6HZ|r^it>KPDOLRaLBF*P$YBc)QQ zYqoHp$!*@rWHQP&ITjiai}Atm@UU(KJ9OJi5tKCJni=Vt&`^Mw7!sM$$1{yaL&5QP zB=IZvJ$D!|y2XL5?^!ZHAQfG@DQ%tm0dko%?;sg5(0bqX42T2G)P$Rjx0^+sK)EB1 z$w}6$qnggVL%JJF0ED6S^>uHr(17d{ zVs>^`Z*8(^Zf?$#3}C0Hr@FrxA7FTnJCK4UvdaRrmDNC|b{BI9*8trDWN>g$vx&)} z_$pXfSdgo$D`gI5@S9y7olF-RP&-qa=?#$kF$TMU?8Hpq9BS=|0|+^CghR+EezrJ3 z8nO$I0R+#P|NKTWG(+t+#ZyQJSf=hrKk|Kp=Nk|IDnsAE;Vu0I)Q!cC40T)h6VLnm g|3g2)K({kK0MC#mLult31ONa407*qoM6N<$f+882*Z=?k literal 0 HcmV?d00001 diff --git a/Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt_2x.png b/Supply Chain/App/supply_chain_soln_accelerator/static/appIconAlt_2x.png new file mode 100644 index 0000000000000000000000000000000000000000..6dc0a15bf61a0fe38a12892e96545285054711f1 GIT binary patch literal 2190 zcmV;92yyp`P)qvHw&(ovZR&eO>iP$#B0$s`R2VhLrQl`)zoGp zur%Q_8-WR+7KTg!O#lruk%>WTWze;2*M1Mf{_pzs)vH&FWNBp31kk?$o&frf=o5ol z6h8qp0W>TrObq&)8MJll)@s+TT`QLG{{8#v+qZASXwl=_e>8)RA3t7g+qP{P5P*I7 z@S%G4>{*W`4(qzE04*&oReSgDO^g2V>pxwK7SBDQDt~PGmm>6>J-n|M#d=3Qf0eJuZ{p$1Q z&j}E6J8|Mf0^sY{uhpkdpDL33{{4Fe%uSm%C7_=_f1W^}M_V{R06BT`WCGyj%a`@TzhJ9q9(VCn-TVD8_)zdCT>KmzsQ!-s>} zv~Yj`apA&+Bokn0C?XR?bM$PvFgc_C!PAw>a=ix$m7(hQ%Odp1&|pwd0|#akMCd?zal;CtIIDI8WJWs z(i_qi3Xnwu=iR$^tJt%7^XB9oQyr5X!;yFbSR~J3z5?KNfSHbdltWE&z=M*LnhaH0eR%e 
zk<{nz-MfiNf`El>W9MLw3J^d0d7(^F9~{Sbg^G+n2tpY{-M4zlvNR z;P9tBgSG)R3P2`x{`~nQ7m}*C0O!Sv7b~tu4TA|5Y*CLMJzDYgS$!}BV-$}~8B%eI zZFlb6X~QO69|a(bGm%@jZmp_oNR9OPQ0_%0gHamh@FZIcW8@7Rur2y9_S?5_FLxo^ z)2B~Yb?~DDNebTM$Bz?`ZEX~QWE)hLjYuraGnmCxO|ahb#bX)sE&;@3F{BB^kQ;L* z)o4*-m=9)lDN08H$b^7#@#4i~oP0sEU<*g@QYM)fKz@tV1j-&S8nywi4*Dz>jRsIi zNx3gl(zz{$$TQ|rni1*~CDVtc71&1fO9`7x%)JlB-tbPiX1|Z>sA#1>^ zmq@=$n=3#fLrV5b-ru-!BQ4gXjvhUl{*KlJT9--B>T~JRrQ~yw!m*GCMqLy>gk^$* z(X-CD=L(RzMV7({Q4Xs6Tyd6X6cNX=K2>cSc%?B(3KQ+@qbVpOm;j0+-O)V=5VBWY z-zY6|_0Rc|5}>|+Y6GI_dSuREu6`vs6tRS&CI7f5Qt>29(Eod#k?wGWL1z_>b2aIFae10&(6R|{$nROr)2N%qa)OD? zwme$gZtA+-7!4r$79|xc|B-(4dt3pgs58p0m2DHlqA1J_T3>zg@814x}=-xFm%^A(nevBA2w0O>3k+``^Z$ZA9q;!MPxOeWlN$j znW$a-TR%Ty=l}(dag+#vj#C5Mm;C0WapPG4CyTC^%8e$lMgzzXD$*>%(Ssdyepny3 zXnY35Flc@vJu`1$UB7;PIqQ)w=>P?I793SDnR?alW|#wJ0^E4!(r52CDnKIbCAuOm zTSdeH=rhr^fXH?c$x?PJ?aTp9HpT#S{@@h0xNaZDi%8L2x@ItQ85N*9Z7o_void7I zOf<5a$pg@4H*OK-2khh(cSWRYjKVnm0URtOD_a|WXM;TCq0fZ^6s(E;QC>ZZuEl$4 zGLqxcJav;0nK&KfZ{)(q>YGi5Z#>|Xo_*|M&e3P#0EIV+>a3?#vA{k`wu#4A;>2>+ zoe|@gs_H(V4Ez{^Pi1t>7%&gZv<7 zh_Fl*TLX0wKLQ@M#6z4+Hs6WIRzYbH4{-vBjC^1Ej=QJ8X4-uWxdi0G85EO<>|=pa zy~P129s>q|%*1c<=cJ{1o|8WY%$#LNWBHtPyB3>m>{>T~VqyT1)k+)H`pw6tqg!JE zGit83MfbVB?f}i2EGc4IWGM_Io&U{$iqICxINQ$q<<)cB_vdl_k7m$37C3Z_37{4P zCx9k^hKX!q(8QpLLBkN3H-ko<&0jS5^nZV83DyMA1kf-&PYjwEG>k$2045I4qaqf& QMgRZ+07*qoM6N<$f^LWzs{jB1 literal 0 HcmV?d00001 diff --git a/Supply Chain/App/supply_chain_soln_accelerator/static/appIcon_2x.png b/Supply Chain/App/supply_chain_soln_accelerator/static/appIcon_2x.png new file mode 100644 index 0000000000000000000000000000000000000000..6dc0a15bf61a0fe38a12892e96545285054711f1 GIT binary patch literal 2190 zcmV;92yyp`P)qvHw&(ovZR&eO>iP$#B0$s`R2VhLrQl`)zoGp zur%Q_8-WR+7KTg!O#lruk%>WTWze;2*M1Mf{_pzs)vH&FWNBp31kk?$o&frf=o5ol z6h8qp0W>TrObq&)8MJll)@s+TT`QLG{{8#v+qZASXwl=_e>8)RA3t7g+qP{P5P*I7 z@S%G4>{*W`4(qzE04*&oReSgDO^g2V>pxwK7SBDQDt~PGmm>6>J-n|M#d=3Qf0eJuZ{p$1Q z&j}E6J8|Mf0^sY{uhpkdpDL33{{4Fe%uSm%C7_=_f1W^}M_V{R06BT`WCGyj%a`@TzhJ9q9(VCn-TVD8_)zdCT>KmzsQ!-s>} zv~Yj`apA&+Bokn0C?XR?bM$PvFgc_C!PAw>a=ix$m7(hQ%Odp1&|pwd0|#akMCd?zal;CtIIDI8WJWs z(i_qi3Xnwu=iR$^tJt%7^XB9oQyr5X!;yFbSR~J3z5?KNfSHbdltWE&z=M*LnhaH0eR%e zk<{nz-MfiNf`El>W9MLw3J^d0d7(^F9~{Sbg^G+n2tpY{-M4zlvNR z;P9tBgSG)R3P2`x{`~nQ7m}*C0O!Sv7b~tu4TA|5Y*CLMJzDYgS$!}BV-$}~8B%eI zZFlb6X~QO69|a(bGm%@jZmp_oNR9OPQ0_%0gHamh@FZIcW8@7Rur2y9_S?5_FLxo^ z)2B~Yb?~DDNebTM$Bz?`ZEX~QWE)hLjYuraGnmCxO|ahb#bX)sE&;@3F{BB^kQ;L* z)o4*-m=9)lDN08H$b^7#@#4i~oP0sEU<*g@QYM)fKz@tV1j-&S8nywi4*Dz>jRsIi zNx3gl(zz{$$TQ|rni1*~CDVtc71&1fO9`7x%)JlB-tbPiX1|Z>sA#1>^ zmq@=$n=3#fLrV5b-ru-!BQ4gXjvhUl{*KlJT9--B>T~JRrQ~yw!m*GCMqLy>gk^$* z(X-CD=L(RzMV7({Q4Xs6Tyd6X6cNX=K2>cSc%?B(3KQ+@qbVpOm;j0+-O)V=5VBWY z-zY6|_0Rc|5}>|+Y6GI_dSuREu6`vs6tRS&CI7f5Qt>29(Eod#k?wGWL1z_>b2aIFae10&(6R|{$nROr)2N%qa)OD? 
zwme$gZtA+-7!4r$79|xc|B-(4dt3pgs58p0m2DHlqA1J_T3>zg@814x}=-xFm%^A(nevBA2w0O>3k+``^Z$ZA9q;!MPxOeWlN$j znW$a-TR%Ty=l}(dag+#vj#C5Mm;C0WapPG4CyTC^%8e$lMgzzXD$*>%(Ssdyepny3 zXnY35Flc@vJu`1$UB7;PIqQ)w=>P?I793SDnR?alW|#wJ0^E4!(r52CDnKIbCAuOm zTSdeH=rhr^fXH?c$x?PJ?aTp9HpT#S{@@h0xNaZDi%8L2x@ItQ85N*9Z7o_void7I zOf<5a$pg@4H*OK-2khh(cSWRYjKVnm0URtOD_a|WXM;TCq0fZ^6s(E;QC>ZZuEl$4 zGLqxcJav;0nK&KfZ{)(q>YGi5Z#>|Xo_*|M&e3P#0EIV+>a3?#vA{k`wu#4A;>2>+ zoe|@gs_H(V4Ez{^Pi1t>7%&gZv<7 zh_Fl*TLX0wKLQ@M#6z4+Hs6WIRzYbH4{-vBjC^1Ej=QJ8X4-uWxdi0G85EO<>|=pa zy~P129s>q|%*1c<=cJ{1o|8WY%$#LNWBHtPyB3>m>{>T~VqyT1)k+)H`pw6tqg!JE zGit83MfbVB?f}i2EGc4IWGM_Io&U{$iqICxINQ$q<<)cB_vdl_k7m$37C3Z_37{4P zCx9k^hKX!q(8QpLLBkN3H-ko<&0jS5^nZV83DyMA1kf-&PYjwEG>k$2045I4qaqf& QMgRZ+07*qoM6N<$f^LWzs{jB1 literal 0 HcmV?d00001 From e0c96515243a21ee75ab744e1005c681638b3e5f Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 21:46:51 -0500 Subject: [PATCH 02/34] Update inputs.conf - specify python version --- .../supply_chain_soln_accelerator/default/inputs.conf | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/inputs.conf b/Supply Chain/App/supply_chain_soln_accelerator/default/inputs.conf index 74f44f4..1111995 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/inputs.conf +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/inputs.conf @@ -3,7 +3,6 @@ disabled = false host = edi_gw index = supply_chain_edi sourcetype = edi:x12 -#sourcetype = edi_x12 # EDI Data Generation Scripts @@ -13,6 +12,7 @@ interval = 86400 host = edi_gw index = supply_chain_edi sourcetype = edi:x12 +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_stock_out_supplier.py] disabled = 0 @@ -21,6 +21,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_supplier +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_stock_out_customer.py] disabled = 0 @@ -29,6 +30,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_full_order_to_cash.py] disabled = 0 @@ -37,6 +39,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer_full +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_full_order_to_cash_missing_997.py] disabled = 0 @@ -45,6 +48,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer_full +python.version = python3 # SAP iDocs Data Generation Scripts [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/sap_idoc_stream.py] @@ -53,7 +57,7 @@ interval = 86400 host = sap_idoc index = supply_chain_idoc sourcetype = sap:idoc -#sourcetype = sap_idoc +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_po_order_change.py] disabled = 0 @@ -62,6 +66,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer_full +python.version = python3 [script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_retry_loop.py] disabled = 0 @@ -70,6 +75,7 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer_full +python.version = python3 
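The python.version = python3 line added to each scripted-input stanza above pins the interpreter Splunk uses to launch the script, which keeps these data generators running on deployments where Python 2 is unavailable. A minimal sketch of the stanza shape the hunks converge on (the script name example_stream.py is illustrative, not one of the app's real generators):

[script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/example_stream.py]
# Run once per day and write events to the EDI index with the X12 sourcetype.
disabled = 0
interval = 86400
host = edi_gw
index = supply_chain_edi
sourcetype = edi:x12
# Pin the interpreter for this scripted input; python.version accepts python2 or python3.
python.version = python3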
[script://$SPLUNK_HOME/etc/apps/supply_chain_edi/bin/edi_stream_scene_full_order_discrepancy.py] disabled = 0 @@ -78,3 +84,4 @@ host = edi_gw index = supply_chain_edi sourcetype = edi:x12 source = edi_quantumline_customer_full +python.version = python3 From 20aa8afbb74071d49d54e1e56765f07ca4186271 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 21:52:18 -0500 Subject: [PATCH 03/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_data_exp_raw_edi_all.xml.bk --- .../views/sa_sc_data_exp_raw_edi_all.xml.bk | 399 ------------------ 1 file changed, 399 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_data_exp_raw_edi_all.xml.bk diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_data_exp_raw_edi_all.xml.bk b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_data_exp_raw_edi_all.xml.bk deleted file mode 100644 index f6bdd50..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_data_exp_raw_edi_all.xml.bk +++ /dev/null @@ -1,399 +0,0 @@ -
- - Data Exploration Tool for Conversation Detail Events : Notification API (Real-time, For only conversations assigned to a queue) - - `get_edi_x12_index` $INPUT_CUST_FILTER_01$* -| rex field=_raw max_match=100 "\n(?<edi_po_record>PO\d\|.*~)\s*" -| rex field=_raw max_match=100 "\n(?<edi_n1_record>N1\|.*~)\s*" -| rex field=_raw max_match=100 "\n(?<edi_hl_record>HL\|.*~\s*\nLIN\|.*~\s*\nSN\d\|.*~)" -| eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status -| eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) -| eval raw_record = _raw -| stats count as evt_count, first(_time) as _time, latest(_time) as time_last, earliest(_time) as time_first, values(host) as host, values(source) as source, values(sourcetype) as sourcetype, last(edi_code) as edi_code, values(edi_cont_num) as edi_cont_num, dc(edi_code) as edi_code_cnt, values(edi_date) as edi_date, values(edi_flag) as edi_flag, first(edi_requestor) as edi_requestor, first(edi_responder) as edi_responder, values(edi_sequence) as edi_sequence, values(edi_time) as edi_time, values(edi_code_ack) as edi_code_ack, values(edi_ack_status) as edi_ack_status, values(bsn_ship_time) as bsn_ship_time, values(bsn_ship_id) as bsn_ship_id, values(bsn_ship_date) as bsn_ship_date, values(bsn_orig_asn) as bsn_orig_asn, values(edi_po_ack) as edi_po_ack, values(iea_int_cont_num) as iea_int_cont_num, values(iea_num_func_group) as iea_num_func_group, values(ge_grp_cont_num) as ge_grp_cont_num, values(ge_trans_set_num) as ge_trans_set_num, values(se_tran_cont_num) as se_tran_cont_num, values(edi_po_record) as edi_po_record, values(edi_po_item_cnt) as edi_po_item_cnt, values(edi_po_curr_amt) as edi_po_curr_amt, values(edi_n1_record) as edi_n1_record, values(edi_hl_record) as edi_hl_record, values(edi_ack_status_combo) as edi_ack_status_combo, values(raw_record) as raw_record by edi_tr_id edi_code_groupby -| eval ack_time_took=time_last-time_first - - - $INPUT_TIME_GLOBAL.earliest$ - $INPUT_TIME_GLOBAL.latest$ - - - index=genesys_sum source=genesys_evt_sum_conv_aggregates metric="$INPUT_METRIC_TYPE_T$" group.ani="*$INPUT_ANI$*" group.conversationId="*$INPUT_CONV_ID$*" | fields + * - $INPUT_TIME_GLOBAL.earliest$ - $INPUT_TIME_GLOBAL.latest$ - - - index=genesys_sum source=genesys_evt_sum_conv_aggregates metric="$INPUT_METRIC_TYPE_O$" group.ani="*$INPUT_ANI$*" group.conversationId="*$INPUT_CONV_ID$*" | fields + * - $INPUT_TIME_GLOBAL.earliest$ - $INPUT_TIME_GLOBAL.latest$ - -
- - - * - * - - - - raw_record - edi_hl_record - source - sourcetype - raw_record,edi_hl_record,source,sourcetype - raw_record,edi_hl_record,source,sourcetype - | fields - - - - - - - - - - -7d@h - now - - -
- - - - -
-

- Detailed EDI Transaction Records -

- -
-
- EDI Events Chart - -

Common EDI Events

EDI Code | Description
EDI 810 | Invoice - Used by suppliers to request payment after goods or services are provided.
EDI 850 | Purchase Order - Places an order for goods or services from a buyer to a supplier.
EDI 855 | Purchase Order Acknowledgment - Confirms receipt and status of a purchase order.
EDI 856 | Advance Ship Notice (ASN) - Provides shipment details including contents and delivery information.
EDI 997 | Functional Acknowledgment - Confirms receipt of other EDI transactions, validating transmission success.
EDI 846 | Inventory Inquiry/Advice - Shares inventory status or availability from a supplier to a buyer.
EDI 830 | Planning Schedule - Conveys forecasted demands or long-term planning details to suppliers.
EDI 820 | Payment Order/Remittance Advice - Authorizes payment or notifies about payments made.
EDI 832 | Price/Sales Catalog - Provides price and product information from suppliers to buyers.
EDI 860 | Purchase Order Change Request - Requests changes to an existing purchase order.
EDI 214 | Transportation Carrier Shipment Status - Updates on shipment status from carriers to shippers.
EDI 945 | Warehouse Shipping Advice - Informs the supplier or retailer that goods have been shipped.
EDI 943 | Warehouse Stock Transfer Shipment Advice - Notifies of a transfer or shipment between warehouses.
EDI 940 | Warehouse Shipping Order - Instructs a warehouse to ship product to a specified location.
EDI 204 | Motor Carrier Load Tender - Provides details to a transportation carrier about shipment logistics.
EDI 210 | Motor Carrier Freight Details and Invoice - Sends freight charges from shipper to carrier.
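Several dashboards in the later patches surface these labels through fields such as edi_name and edi_desc. A sketch of one way to attach the descriptions in SPL with eval case() (an assumption for illustration; the app may instead populate these fields via a lookup):

| eval edi_name=case(edi_code==850, "Purchase Order", edi_code==855, "Purchase Order Acknowledgment", edi_code==856, "Advance Ship Notice", edi_code==997, "Functional Acknowledgment", true(), "Other EDI ".edi_code)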

- SVC stands for Splunk Virtual Compute, a unit of capacity that bundles the following Splunk Cloud resources: compute, memory, and I/O.

-

- If you click on 3.1 Show License, Utilization and Cost, the app retrieves the day's total SVC utilization from the Cloud Monitoring Console (CMC), along with the following breakdown of that utilization:

-
  1. Search Utilization or Search Workload
  2. Ingestion Utilization or Indexing Workload
  3. Basic Services or Shared Services, which is everything else
-

- Launch the Cloud Monitoring Console (CMC); from the License Usage menu dropdown, open the Workload dashboard. The first panel, SVC Usage, shows hourly SVC usage.

-

Scroll down to the SVC Consumer panel, which shows SVC usage for ingestion, search, and shared services.

-

The Setup/Help tab in this dashboard has an acronym and help panel with additional information.

-
-
- - - - - - - - table _time * -$INPUT_EXCL_FIELDS$ - - - - - - - - - - - {"TELECOM_PROBLEM":#DC4E41,"CUSTOMER_DISCONNECT":#53A051,"AGENT_DISCONNECT":#F8BE34,"THIRD_PARTY_DISCONNECT":#F8BE34,"CONTACT_FLOW_DISCONNECT":#DC4E41,"OTHER":#DC4E41} - - - {"AVAILABLE":#53A051,"DELETED":#DC4E41,"NULL":#F8BE34} - -
-
-
- - - Contact Record Event Timeline - - - `get_edi_x12_index` -| rex field=_raw max_match=100 "\n(?<edi_po_record>PO\d\|.*~)\s*" -| rex field=_raw max_match=100 "\n(?<edi_n1_record>N1\|.*~)\s*" -| rex field=_raw max_match=100 "\n(?<edi_hl_record>HL\|.*~\s*\nLIN\|.*~\s*\nSN\d\|.*~)" -| eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status -| eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) -| eval raw_record = _raw ```| table _time host source sourcetype edi_buyer edi_code edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_code_ack edi_ack_status bsn_ship_time bsn_ship_id bsn_ship_date bsn_orig_asn edi_po_ack iea_int_cont_num iea_num_func_group ge_grp_cont_num ge_trans_set_num se_tran_cont_num edi_po_record edi_po_item_cnt edi_po_curr_amt edi_n1_record edi_hl_record edi_ack_status_combo edi_code_groupby raw_record``` -| stats earliest(_time) as _time, latest(_time) as time_last, earliest(_time) as time_first, values(host) as host, values(source) as source, values(sourcetype) as sourcetype, values(edi_buyer) as buyer, values(edi_code) as edi_code, values(edi_cont_num) as edi_cont_num, dc(edi_code) as edi_code_cnt, values(edi_date) as edi_date, values(edi_flag) as edi_flag, values(edi_requestor) as edi_requestor, values(edi_responder) as edi_responder, values(edi_seller) as edi_seller, values(edi_sequence) as edi_sequence, values(edi_time) as edi_time, values(edi_code_ack) as edi_code_ack, values(edi_ack_status) as edi_ack_status, values(bsn_ship_time) as bsn_ship_time, values(bsn_ship_id) as bsn_ship_id, values(bsn_ship_date) as bsn_ship_date, values(bsn_orig_asn) as bsn_orig_asn, values(edi_po_ack) as edi_po_ack, values(iea_int_cont_num) as iea_int_cont_num, values(iea_num_func_group) as iea_num_func_group, values(ge_grp_cont_num) as ge_grp_cont_num, values(ge_trans_set_num) as ge_trans_set_num, values(se_tran_cont_num) as se_tran_cont_num, values(edi_po_record) as edi_po_record, values(edi_po_item_cnt) as edi_po_item_cnt, values(edi_po_curr_amt) as edi_po_curr_amt, values(edi_n1_record) as edi_n1_record, values(edi_hl_record) as edi_hl_record, values(edi_ack_status_combo) as edi_ack_status_combo, values(raw_record) as raw_record by edi_tr_id edi_code_groupby -| eval ack_time_took=time_last-time_first -| eval _time=time_first - - - - - - - - - - - - - - - - - - - - - - - - - -
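The stats-by-transaction search above is the correlation pattern this app repeats across its dashboards: every EDI event sharing a transaction ID collapses into one row, and a 997 acknowledgment is grouped with the transaction code it acknowledges. A trimmed sketch of the core pattern, using the same macro and field names as the query above:

`get_edi_x12_index`
``` group a 997 with the code it acknowledges; otherwise group by the code itself ```
| eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack)
| stats earliest(_time) as time_first, latest(_time) as time_last, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby
``` seconds between the original transaction and its acknowledgment ```
| eval ack_time_took=time_last-time_first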
-

- Query Builder -

-
-
-

- SVC stands for Splunk Virtual Compute, a unit of capacity that bundles the following Splunk Cloud resources: compute, memory, and I/O.

-

- If you click on 3.1 Show License, Utilization and Cost, the app retrieves the day's total SVC utilization from the Cloud Monitoring Console (CMC), along with the following breakdown of that utilization:

-
  1. Search Utilization or Search Workload
  2. Ingestion Utilization or Indexing Workload
  3. Basic Services or Shared Services, which is everything else
-

- Launch the Cloud Monitoring Console (CMC); from the License Usage menu dropdown, open the Workload dashboard. The first panel, SVC Usage, shows hourly SVC usage.

-

Scroll down to the SVC Consumer panel, which shows SVC usage for ingestion, search, and shared services.

-

The Setup/Help tab in this dashboard has an acronym and help panel with additional information.

-
-
- - - - - - - - edi_tr_id - edi_tr_id - all_fields - all_fields - - | makeresults 1 -| eval input_field="ack_time_took bsn_orig_asn bsn_ship_date bsn_ship_id bsn_ship_time edi_ack_status edi_ack_status_combo edi_code edi_code_ack edi_code_cnt edi_code_groupby edi_cont_num edi_date edi_flag edi_n1_record edi_po_ack edi_po_curr_amt edi_po_item_cnt edi_po_record edi_requestor edi_responder edi_sequence edi_time edi_tr_id evt_count ge_grp_cont_num ge_trans_set_num host iea_int_cont_num iea_num_func_group se_tran_cont_num time_first time_last" -| rex field=input_field max_match=100 "(?<all_fields>\w+)" -| table all_fields -| mvexpand all_fields - - - - - Count - Count Distinct - Sum - Average - Min - Max - count - count - - - - edi_code - edi_code - all_fields - all_fields - - | makeresults 1 -| eval input_field="ack_time_took bsn_orig_asn bsn_ship_date bsn_ship_id bsn_ship_time edi_ack_status edi_ack_status_combo edi_code edi_code_ack edi_code_cnt edi_code_groupby edi_cont_num edi_date edi_flag edi_n1_record edi_po_ack edi_po_curr_amt edi_po_item_cnt edi_po_record edi_requestor edi_responder edi_sequence edi_time edi_tr_id evt_count ge_grp_cont_num ge_trans_set_num host iea_int_cont_num iea_num_func_group se_tran_cont_num time_first time_last" -| rex field=input_field max_match=100 "(?<all_fields>\w+)" -| table all_fields -| mvexpand all_fields - -15m - now - - - - - - - - - Area - Line - Column - Bar - area - area - - - O - Service Level Statistics - - timechart $INPUT_FUNC_01$($INPUT_FIELD_01$) by $INPUT_SPLIT_BY_01$ - - - - - - - - - - - - - - - - - bar - pie - bar - bar - - - O - Service Level Statistics - - stats $INPUT_FUNC_01$($INPUT_FIELD_01$) by $INPUT_SPLIT_BY_01$ -| sort - $INPUT_FUNC_01$($INPUT_FIELD_01$) - - - - - - - - - - - - - \ No newline at end of file From 9900fd5d0ac74875e6392ca49827b27c892156b9 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:00:37 -0500 Subject: [PATCH 04/34] Update sa_sc_exploration_demo_01.xml --- .../default/data/ui/views/sa_sc_exploration_demo_01.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml index 893b190..21e2006 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml @@ -1,4 +1,4 @@ - + From 1d3e7290f1204092632c6ffcbc1317e1ee906427 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:03:14 -0500 Subject: [PATCH 05/34] Update datamodels.conf removed data model acceleration --- .../App/supply_chain_soln_accelerator/default/datamodels.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/datamodels.conf b/Supply Chain/App/supply_chain_soln_accelerator/default/datamodels.conf index f5a7fd6..6271591 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/datamodels.conf +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/datamodels.conf @@ -7,7 +7,7 @@ dataset.fields = [{"name":"_time"},{"name":"bsn_orig_asn"},{"name":"bsn_ship_dat dataset.type = table [supply_chain_edi_sum_evt_edi_all] -acceleration = true +acceleration = false 
acceleration.earliest_time = 0 acceleration.hunk.dfs_block_size = 0 dataset.commands = [{"selectedMethod":"search","baseSPL":"index=supply_chain_sum_evt_edi_all","type":"initialdata","isComplete":true,"spl":"index=supply_chain_sum_evt_edi_all | fields \"_time\", \"ack_time_took\", \"bsn_orig_asn\", \"bsn_ship_date\", \"bsn_ship_id\", \"bsn_ship_time\", \"date_hour\", \"date_mday\", \"date_minute\", \"date_month\", \"date_second\", \"date_wday\", \"date_year\", \"date_zone\", \"edi_ack_status\", \"edi_ack_status_combo\", \"edi_code\", \"edi_code_ack\", \"edi_code_cnt\", \"edi_code_groupby\", \"edi_cont_num\", \"edi_date\", \"edi_desc\", \"edi_flag\", \"edi_hl_record\", \"edi_n1_record\", \"edi_name\", \"edi_po_ack\", \"edi_po_curr_amt\", \"edi_po_item_cnt\", \"edi_po_record\", \"edi_requestor\", \"edi_responder\", \"edi_sequence\", \"edi_time\", \"edi_tr_id\", \"evt_count\", \"ge_grp_cont_num\", \"ge_trans_set_num\", \"host\", \"iea_int_cont_num\", \"iea_num_func_group\", \"index\", \"info_search_time\", \"linecount\", \"orig_host\", \"orig_source\", \"orig_sourcetype\", \"raw_record\", \"se_tran_cont_num\", \"source\", \"sourcetype\", \"splunk_server\", \"SUM_TYPE\", \"time_first\", \"time_last\", \"timeendpos\", \"timestartpos\", \"_raw\"","columns":[{"name":"_time","type":"timestamp","id":"bf17bbfa-1695-46be-b88a-bbbbc84700f8"},{"type":"number","name":"ack_time_took","id":"8ce05439-760e-4733-ab4c-0a6ed6c7ccd9"},{"type":"number","name":"bsn_orig_asn","id":"9905440f-06d0-4f2c-bead-e5f841f2ec2c"},{"type":"number","name":"bsn_ship_date","id":"5f9472ba-3c4f-4716-9a82-be247e0d8f6b"},{"type":"string","name":"bsn_ship_id","id":"1f555354-0f2d-4cbd-87f7-076965b49ea6"},{"type":"number","name":"bsn_ship_time","id":"27b1fff5-d79b-481e-a17a-f652031733dc"},{"name":"date_hour","type":"number","id":"f95bc854-d037-449a-8199-e5ca432bbe00"},{"type":"number","name":"date_mday","id":"0f48ca86-7e7c-4199-93c8-e3efacfe0787"},{"type":"number","name":"date_minute","id":"132b80f7-5e3a-4e54-9c20-0fb3cb784456"},{"type":"string","name":"date_month","id":"c3f34c48-e089-4772-8a98-59f1adca9fc9"},{"type":"number","name":"date_second","id":"e641aa16-d145-4224-901d-f000b37e3d9f"},{"type":"string","name":"date_wday","id":"ff21f256-e51c-48a8-af44-1b0d425fb6e2"},{"type":"number","name":"date_year","id":"c8f42de1-2a19-4ab2-92d6-5cc8b55a0ba5"},{"type":"number","name":"date_zone","id":"64ec51d8-ce3b-4aa9-a231-e46cf65dddaf"},{"type":"string","name":"edi_ack_status","id":"d138d284-c340-4460-9acb-3e482fd28c08"},{"type":"string","name":"edi_ack_status_combo","id":"45f31b5c-1422-4384-9936-884c959220ee"},{"type":"number","name":"edi_code","id":"0b1f1d86-dd97-431d-b59d-e53e62a859a6"},{"type":"number","name":"edi_code_ack","id":"e0afd456-3d27-4129-a530-7a439aeff938"},{"type":"number","name":"edi_code_cnt","id":"ebfe896c-1d33-477f-9812-55976643e70d"},{"type":"number","name":"edi_code_groupby","id":"ef894a2c-c5f3-45fd-8026-83aad273f02f"},{"type":"string","name":"edi_cont_num","id":"bc580900-68e1-4530-a470-7a41a674c0b0"},{"type":"number","name":"edi_date","id":"d00592de-560a-4596-a45f-497368792953"},{"type":"string","name":"edi_desc","id":"de60f1fb-3768-41ef-be54-f6c37330f045"},{"type":"string","name":"edi_flag","id":"b3d139fa-6823-4367-b4fe-df520c0fb380"},{"type":"string","name":"edi_hl_record","id":"e174ec8a-9b6e-47a4-a48f-6abc58ca26fe"},{"type":"string","name":"edi_n1_record","id":"7a14222f-6c10-4629-9efa-84d66ddec398"},{"type":"string","name":"edi_name","id":"ef0e3dbb-213f-463f-bc10-c668f57565cf"},{"type":"string","name":"edi_po_ack","id
":"4af2f151-921e-4e5c-9af6-78b128ebc0da"},{"type":"number","name":"edi_po_curr_amt","id":"e56ce8cd-13f9-4ab6-abed-7a8cf05b9f2f"},{"type":"number","name":"edi_po_item_cnt","id":"3d541cb7-4ec5-465d-8aa7-12b5d447b23b"},{"type":"string","name":"edi_po_record","id":"b83b7cd1-6b8f-4ebb-a53d-864592394946"},{"type":"string","name":"edi_requestor","id":"3c2a790d-2e09-42d0-a990-3bc53d77bbb7"},{"type":"string","name":"edi_responder","id":"925ea97b-cc74-480f-afbd-edf6e0341834"},{"type":"string","name":"edi_sequence","id":"82bc39d4-50e7-412e-a81c-b2f104b47fe2"},{"type":"string","name":"edi_time","id":"7abb3d27-f8fb-4df0-8839-9f252bcf3a64"},{"type":"string","name":"edi_tr_id","id":"9dc39f5f-47a6-474e-aebf-40c4df30283b"},{"type":"number","name":"evt_count","id":"021f89ae-94f0-42d3-8fb4-ff8da4b78ebb"},{"type":"string","name":"ge_grp_cont_num","id":"100e0d13-fc6a-4774-9e54-9cddf162a0dc"},{"type":"number","name":"ge_trans_set_num","id":"84ae1884-3198-4cae-b330-290a645a8151"},{"name":"host","type":"string","id":"bb32c925-ba16-460f-b93d-0c0b4a3de2a9"},{"type":"string","name":"iea_int_cont_num","id":"2062b941-59af-4c31-b71b-104ff1d081e5"},{"type":"number","name":"iea_num_func_group","id":"fab7c1b8-4496-43d2-9023-caa9f12ea807"},{"type":"string","name":"index","id":"dd861aec-63f2-461c-82e3-75c4b93d3e6e"},{"type":"number","name":"info_search_time","id":"d0c9422d-58e9-4bc9-bfd0-d19abd4e6504"},{"type":"number","name":"linecount","id":"68a84ffc-3406-4a5c-85e5-24c90bb08876"},{"type":"string","name":"orig_host","id":"241c5a41-c5f6-4647-af31-a977c3e89f07"},{"type":"string","name":"orig_source","id":"8b4f9987-ea9a-4ddd-8f36-fe02e69ed00a"},{"type":"string","name":"orig_sourcetype","id":"d4c308dc-8a80-4d37-afda-84b2a8acb730"},{"type":"string","name":"raw_record","id":"16ec5458-7674-4602-aaaa-e5488aacbb5a"},{"type":"string","name":"se_tran_cont_num","id":"2dbf272c-1cf7-43c8-8143-c8c6d73b2077"},{"name":"source","type":"string","id":"1cf23d8d-034f-4b82-8e18-2de957119e55"},{"name":"sourcetype","type":"string","id":"917d68b4-f456-4495-b9f8-79dc278bb281"},{"type":"string","name":"splunk_server","id":"5d916333-abe0-4053-8d21-8ef0ed3588bf"},{"type":"string","name":"SUM_TYPE","id":"5d4710de-ec99-4dff-bc98-d8dff9509e98"},{"type":"number","name":"time_first","id":"b013485c-7eb2-491b-bf86-a5672b80997b"},{"type":"number","name":"time_last","id":"0acf1a4d-9349-49b4-bd68-2ef50658c90b"},{"name":"timeendpos","type":"number","id":"fdc1d262-7670-428c-a853-8fe00c28e0f6"},{"name":"timestartpos","type":"number","id":"79b78ab0-f232-4771-8d8f-1f8d2b71ffee"},{"name":"_raw","type":"raw","id":"898fccd5-4206-4725-a9f4-2c6215d08d7b"}],"requiredColumns":[],"editorValues":[]}] From e3ac6092b41c459bbcf4ba604d072e70dd4b75a6 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:08:10 -0500 Subject: [PATCH 06/34] Update macros.conf - added index macros --- .../default/macros.conf | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/macros.conf b/Supply Chain/App/supply_chain_soln_accelerator/default/macros.conf index ee03a33..707a110 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/macros.conf +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/macros.conf @@ -30,3 +30,18 @@ definition = `get_edi_x12_index` \ | eval ack_time_took=time_last-time_first \ | eval _time=time_first +[get_supply_chain_edi_index] +definition = index=supply_chain_edi +iseval = 0 + +[get_supply_chain_idoc_index] 
+definition = index=supply_chain_idoc +iseval = 0 + +[get_chip_qa_index] +definition = index=chip_qa +iseval = 0 + +[get_supply_chain_sum_evt_edi_all_index] +definition = index=supply_chain_sum_evt_edi_all +iseval = 0 From a7138859a0e79094fc1a0d11ec4fe20a71c8e90e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:14:19 -0500 Subject: [PATCH 07/34] Update sa_sc_01_mon_edi_00_intro.xml --- .../default/data/ui/views/sa_sc_01_mon_edi_00_intro.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_00_intro.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_00_intro.xml index 42df7d7..e0d5583 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_00_intro.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_00_intro.xml @@ -155,7 +155,7 @@ RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -202,4 +202,4 @@ - \ No newline at end of file + From 8bbb9ed873922ad3a182d0e643875ae3e9369c04 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:14:37 -0500 Subject: [PATCH 08/34] Update sa_sc_01_mon_edi_01_mon_edi_ack.xml --- .../data/ui/views/sa_sc_01_mon_edi_01_mon_edi_ack.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_01_mon_edi_ack.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_01_mon_edi_ack.xml index bd2bbf0..c3d0242 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_01_mon_edi_ack.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_01_mon_edi_ack.xml @@ -133,7 +133,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type edi_name | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -178,7 +178,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -212,7 +212,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -278,4 +278,4 @@ - \ No newline at end of file + From 7692429f9784abce636c32f23ed4747dc62c85cb Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:16:25 -0500 Subject: [PATCH 09/34] Update sa_sc_01_mon_edi_02_trans_succ_rat.xml --- .../views/sa_sc_01_mon_edi_02_trans_succ_rat.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_02_trans_succ_rat.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_02_trans_succ_rat.xml index 0d36a71..3b4f66b 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_02_trans_succ_rat.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_02_trans_succ_rat.xml @@ -110,7 +110,7 @@
RESULT ANALYSIS 01 : Trends on EDI response status - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -152,7 +152,7 @@ Trends on EDI response status - Visual Line Chart - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -194,7 +194,7 @@
RESULT ANALYSIS 01 : Trends on EDI transmission success rates - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -237,7 +237,7 @@ Trends on EDI transmission success rates w/ occurrences - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -284,7 +284,7 @@
RESULT ANALYSIS 01 : Times when response rates were below SLA. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -328,7 +328,7 @@ Trends when response rates were below SLA. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -382,4 +382,4 @@ - \ No newline at end of file + From 0c66ed07f605167dea7412553aeeea79626b9e74 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:17:29 -0500 Subject: [PATCH 10/34] Update sa_sc_01_mon_edi_03_trans_error_retry.xml --- .../ui/views/sa_sc_01_mon_edi_03_trans_error_retry.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_03_trans_error_retry.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_03_trans_error_retry.xml index 4db0d84..e866764 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_03_trans_error_retry.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_03_trans_error_retry.xml @@ -115,7 +115,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status "Error" or "Rejected". - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -156,7 +156,7 @@
RESULT ANALYSIS 02 : Stats trends on error + retry rate - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -204,7 +204,7 @@ Visualization on error + retry trend - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -254,4 +254,4 @@ - \ No newline at end of file + From c281e38ebbd8a672e24d1b5c51e5472792abf79a Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:18:36 -0500 Subject: [PATCH 11/34] Update sa_sc_01_mon_edi_04_edi_ack_timing.xml --- .../ui/views/sa_sc_01_mon_edi_04_edi_ack_timing.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_04_edi_ack_timing.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_04_edi_ack_timing.xml index 8b2373c..e453b7d 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_04_edi_ack_timing.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_04_edi_ack_timing.xml @@ -115,7 +115,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -144,7 +144,7 @@ RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_count by edi_tr_id edi_code_groupby @@ -184,7 +184,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -224,7 +224,7 @@ RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -271,4 +271,4 @@ - \ No newline at end of file + From 6f6286d70e8a2b0d1e6cecf19fcaf94e1aadf77e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:19:28 -0500 Subject: [PATCH 12/34] Update sa_sc_01_mon_edi_05_find_repeat_trans.xml --- .../ui/views/sa_sc_01_mon_edi_05_find_repeat_trans.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_05_find_repeat_trans.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_05_find_repeat_trans.xml index 2ff7a6d..fb18371 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_05_find_repeat_trans.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_01_mon_edi_05_find_repeat_trans.xml @@ -125,7 +125,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 + sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | strcat edi_code "-" edi_ack_status edi_event_pattern_combo @@ -173,8 +173,8 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" NOT edi_code=997 - [ search index=supply_chain_edi sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 + sourcetype="edi:x12" NOT edi_code=997 + [ search sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | strcat edi_code "-" edi_ack_status edi_event_pattern_combo @@ -229,4 +229,4 @@ - \ No newline at end of file + From de03b153fb3870a72b152add7e52d43db76789be Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:21:04 -0500 Subject: [PATCH 13/34] Update sa_sc_02_po_lifecycle_00_intro.xml --- .../default/data/ui/views/sa_sc_02_po_lifecycle_00_intro.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_00_intro.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_00_intro.xml index d142003..6110431 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_00_intro.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_00_intro.xml @@ -157,7 +157,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -203,4 +203,4 @@ - \ No newline at end of file + From cb4b977f5660a7f13b721001c9907744a1c5cd30 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:22:11 -0500 Subject: [PATCH 14/34] Update sa_sc_02_po_lifecycle_01_po_ack_verify.xml --- .../views/sa_sc_02_po_lifecycle_01_po_ack_verify.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_01_po_ack_verify.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_01_po_ack_verify.xml index 9159f24..21d4c2b 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_01_po_ack_verify.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_01_po_ack_verify.xml @@ -115,7 +115,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_name) as edi_name, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_cnt by edi_tr_id edi_code_groupby @@ -162,7 +162,7 @@
RESULT ANALYSIS 02 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_name) as edi_name, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_cnt by edi_tr_id edi_code_groupby @@ -190,7 +190,7 @@
Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_name) as edi_name, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_cnt by edi_tr_id edi_code_groupby @@ -219,7 +219,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_name) as edi_name, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_cnt by edi_tr_id edi_code_groupby @@ -262,7 +262,7 @@
RESULT ANALYSIS 02 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) edi_tr_id=TR* + sourcetype="edi:x12" edi_code IN (850, 997) edi_tr_id=TR* | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -321,4 +321,4 @@ - \ No newline at end of file + From 2c0be9e062ca65bc58cd81ec6b593f96a873913e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:23:13 -0500 Subject: [PATCH 15/34] Update sa_sc_02_po_lifecycle_02_po_sup_perf.xml --- .../views/sa_sc_02_po_lifecycle_02_po_sup_perf.xml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_02_po_sup_perf.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_02_po_sup_perf.xml index 3b04c8f..2531775 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_02_po_sup_perf.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_02_po_sup_perf.xml @@ -116,7 +116,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -172,7 +172,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -213,7 +213,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -258,7 +258,7 @@ RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, earliest(_time) as time_po, latest(_time) as time_po_ack, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, dc(edi_code) as edi_code_cnt, last(edi_ack_status) as edi_ack_status by edi_tr_id @@ -301,7 +301,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -362,4 +362,4 @@ - \ No newline at end of file + From 3a4de31c7f8eb0cc5114d498eafc9d0b43b2d4fd Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:24:12 -0500 Subject: [PATCH 16/34] Update sa_sc_02_po_lifecycle_03_po_no_res_sup.xml --- .../ui/views/sa_sc_02_po_lifecycle_03_po_no_res_sup.xml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_03_po_no_res_sup.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_03_po_no_res_sup.xml index 380644c..d992489 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_03_po_no_res_sup.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_03_po_no_res_sup.xml @@ -113,7 +113,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -167,7 +167,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -210,7 +210,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850 855) + sourcetype="edi:x12" edi_code IN (850 855) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -260,4 +260,4 @@ - \ No newline at end of file + From 95448fa3d61ef7564a57a2fe9fbf9715570097dd Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:25:28 -0500 Subject: [PATCH 17/34] Update sa_sc_02_po_lifecycle_04_po_change_mon.xml --- .../sa_sc_02_po_lifecycle_04_po_change_mon.xml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_04_po_change_mon.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_04_po_change_mon.xml index 300478a..bee71f6 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_04_po_change_mon.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_04_po_change_mon.xml @@ -114,7 +114,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | rex field=_raw max_match=100 "(?P<po_change_item>POC\|[^\~]*)~" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type po_item po_change_item @@ -170,7 +170,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | rex field=_raw max_match=100 "(?P<po_change_item>POC\|[^\~]*)~" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status @@ -207,7 +207,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | rex field=_raw max_match=100 "(?P<po_change_item>POC\|[^\~]*)~" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status @@ -227,7 +227,7 @@ - + @@ -237,7 +237,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | rex field=_raw max_match=100 "(?P<po_change_item>POC\|[^\~]*)~" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status @@ -287,7 +287,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (860, 850, 997) + sourcetype="edi:x12" edi_code IN (860, 850, 997) | rex field=_raw max_match=100 "(?P<po_item>PO1\|[^\~]*)~" | rex field=_raw max_match=100 "(?P<po_change_item>POC\|[^\~]*)~" | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status @@ -309,7 +309,7 @@ - + @@ -343,4 +343,4 @@ - \ No newline at end of file + From 97fb18276ca9fbafd0da6af68ce9a15debe5a58e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:26:22 -0500 Subject: [PATCH 18/34] Update sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml --- .../ui/views/sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml index 618c848..9e090cc 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_05_po_resp_succ_rate.xml @@ -115,7 +115,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -150,7 +150,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -200,4 +200,4 @@ - \ No newline at end of file + From ec684b3a629ac2025f9c880d61cd27c1a28ade6a Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:27:17 -0500 Subject: [PATCH 19/34] Update sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml --- .../views/sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml index 9c8d0d7..927c5ed 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_02_po_lifecycle_06_po_proc_error_rate.xml @@ -113,7 +113,7 @@
RESULT ANALYSIS 01 : EDI Transactions with response status determined. - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -149,7 +149,7 @@ Total stats on EDI response status (Chart) - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -198,4 +198,4 @@ - \ No newline at end of file + From 70cbbb4cbf790ae9e408a939e5f72bdd4400586e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:28:28 -0500 Subject: [PATCH 20/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_details.xml --- .../default/data/ui/views/sa_sc_details.xml | 185 ------------------ 1 file changed, 185 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_details.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_details.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_details.xml deleted file mode 100644 index 8aab79c..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_details.xml +++ /dev/null @@ -1,185 +0,0 @@ -
- -
- - - $events_frame_id$ - $asset$ - - - - $unit_cycle$ - - - - - 0 - - - -
- - - Trained Baseline Engine Profile - ML Algorithm (Random Forest) - - state 0=Normal 1=Warning 2=Critical - - `get_pm_event_frame(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| eval state=case(unit_cycle<=200, "0", unit_cycle>=201 AND unit_cycle<=275, "1", unit_cycle>275, "2") -| table sma4* state - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Drilled in Detailed Asset Performance Metrics - ID $asset$ - - Real-time data classified through ML Model (0=Normal 1=Warning 2=Critical) - - `get_pm_event_frame($events_frame_id$)` -| search unit_cycle<$unit_cycle$ -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| fields - unit_cycle - $TIME.earliest$ - $TIME.latest$ - 1 - - - - - - - - - - - - - - - - Box Plot - Showing major data ranges and spread for Training data profile - - - `get_pm_event_frame(194)` -| table SS* -| `boxplot` - 0 - 1 - - - - - - - - - Box Plot - Asset ID $asset$ feature distribution - - - `get_pm_event_frame($events_frame_id$)` -| search unit_cycle<$unit_cycle$ -| fields - unit_cycle -| `boxplot` - 0 - - 1 - - - - - - - - - - - - Asset Maintenance Records -
- Verify past maintenance activity records in work order system - - | inputlookup engine_maint_record.csv -| search asset_id=$events_frame_id$ -| table asset_id, date, record, operator - -24h@h - now - 1 - - - - - - - - - - - - -
-
- - Asset Maintenance Recommendation - - Recommendation guideline for current state engine - - | inputlookup engine_maint_recom.csv -| search asset_id=$events_frame_id$ -| table order recommendation - -24h@h - now - 1 - - - - - - - - -
-
-
- \ No newline at end of file From 2f612092d7a9298c511b1bd94ad6aab2a9953927 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:28:53 -0500 Subject: [PATCH 21/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_01.xml --- .../data/ui/views/sa_sc_analysis_01.xml | 779 ------------------ 1 file changed, 779 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_01.xml deleted file mode 100644 index 3e33468..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_01.xml +++ /dev/null @@ -1,779 +0,0 @@ - - - - - - -
    -

    - Understanding Predictive Maintenance Analytics Methodology
-

    - -

    Now that you understand the data and the problem to solve, the Analysis stage is where you identify the optimal techniques and methods to use to analyze the data. Depending on the condition of the data and the problem, different techniques must be applied. This section walks you through different techniques for applying analytics to a dataset that you want to use for predictive maintenance. -

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    Applying analytics for predictive maintenance is not a simple process, but you can follow a proven methodology for analyzing time series data from industrial machines.

    -

    Using the jet engine dataset in this app, you can apply 3 different statistical analysis or machine learning techniques to solve the problem. Those 3 methods are:

    -
      -
        -
      • Anomaly Detection - Statistical baselining of characteristics in data followed by evaluating the deviation from the baseline.
      • -
      • Unsupervised Learning - Tests data that has not been labeled or classified. Unsupervised learning identifies commonalities in the data and clusters results based on the presence or absence of commonalities in each new piece of data.
      • -
      • Supervised Learning - Create a model that can map an input to an output based on functions learned from a training dataset that is already labeled with valid or correct input-output pairs.
      • -
      -
    -
    - -

    In this section, you will learn how to plan and choose an analytics process for different predictive maintenance use cases and explore tools available for performing the analytics techniques.

    -

    - The goal of “Analysis” stage is for you to : -

      -
        -
      • Understand how different analytics techniques work
      • -
      • Be able to choose which analysis technique is appropriate for different problems
      • -
      • Understand the conditions of analysis and how that affects which approach you choose
      • -
      -
    -
    -

    Review the methodology to understand how to plan analytics strategy. Proceed with the step-by-step exercise to evaluate sample tools in Splunk Machine Learning Toolkit.

    -

    -
    -
- -
-
- - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces analytics techniques that will help practitioners to understand different analytics methods that are key in performing predictive maintenance analysis.

-

- - Understanding analytical methods : Choose the right analysis method to use. - -

-
    -

    - Before using an analysis method, plan which technique to use and your problem-solving approach. Using the jet engine dataset reviewed from the exploration stage, three different analysis methods can be applied: anomaly detection, unsupervised learning, supervised learning. Selecting the optimal method depends on the dataset. The following criteria are important for deciding the optimal analysis method. -

    -

    First, identify whether the data or the problem is univariate or multivariate. In other words, does the analysis consider a single feature (Single KPI or metrics) or multiple features (Multiple KPI or metrics). If it is a univariate problem, such as finding spikes in the trend of a KPI, simple standard-deviation-based anomaly detection is recommended. For a multivariate problem, such as a machine with many moving parts and many associated features, use either an unsupervised or supervised machine learning approach.

    -

    The second criterion, which applies to multivariate problems, is whether the dataset includes labels or can be labeled. Labels are the known outcomes recorded in past data. -

      -
        -
      • If the data has labels (Known), then the optimal method is supervised learning instead of unsupervised learning, where the labels allow creating a training model that learns the mapping of input (variables) to output (results). Then, using the trained model, predict the results from the current or future dataset. The advantage of supervised learning is that the analyst can control and optimize the accuracy of the model since the analyst can curate training data to build the prediction model.
      • -
      • If the data doesn’t have labels and arbitrary labels can’t be defined (Unknown), the option is to use unsupervised learning. Since there is no historical data with known results, a training model cannot be created; instead, use unsupervised machine learning clustering algorithms like kmeans or dbscan that recognize similarities or shapes of patterns. (A quick way to check which case applies is sketched after this list.)
      • -
      -
    -
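A quick, hedged way to check which of these two cases applies (a minimal sketch only; the label field name state is an assumption, since in this app the state labels are derived rather than present in the raw events) is to count how many events actually carry a label:

`get_pm_event_window(194)`
| stats count as total_events, count(state) as labeled_events
| eval label_coverage_pct=round(labeled_events/total_events*100, 1)

If label_coverage_pct comes back near zero, the dataset is effectively in the “Unknown” case, and unsupervised learning is the practical starting point.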

    -

    These are 3 major Analysis techniques that will be covered in detail through the Splunk Essentials for Predictive Maintenance app.

    -
    -
      - -
      - - To summarize, the following combinations of analysis conditions exist for predictive maintenance. Each combination of the condition dictates which analysis techniques can be used :
      -
        -
          -
        • - Univariate (Single metric) time series : Applying analytics on a single metric - when there is only a single feature that is relevant for analysis and decision-making. -
        • -
        • - Multivariate (Multiple metrics) time series + “Known” : Applying analytics on multiple metrics when there are multiple factors that could determine maintenance state and a “Known” condition, meaning that there is labeled state data or that the data can be labeled. -
        • -
        • - Multivariate (Multiple metrics) time series + “Unknown” : Applying analytics on multiple metrics when there are multiple factors that could reflect maintenance state and an ”Unknown” condition, which means there are no corresponding labels that indicate the condition of the asset, and the condition cannot be labeled. -
        • -
          -
        -
      -

      The following chart outlines a list of actual algorithms included with the Splunk Machine Learning Toolkit. Depending on which analysis method you use, either “supervised”, “unsupervised” or “anomaly detection”, there are multiple machine learning algorithms that can be used. In the jet engine example, the goal is to predict two different states: “good” or “needs maintenance”, so an algorithm from “Predict categorical fields” makes the most sense.

      -
        - -
        -
      - - This chart categorizes algorithms by analysis method.
      -
        -
          -
        • Anomaly Detection - Forecast Numerical Time Series, Detect Categorical Outliers, Detect Numerical Outliers
        • -
        • Supervised - Predict Numerical Fields, Predict Categorical fields
        • -
        • Unsupervised - Cluster numerical events
        • -
        -
      -
      -
    -
- -

- - METHOD 1, Anomaly Detection : Simple standard-deviation-based anomaly detection - -

-
    -

    The simple standard-deviation-based anomaly detection method calculates thresholds for acceptable data ranges using standard deviation, then categorizes any readings that exceed the threshold as anomalous. This method is used on univariate (single metric) time series data, when there is just a single feature in the data to consider for prediction. When the maintenance problem can be identified using a single or limited number of factors, this method is a simple and effective way to define a baseline condition using basic statistics like standard deviation. -

    -

    The downside to this method is that very few maintenance problems can be identified easily using a threshold based on one factor, so this approach alone is not very realistic. For simple anomaly detection, the “One class SVM” or “Anomaly detection” algorithms from the “Detect Numerical Outliers” category of the Splunk Machine Learning Toolkit can be used.

    -
    -
      - -
      - - For univariate dataset problems, use method 1, the simple standard-deviation-based anomaly detection method. This involves calculating averages and standard deviation.
      -
      - - -
      - - The algorithm used is the “Detecting Numerical Outliers” algorithm. -
      -
      -
    -
- - -
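To make Method 1 concrete, here is a minimal SPL sketch using this app’s `get_pm_event_window` macro and the LPT outlet temperature feature (the multipliers 1.8 and 1 are illustrative tuning choices, not fixed values):

`get_pm_event_window(194)`
| trendline sma4(SS_sname_LPT_Outlet_Temp)
| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev
| eval lowerBound=(avg-stdev*1.8), upperBound=(avg+stdev*1)
| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0)

The step-by-step exercise later in this app builds up this exact search in stages and visualizes the flagged points on an Outliers Chart.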

- - METHOD 2, Unsupervised learning : Identify commonalities in the data based on the characteristics of the collected sample - - -

-
    -

    Unsupervised learning works by testing data that has not been labeled or classified, identifying commonalities in the data and clustering it based on the presence or absence of such commonalities in each new piece of data. This method is best used with multivariate (multiple metrics) time series data when there are multiple features in the data to consider and labels for the data are not available. In the context of maintenance data, if there are multiple features that identify wear and tear on an asset and if there is no historical data available to identify what a bad state looks like in the data, use this approach.

    -

    Unsupervised learning is great for understanding the relationships of features with one another and deriving trends, patterns, and results from datasets that have no defined labels. On the other hand, if you want more controlled results based on inputs you can influence using historical data, unsupervised learning provides no facility for that. For unsupervised learning, techniques such as Principal Component Analysis (PCA) and clustering will be used.

    -
      - -
      -
      - - For multivariate datasets without labels, method 2 in our app, the unsupervised learning method, is used.
      -
    -
    -
      - -
      - - From the Splunk Machine Learning Toolkit algorithm map, “Cluster Numerical Events” algorithm in “Group Events” method is used. -
      -
      -
    -
    -
-
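As a rough sketch of Method 2 (this assumes the Splunk Machine Learning Toolkit is installed, since the fit command comes from MLTK), the smoothed sensor features can be clustered directly with kmeans:

`get_pm_event_window(194)`
| trendline sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_Fuel_Flow_Ratio)
| where unit_cycle>3
| fit KMeans k=3 "sma4(SS_sname_LPT_Outlet_Temp)" "sma4(SS_sname_HPC_Outlet_Temp)" "sma4(SS_sname_Fuel_Flow_Ratio)"

KMeans appends a cluster field (0, 1, or 2) to every event; the unsupervised learning section of this app refines this idea by running PCA first.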

- - METHOD 3, Supervised learning : Infers a function from labeled training data consisting of a set of training examples - -

-
    -

    Supervised learning is a method to create a model using historical data with labels (labels being the past results), then predict on a new dataset by processing the data through the trained model. The majority of machine learning use cases leverage the supervised learning method, which this app covers in more depth. It is the most commonly used method applied to multivariate (multiple metrics) time series data, when there are multiple features in the data to consider and either labels (historical results) exist or can be arbitrarily assigned after exploration. -

    -

    Supervised learning gives analysts a certain level of control over which historical dataset gets trained into a model, as well as the ability to update those models with new historical datasets. In order to select the right historical data to train, this method requires a little more knowledge about the data to design the training model correctly.

    -

    For supervised learning, techniques like “Logistic Regression”, “Gaussian Naive Bayes”, or “Random Forest” from the “Predict Categorical Fields” group can work well.

    -
      - -
      -
      - - For multivariate datasets with labels (historical data with results), in other words “Known”, method 3 in our app, the supervised learning categorical classification method, is used.
      -
    -
    -
      - -
      - - From the Splunk Machine Learning Toolkit algorithm map, the “Predict Categorical Fields (Classification)” algorithms can be used. -
      -
      -
    -
    -
- -
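A minimal training sketch for Method 3 (again assuming MLTK; the unit_cycle cut-offs used to label the three states are the same illustrative ones used on this app’s asset details dashboard, and the model name matches the model applied there):

`get_pm_event_frame(194)`
| trendline sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_Fuel_Flow_Ratio)
| where unit_cycle>3
| eval state=case(unit_cycle<=200, "0", unit_cycle>=201 AND unit_cycle<=275, "1", unit_cycle>275, "2")
| fit RandomForestClassifier state from sma4* into _app_pm_super_randomforest_01

New observations can then be scored with | apply _app_pm_super_randomforest_01, which is how the drill-down details dashboard classifies incoming data.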
-
- - - - - - - -

- - STEP BY STEP : - -

-

This step-by-step section guides you through features of the Splunk Machine Learning Toolkit and examples that allow you to use the techniques discussed in the "Methodology Review". After you familiarize yourself with the Splunk Machine Learning Toolkit features, the next section applies those features to jet engine data for predictive maintenance analysis.

-

- - Exploring Splunk Machine Learning Toolkit : Anomaly Detection Example - -

-
    -

    Prior to applying the analytical methods to the jet engine dataset, review the examples in the Splunk Machine Learning Toolkit for “Anomaly Detection”. Familiarize yourself with example data and explore how to use Splunk Machine Learning Toolkit. -

    -
- -
-
- - - - -
- -
    - - STEP 01 - : Select app selection menu from top left of the page, Select Splunk Machine Learning Toolkit from the drop down menu. -
    -
      - -
      - - Select “Detect Outlier in Supermarket Purchases” example from “Detect Numeric Outliers” category. -
      -
      - -
      -
      - - Splunk Machine Learning Toolkit showcases an anomaly detection example demonstrating anomalous purchases of a customer based on past purchasing behavior. -
      -
      - - - -
    -
-
- -
- -
- - - - RESULT STEP 01 : Detect Outliers in Supermarket Purchases. -
-
    - -
    -
    - -
    -
    - -
    -
    - -
    -
- -
-
- - - - -
- -
    - - STEP 02 - : In the result table in the bottom, click “Show SPL” button to see how SPL is applied. -
    -
      - -
      - - A pop-up window shows the SPL (Splunk Processing Language) for detecting outliers in purchases. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 02 : Show SPL result -
-
    - -
    -
    -
- -
-
- - - - -
- -
    - - STEP 03 - : In the result table in the bottom, click “Open in Search” button to apply the search in a search interface. -
    -
      - -
      - - New search window opened and Splunk search command all populated. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 03 : New search interface with anomaly detection search populated. -
-
    - -
    -
    -
- -
-
- - - - - - - - -

- - Exploring Splunk Machine Learning Toolkit : Unsupervised Learning Example - -

-
    -

    Now, review the examples in the Splunk Machine Learning Toolkit for “Unsupervised Learning”. After you familiarize yourself with the Splunk Machine Learning Toolkit features, the next section applies those features to jet engine data for predictive maintenance analysis. -

    -
- -
-
- - - - - -
- -
    - - STEP 01 - : Select app selection menu from top left of the page, Select Splunk Machine Learning Toolkit from the drop down menu. -
    -
      - -
      - - Select “Cluster Hard Drives by SMART Metrics” example from “Cluster Numeric Events” category. -
      -
      - -
      - - Splunk Machine Learning Toolkit showcases unsupervised learning example demonstrating grouping of hard disk failures of hard drives. -
      -
      - - - - -
    -
-
- -
- -
- - - RESULT STEP 01 : Cluster Hard Drives by SMART Metrics Example. -
-
    - -
    -
    - -
    -
    - -
    -
    - -
    -
- -
-
- - - - -
- -
    - - STEP 02 - : In the result table in the bottom, click “Show SPL” button to see how SPL is applied. -
    -
      - -
      - - A pop-up window shows the SPL (Splunk Processing Language) for clustering hard drive failure patterns. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 02 : Pop-up window shows the SPL (Splunk Processing Language) for clustering. -
-
    - -
    -
    -
- -
-
- - - - -
- -
    - - STEP 03 - : In the result table in the bottom, click “Open in Search” button to apply the search in a search interface. -
    -
      - -
      - - New search window opened and Splunk search command all populated. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 03 : New search interface with clustering search populated. -
-
    - -
    -
    -
- -
-
- - - - - - - - - -

- - Exploring Splunk Machine Learning Toolkit : Supervised Learning Example - -

-
    -

    Now, review the examples in Splunk Machine Learning Toolkit for “Supervised Learning” Example. After you familiarize yourself with the Splunk Machine Learning Toolkit features, the next section applies those features to jet engine data for predictive maintenance analysis.

    -
- -
-
- - - - - -
- -
    - - STEP 01 - : Select app selection menu from top left of the page, Select Splunk Machine Learning Toolkit from the drop down menu. -
    -
      - -
      - - Select “Predict Hard Drive Failure” example from “Predict Categorical Fields” category. -
      -
      - -
      - - Splunk Machine Learning Toolkit showcases supervised learning example demonstrating prediction of hard disk failures. -
      -
      - - - - -
    -
-
- -
- -
- - - - RESULT STEP 01 : Retrieve Raw Data in Table. -
-
    - -
    -
    - -
    -
    - -
    -
    - -
    -
- -
-
- - - - -
- -
    - - STEP 02 - : In the result table in the bottom, click “Show SPL” button to see how SPL is applied. -
    -
      - -
      - - Pop-up window shows the SPL (Splunk Processing Language) for categorical prediction. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 02 : Pop-up window shows the SPL (Splunk Processing Language) for categorical prediction. -
-
    - -
    -
    -
- -
-
- - - - -
- -
    - - STEP 03 - : In the result table in the bottom, click “Open in Search” button to apply the search in a search interface. -
    -
      - -
      - - New search window opened and Splunk search command all populated. -
      - -
    -
-
- -
- -
- - - - RESULT STEP 03 : New search interface with categorical prediction search populated. -
-
    - -
    -
    -
- -
-
- - - - - - - - - -

- - What's Next? - -

-
    -

    - You have completed reviewing the overview of the analytic methods. Now that you know what kinds of analysis you can apply to the jet engine dataset, the next step is to apply each of the techniques to get different insights from the maintenance data. -

    - - NEXT STEP : Evaluate different analytics methods to the jet engine dataset. -
    -
    - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. (i1) -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From 5694acd6a1ff93a2fed477e8edcef2afcff73fa4 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:29:01 -0500 Subject: [PATCH 22/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_02.xml --- .../data/ui/views/sa_sc_analysis_02.xml | 656 ------------------ 1 file changed, 656 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_02.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_02.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_02.xml deleted file mode 100644 index 56c12df..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_02.xml +++ /dev/null @@ -1,656 +0,0 @@ - - - - - - -
    -

    - Find deviation from the baseline using standard deviation -

    - -

    This section introduces the first analysis method for predictive maintenance, applying anomaly detection to determine wear and tear by statistically calculating the standard deviation. This technique is useful to apply when you need to determine the condition of an asset based on a single or limited number of metrics. This technique can be used to monitor asset metrics of wear and tear, abnormal operation status, or for monitoring quality, such as with Statistical Process Control (SPC). These steps cover a technique to profile the behavior of an engine asset during service and how to determine the abnormal state of the engine. -

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    As covered in the analysis overview section, the anomaly detection method can be used with univariate data to analyze single key metrics that indicate the need for maintenance. The anomaly detection approach defines a data-driven threshold using a statistical concept, standard deviation. This method is not ideal for complex assets with a variety of metrics, because it requires an individual policy to be defined for each sensor in an asset.

    -
    -

    This section introduces an anomaly detection approach to maintenance data, where the characteristics of the data drive the “profile” or “baseline” of the asset, and can then be used to identify anomalies.

    -

    The technique can be applied to a broad set of use cases, from general asset behavior monitoring to predictive maintenance. It is an effective way to profile the behavior of the assets without defining a fixed threshold, where standard deviation and averages are derived from the metrics.

    -

    The goal of the “Analysis” stage is for you to apply basic statistical analytical methods like data distribution analysis or standard deviation to performance data, and then define a data-driven threshold based on the standard deviation.

    -

    - After this section, users will be able to : -

      -
        -
      • Calculate basic statistics (average, standard deviation) from the dataset.
      • -
      • Define adaptive thresholds based on averages and standard deviation.
      • -
      • Evaluate anomalies that would indicate the need for maintenance.
      • -
      -
    -

    - Review the methodology to understand how to apply standard deviation for adaptive thresholding. Proceed with the step-by-step exercise to apply anomaly-detection-based thresholding for maintenance conditions.
    - -
- -
-
- - - - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces the basic statistical concepts that you can use to detect anomalies, such as calculating a standard deviation and defining an upper and lower bound threshold.

-

- - Standard Deviation : Understanding the variation in the data - -

-
    -

    - Standard deviation is a measurement used to quantify the amount of variation in a set of data values. A low standard deviation indicates that the data points tend to be close to the mean (also called the expected value) of a set of values, while a high standard deviation indicates that the data points are spread out over a wider range of values. -

    -
    -
      - -
      - - The chart on the left shows a perfect standard deviation bell curve, that outlines the percentage of coverage per 1 standard deviation. A perfect standard deviation is shaped like a bell, where 1 standard deviation (stdev) equates to 68.2% of the entire dataset. It looks similar to and has similar characteristics of a normal distribution chart.
      - - The illustration on the right displays datasets with the same averages but different standard deviation values, which produces a completely different overall distribution of data.
      -
    -
    -

    In the case of engines or turbines, data values stay within a given range, then spread out in one direction. This kind of dataset would create a tilted bell curve.

    -
      -
      - - The left chart shows this particular value increasing over time as this sensor part wears out. -
      - - The right chart is a histogram that shows the distribution of values. Because the majority of values are in the same range, that indicates a normal condition of the part, and as the sensor parts wear out, the values change. -
      -
    -
-
- -
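To see this kind of distribution for one of the jet engine features, a simple histogram can be built in SPL (a sketch only; the bin span of 0.1 is an arbitrary assumption about the scale of the normalized sensor values exposed by this app’s macro):

`get_pm_event_window(194)`
| where unit_cycle>3
| bin SS_sname_LPT_Outlet_Temp span=0.1 as temp_bucket
| stats count by temp_bucket
| sort temp_bucket

A symmetric count profile indicates a bell-shaped distribution, while counts piled up toward one side indicate the tilted curve described above.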

- - Standard Deviation based Thresholding : Defining threshold using standard deviation - -

-
    -

    - Standard-deviation-based thresholding is a method used to apply an adaptive threshold based on the collected data values. Instead of defining a fixed threshold, the threshold is calculated per asset so that statistics can define an acceptable range for each asset and look for conditions that deviate a certain percentage from the mean. -

    -

    - For example, define a threshold that applies to each asset, based on the historical performance of the asset. For a jet engine, rather than having one fixed threshold to define good performance for all engines, you need a different threshold for each engine because they are used in different ways, or for different routes. -

    -

    - You can define the upper threshold limit by calculating the standard deviation and adding it to the average value. This threshold separates normal values from anomalous values. Define the lower threshold by subtracting the standard deviation from the average value. -

    -

    To control the coverage range of a normal window, you can use a variable multiplier to define the range of the normal state. A variable multiplier of 1 means that 34% of the values are considered “Normal”. A higher variable multiplier expands the range of what is “normal”, while a lower variable multiplier reduces the range. Use a higher variable multiplier to make an anomaly less sensitive.

    - -

    You can use the following formula : -

          -
        • Upper Limit Threshold = Average + (Standard deviation * Variable multiplier)
        • -
        • Lower Limit threshold = Average - (Standard deviation * Variable multiplier)
        • -
    -
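For example (illustrative numbers only): if a feature averages 642.5 with a standard deviation of 1.8, then with a variable multiplier of 1 the upper limit is 642.5 + (1.8 * 1) = 644.3, and with a variable multiplier of 1.8 the lower limit is 642.5 - (1.8 * 1.8) = 639.26. Any reading outside the 639.26 to 644.3 band would be flagged as anomalous.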

    - -
    -
      - -
      - - The upper bound threshold is calculated at around 0.9 and a variable multiplier of “1” is applied to make it more sensitive to value increases. Lowering the upper threshold to be more sensitive in this case is important because as the equipment wears out, the sensor value trend goes up.
      - - The lower bound threshold is calculated at around 1.7 by applying a variable multiplier of 1.8. In this case the lower threshold was widened to make it less sensitive to false positive values.
      -
      - -
    -
- -
-
- - - - - -

- - STEP BY STEP : - -

-

- This step-by-step section guides you through applying the key techniques covered in the "Methodology Review" to the jet engine dataset. The Left column provides detailed instruction and the right column provides the results of the step. Use button to jump directly into a live environment for each step. -

-

- - Calculate Standard Deviation : Data variation based anomaly detection - -

-
    -

    Calculate some basic statistics on a feature that has a high correlation with engine wear to define the threshold.

    -
- -
-
- - - - - - -
- -
    - - STEP 01 - : Select the jet engine dataset using the `get_pm_event_window(X)` search macro. Then use the "trendline" command to smooth out spikes in the data. The "where" command filters out empty records, and finally the "table" command lets you view the engine sensor values that are already normalized and smoothened. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -
      - - The results show pre-processed events from the get_pm_event_window metrics, and the applied smoothing technique smooths out spikes in the metrics data.
      - - The newly-processed metric fields start with “sma4” and represent an average of the last 4 values for the metric. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 01 : Retreived data and preprocessed. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 02 - : Out of multiple features in the data, select "LPT_Outlet_Temp" to apply anomaly detection. Select the “LPT_Outlet_Temp” feature based on the results of exploring the data, because it has high correlation with wear and tear of the asset. The LPT_Outlet_Temp feature has high correlation with the change as the asset wears out. Use the table command to display the "LPT_Outlet_Temp" field. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) 
      -
      - - The result shows the selected data "LPT_Outlet_Temp" with the unit_cycle number. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 02 : LPT_Outlet_Temp field selected for analysis - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 03 - : Calculate the average values and standard deviations using the eventstats command, storing averages as "avg" and standard deviation as "stdev". -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) 
      -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev 
      -
      - - The average and standard deviation columns are added to the table along with "LPT_Outlet_Temp". -As mentioned in the methodology section, anomaly detection applies to a single metric. Here, the average and standard deviation is calculated for the single feature, LPT_Outlet_Temp. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 03 : Appended average and standard deviation. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev - - 0 - 1 - - - -
-
-
- - - - - - -

- - Define Threshold : Applying a threshold based on standard deviation. - -

-
    -

    Using the calculated average and standard deviation for LPT_Outlet_Temp, define a lower and upper threshold for the data. -

    -
- -
-
- - - - - -
- -
    - - STEP 04 - : Define the lower and upper bound threshold using the eval command. Lower bound as (avg-stdev*1.8) and upper bound as (avg+stdev*1) -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) 
      -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev 
      -| eval lowerBound=(avg-stdev*1.8), upperBound=(avg+stdev*1) 
      -
      - - The result adds the additional columns lowerBound and upperBound to each metric. Notice that the upper and lower bound thresholds are different because different variable multipliers were applied. This is something an analysts can decide and tune based on the characteristics of the equipment. Because this example is “temperature”, the analysts might only care about a rising temperature, rather than a cooling temperature. This is reflected in the variable multiplier where upper values are set lower using “1” as the multiplier to make the threshold more sensitive to rising temperature. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 04 : Calculated Lower and Upper thresholds - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev -| eval lowerBound=(avg-stdev*1.8), upperBound=(avg+stdev*1) - - 0 - 1 - - - -
-
-
- - - - - -

- - Evaluate Outliers : Determine outliers that exceed the Lower and Upper thresholds. - -

-
    -

    Now that you’ve calculated the standard deviation and average, and defined upper and lower thresholds for the data, you are ready to identify the values that exceed the thresholds. Label these outlier values as “1”. -

    -
- -
-
- - - - - -
- -
    - - STEP 05 - : Evaluate outliers by comparing the data points with lower and upper limits. Use the eval command and if logic to define a Boolean 0 or 1 value for the values, where 1 is an outlier value. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) 
      -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev 
      -| eval lowerBound=(avg-stdev*exact(1.8)), upperBound=(avg+stdev*exact(1)) 
      -| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0) 
      -
      - - A new column "isOutlier" is added to each metric, where a label of 1 identifies an outlier. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 05 : Outliers identified. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev -| eval lowerBound=(avg-stdev*exact(1.8)), upperBound=(avg+stdev*exact(1)) -| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0) - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 06 - : Select the fields that are relevant in the results. Use the fields command to select only the _time, "sma4(SS_sname_LPT_Outlet_Temp)", lowerBound, upperBound, and isOutlier fields. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) 
      -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev 
      -| eval lowerBound=(avg-stdev*exact(1.8)), upperBound=(avg+stdev*exact(1)) 
      -| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0) 
      -| fields _time, "sma4(SS_sname_LPT_Outlet_Temp)", lowerBound, upperBound, isOutlier 
      -
      - - With the necessary fields selected, you can visualize the results. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 06 : Selected fields with relevant results - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev -| eval lowerBound=(avg-stdev*exact(1.8)), upperBound=(avg+stdev*exact(1)) -| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0) -| fields _time, "sma4(SS_sname_LPT_Outlet_Temp)", lowerBound, upperBound, isOutlier - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 07 - : To visualize the trend and outliers, select Visualization and choose Outliers Chart. -
    -
      - -
      - - The chart shows the dark shaded area as dataset values within the threshold range and yellow dotted points as outliers. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 07 : Anomalies and outliers visualized - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4(SS_sname_LPT_Outlet_Temp) -| eventstats avg("sma4(SS_sname_LPT_Outlet_Temp)") as avg, stdev("sma4(SS_sname_LPT_Outlet_Temp)") as stdev -| eval lowerBound=(avg-stdev*exact(1.8)), upperBound=(avg+stdev*exact(1)) -| eval isOutlier=if('sma4(SS_sname_LPT_Outlet_Temp)' < lowerBound OR 'sma4(SS_sname_LPT_Outlet_Temp)' > upperBound, 1, 0) -| fields _time, "sma4(SS_sname_LPT_Outlet_Temp)", lowerBound, upperBound, isOutlier - - 0 - - - - -
- - - - - - -

- - What's Next? - -

-
    -

    - You completed the first technique, applying the anomaly detection method. Move on to the next stage in applying the Unsupervised Learning method. -

    - - NEXT STEP : Evaluate the Unsupervised Learning method to jet engine data. -
    -
    - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. (i1) -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From 3758b9ce9ec6db656cf6ebadeda8e8e2303e56ed Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:29:53 -0500 Subject: [PATCH 23/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_03.xml --- .../data/ui/views/sa_sc_analysis_03.xml | 666 ------------------ 1 file changed, 666 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_03.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_03.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_03.xml deleted file mode 100644 index 087a88e..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_03.xml +++ /dev/null @@ -1,666 +0,0 @@ - - - - - - - -
    -

    - Principal Component Analysis (PCA) and Clustering to find assets that need maintenance. - -

    - -

    This section introduces the second analysis method of predictive maintenance analysis, applying unsupervised learning to determine wear and tear using key analysis techniques such as Principal Component Analysis (PCA) and clustering. Unsupervised learning is a technique often applied to multivariate data when applying analysis to multiple features that could indicate the need for maintenance or when preparing a dataset to train a supervised learning model is not possible, due to lack of dataset expertise or difficult-to-define state conditions in the data.

    -

    The exercises and steps in this section cover algorithms to use to find principal components from a set of metrics and apply clustering techniques to group assets based on similar conditions in a dataset.

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    One of the key advantages of “Unsupervised Learning” is that it does not require extensive knowledge of the data before applying the technique and getting a meaningful outcome. The principal component analysis (PCA) method also accommodates many features (multivariate data) through dimensionality reduction, which is covered in the “Methodology” section.

    -

    Limitations of unsupervised learning are that the results can be vague depending on which features are used to cluster, and there are fewer options available to control the model outcomes, such as techniques to improve model accuracy.

    -
    - -

    Some unsupervised learning techniques are a great way to get a better understanding of the data characteristics through a more analytical approach. Specifically, principal component analysis offers an abstract way to interpret data by exploring new dimensions of the data, and grouping similar observations together through clustering exposes the existing characteristics of assets in the collected dataset.

    -

    - After this section, users will be able to : -

      -
        -
      • Apply unsupervised learning approach to multivariate performance data from assets.
      • -
      • Understand what Principal Component Analysis (PCA) does to find the principal components from a set of features.
      • -
      • Cluster data using clustering algorithms like kmeans.
      • -
      -
    -

    - Review the methodology on Principal Component Analysis (PCA) and clustering, then proceed with the step-by-step exercise to apply unsupervised learning to analyze maintenance conditions.
    -
- -
-
- - - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces the mechanics of applying unsupervised learning techniques like principal component analysis and clustering to the jet engine dataset.

-

- - PCA (Principal Component Analysis) : Reduces dimensions from a multivariate dataset. - -

-
    -

    Principal component analysis (PCA) takes multiple feature values and calculates an abstract dimension that best represents the characteristics of the data. Because it combines multiple features, you cannot map specific features in the data to principal components. Principal component analysis is a great way to reduce the number of dimensions used in an analysis. This means for the jet engine dataset, 20 different metrics can be used to identify a dimension of PC_1, principal component 1.

    -

    You must understand the different features very well to determine what matters. Typically, PC_1 and PC_2 represent the characteristics of multiple values in the dataset.

    -
    -
      - -
      - - From the graph using the Gene 1, Gene 2, Gene 3, Gene x features, PCA finds new dimensions PC_1 and PC_2 that could be interpreted as the distinctive characteristics of the dataset, evaluated from the multiple features in the dataset. As a result, the data is clearly clustered using principal component 1 and 2.
      -
    -
- -
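A minimal sketch of PCA in SPL (assuming the Splunk Machine Learning Toolkit’s fit command; SS_sname_* matches this app’s sensor feature fields):

`get_pm_event_window(194)`
| where unit_cycle>3
| fit PCA k=3 SS_sname_*
| table unit_cycle PC_1 PC_2 PC_3

With k=3, MLTK’s PCA condenses the full set of sensor features into three abstract dimensions, appended as PC_1 through PC_3, which can then be charted or clustered.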

- - Clustering : Grouping data from its own characteristics. - -

-
    -

    - Clustering groups a set of objects in such a way that the objects in one group are more similar to each other than to those in other groups (clusters). Different algorithms group different patterns of data in different ways. The example below showcases commonly-used algorithms that group objects based on proximity, a kmeans algorithm. -

    -
    -
      - -
      - - The kmeans clustering algorithm clusters values into three groups by computing a central value (centroid) for each group and assigning the values closest to each centroid to that group. The kmeans clustering algorithm adds an additional field to the dataset with an assigned cluster number for each value.
      -
      -
    -
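    As with PCA, the mechanics can be sketched with the MLTK fit command. A minimal sketch building on the PCA example above; the sensor choice and the k values are illustrative assumptions:

    `get_pm_event_window(194)`
    | fit PCA SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_LPC_Outlet_Temp k=2 ``` reduce to 2 dimensions first ```
    | fit KMeans k=3 PC_1 PC_2 ``` k=3 asks for three groups; KMeans appends a "cluster" field ```
    | stats count by cluster ``` how many observations landed in each group ```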
- -

- - Clustering Visualizations : Visualization of clustered data for exploration. - -

-
    -

    - It is very important to visualize the results of a clustering algorithm in unsupervised learning, because the key objective is to identify the similarities and differences in the data; seeing the clusters afterward is crucial. Common visualization methods use a scatter chart in 2D or 3D, where each observation is a dot or bubble plotted along the x and y axes, with an added z axis in a 3D scatter chart. Raw values can be mapped in scatter charts, but it’s more effective to chart the results of PCA (PC_1, PC_2, and PC_3), the dimensions that represent the dataset best. Unsupervised learning workflows perform PCA before clustering so that the dimensions resulting from PCA can be used as axes to plot the data. -

    -
    -
      - -
      - - The results in a Scatter Chart Matrix show 2-dimensional results in a single graph. The results in a 3D Scatter Chart show 3-dimensional results in a single graph, with PC_1 and PC_2 as the x and y axes and PC_3 as the z axis. -
      - - In the chart, the different colors are the result of the kmeans clustering algorithm where the data was grouped based on the condition of the engine. One group is blue, to represent normal, another group is yellow, to represent warning, and the third group is red to represent critical. After kmeans performs a calculation for each observation, it adds the group name as a field for each metric. -
      -
    -
    -
      - -
      -
    -
- -
-
- - - - - - - -

- - STEP BY STEP : - -

-

This step-by-step section guides you through applying the key techniques covered in the "Methodology Review" to the jet engine dataset. The left column provides detailed instructions and the right column provides the results of each step. Use the button to jump directly into a live environment for each step.

-

Now, using the multivariate jet engine dataset, apply the key unsupervised learning techniques to reduce the number of dimensions, then cluster data at different maintenance states. These steps walk you through the following process:

-
    -
      -
    • Understand how principal component analysis (PCA) is applied to performance or maintenance-related data
    • -
    • Understand how to apply clustering algorithms to the results of PCA.
    • -
    • Visualize the data to best represent the characteristics of data.
    • -
    • Understand how the data changes over time from a principal component perspective.
    • -
    -
-

- - Applying PCA (Principal Component Analysis) : Reduces the dimensionality of multivariate data. - -

-
    -

    From the 20 different features or metrics in the jet engine dataset, calculate the principal components PC_1, PC_2, and PC_3. -

    -
- -
-
- - - - - - - -
- -
    - - STEP 01 - : Select the jet engine dataset by using the `get_pm_event_window(X)` search macro. Then use the "trendline" command to smooth out spikes. The "where" command filters out empty records, and the "table" command helps you view the engine sensor values, which are already normalized and smoothed. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3
      -| table unit_cycle sma4*
      -
      - - The result shows pre-processed events from the get_pm_event_window macro; the smoothing technique averages metric values to let you better focus on trends in the data. The newly processed metrics starting with “sma4” are averaged from the last 4 values of each metric (an equivalent hand-rolled formulation is sketched below). -

      - - - -
    -
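      For intuition about what trendline sma4(field) computes: it is a simple moving average over the last 4 events. A roughly equivalent formulation with streamstats, shown for a single sensor only as a sketch:

      `get_pm_event_window(194)`
      | trendline sma4(SS_sname_Fuel_Flow_Ratio) ``` the command used in this step ```
      | streamstats window=4 avg(SS_sname_Fuel_Flow_Ratio) as sma4_manual ``` the same 4-event moving average, written by hand ```
      | table unit_cycle sma4* ``` the two sma4 columns should track each other ```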
-
- -
- -
- - - RESULT STEP 01 : Retrieved and preprocessed data - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 02 - : Calculate the principal components of the selected dataset by using the fit command to apply the PCA algorithm. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3
      -| table unit_cycle sma4*
      -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 
      -
      - - The output appends PC_1, PC_2, PC_3 fields identifying calculated principal component values alongside the sensor values. -
      - - Without deep knowledge of the data, PCA calculates abstract characteristics of the combined features that are most relevant for analysis. Starting with PC_1 as the most meaningful abstracted value, the first 2 or 3 principal components can represent the data well. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 02 : Principal Components calculated - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 03 - : Select principal components 1 and 2, the principal components that best represent the dataset. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3
      -| table unit_cycle sma4*
      -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 
      -| table unit_cycle, "PC_1", "PC_2"
      -
      - - The results show the unit_cycle with PC_1 and PC_2. This result can be charted in a scatter chart to validate how PC_1 and PC_2 reflect the changes in value from cycle 3 out past cycle 200. PC_1 represents the changes reflecting wear and tear of the engine as it goes through more cycles. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 03 : Visualize the top principal components. PC_1, PC_2 - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 -| table unit_cycle, "PC_1", "PC_2" - - 0 - 1 - - - - - - -
- - - - - -

- - Applying Clustering : Group asset's condition from data - -

-
    -

    The principal components derived from the PCA method (PC_1, PC_2, and PC_3) represent the data well. Now, use a clustering method to group the observations into 3 categories: Normal, Warning, and Critical. For this type of proximity-based grouping, kmeans works well to separate the 3 different conditions automatically. -

    -
- -
-
- - - - - -
- -
    - - STEP 04 - : Cluster the calculated principal components PC_1, PC_2, PC_3 into 3 groups using the fit command applying the kmeans algorithm. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3
      -| table unit_cycle sma4*
      -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 
      -| table unit_cycle PC_*
      -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 
      -
      - - Now each data point is labeled with a "cluster" field indicating cluster “0 - normal”, “1 - warning”, “2 - critical”. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 04 : Cluster labeled under "cluster" field using top principal components PC1, PC2, PC3 - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 -| table unit_cycle PC_* -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 05 - : Prepare the data for better analysis by organizing the data with the table command to focus just on the cluster field and the principal component fields. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3
      -| table unit_cycle sma4*
      -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 
      -| table unit_cycle PC_*
      -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 
      -| eval cluster= "Cluster: " + cluster 
      -| table cluster, "PC_1", "PC_2", "PC_3" 
      -
      - - The results show only cluster and principal component fields, PC_1, PC_2, PC_3, for each data point. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 05 : Reformat the cluster results, to just show relevant results - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 -| table unit_cycle PC_* -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 -| eval cluster= "Cluster: " + cluster -| table cluster, "PC_1", "PC_2", "PC_3" - - 0 - 1 - - - -
-
-
- - - - - -

- - Visualizing Analysis Results : Visualizing conditions of data from an asset. - -

-
    -

    The results are now ready to be visualized. For best results, a scatter chart is recommended. Use the Scatterplot Matrix to see a 2-dimensional map with PC_1, PC_2 and PC_3. Use the 3D Scatter Chart to see PC_1, PC_2 and PC_3 as x, y, z axis values. -

    -
- -
-
- - - - - - -
- -
    - - STEP 06 - : To effectively visualize the clusters and the relationships of the principal components with a 2-dimensional visualization method, select Visualization and choose the Scatterplot Matrix chart. -
    -
      - -
      - - The Scatterplot Matrix chart shows 9 graphs with combinations of PC_1, PC_2, and PC_3 in a matrix. The matrix illustrates that PC_1 best represents the asset as it wears out, while PC_2 and PC_3 don’t have much correlation with the pattern. -
      - - It is not yet entirely clear which features from the jet engine dataset will be used for the next stage. -

      - - - - -
    -
-
- -
- -
- - - RESULT STEP 06 : Shows relationships of principal components - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 -| table unit_cycle PC_* -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 -| eval cluster= "Cluster: " + cluster -| table cluster, "PC_1", "PC_2", "PC_3" - - 0 - - - - -
- - - - - -
- -
    - - STEP 07 - : To visualize how the normal-state data points deviate from the state where maintenance is required, use a 3D scatter chart by selecting Visualization and choosing 3D Scatter Chart. -
    -
      - -
      - - The visualization results show healthy data points as blue, data points in a warning state as yellow, and data points in a critical state as red. -

      - - - - -
    -
-
- -
- -
- - - RESULT STEP 07 : 3D visualization after clustering - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| fit PCA "sma4(SS_sname_Bleed_Enthalpy)", "sma4(SS_sname_Bypass_Ratio)", "sma4(SS_sname_Corr_Core_Speed)", "sma4(SS_sname_Corr_Fan_Speed)", "sma4(SS_sname_Fuel_Flow_Ratio)", "sma4(SS_sname_HPC_Outlet_Temp)", "sma4(SS_sname_HPT_Coolant_Bleed)", "sma4(SS_sname_LPC_Outlet_Temp)", "sma4(SS_sname_LPT_Outlet_Temp)", "sma4(SS_sname_Phys_Core_Speed)", "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_pca_temp_01 -| table unit_cycle PC_* -| fit KMeans k=3 "PC_1" "PC_2" "PC_3" into _app_pm_cluster_kmeans_01 -| eval cluster= "Cluster: " + cluster -| table cluster, "PC_1", "PC_2", "PC_3" - - 0 - - - - -
- - - - - -

- - What's Next? - -

-
    -

    - You completed the second technique, applying the Unsupervised Learning method. Move on to the next stage to apply the Supervised Learning method. -

    - - NEXT STEP : Apply the Supervised Learning method to jet engine data. -
    -
    - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From 999b410ffd390445f112a20bd4bba3d09924f208 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:30:17 -0500 Subject: [PATCH 24/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_04.xml --- .../data/ui/views/sa_sc_analysis_04.xml | 1148 ----------------- 1 file changed, 1148 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_04.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_04.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_04.xml deleted file mode 100644 index 5a43489..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_analysis_04.xml +++ /dev/null @@ -1,1148 +0,0 @@ - - - - - - - -
    -

    - Training a model to predict the maintenance state of assets -

    - -

    This section introduces the third method of predictive maintenance analysis, applying supervised learning to the jet engine dataset. It covers key techniques for training, creating, and applying a machine learning model to predict the state of the jet engine. -

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    Supervised learning is a method that uses historical data with known states (labels), such as failures, to train a prediction model, which can then be used to make predictions on future data. Supervised learning can be applied to a broad set of use cases, from general asset behavior monitoring to predictive maintenance, and it is one of the most common machine learning methods. It is effective for profiling different conditions of asset states, then looking for conditions that match what has been trained.

    -
    -

    The advantages of supervised learning are that you can take in multiple features or variables that could indicate different conditions, then design and curate the training data to control the quality of prediction results. However, this requires a greater understanding of the data and its interpretation, as well as more preparation.

    -

    In this section, use the supervised learning method to train a model with status data labeled “Failed”, then classify and predict the condition of jet engines with a non-training dataset.

    -

    - After this section, users will be able to : -

      -
        -
      • Label “state” to create a training dataset for a machine learning model.
      • -
      • Use tools to train models and then apply the models to new data.
      • -
      • Evaluate the efficacy of the trained model and tune with different machine learning algorithms.
      • -
      -
    -

    - Review the supervised learning concepts in the methodology section, then proceed with the step-by-step exercise to create supervised learning algorithms to process against new data. -
    -
- -
-
- - - - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces the mechanics of applying supervised learning, from selecting the approach to training the model, to actual creation of machine learning algorithms, to applying the algorithms to future data to generate predictions.

- - -

- - Machine Learning - Supervised Learning mechanism : How does training a machine learning algorithm work? - -

-
    -

    Supervised learning for the jet engine dataset requires the following steps to create, apply and validate the model.

    -
      -
        -
      • 1 - Decide how to train the model: In this app’s jet engine dataset example, train a model to recognize multiple states.
      • -
      • 2 - Prepare training dataset: Using a historical dataset and a field to represent the state of the engine, add a label to the data.
      • -
      • 3 - Decide on an algorithm to use: Both logistic regression and random forest algorithms are used.
      • -
      • 4 - Create the model using the algorithm that you select.
      • -
      • 5 - Evaluate the accuracy and effectiveness of the machine learning model.
      • -
      • 6 - Apply the prediction model created using the training data and algorithm.
      • -
      -
    -
    -
      - -
      - - The above illustration shows that the majority of the training data is used to train and create the prediction model using the selected machine learning algorithm. A subset of the training data is held out as test data to validate the accuracy and efficacy of the model's predictions against the labels defined in the test data (a sketch of such a split follows below).
      - - During the evaluation process, you can create different models using different machine learning algorithms to test the accuracy of different algorithms. Then you can apply the more accurate prediction model to a future dataset.

      -
    -
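    The split itself can be sketched with the MLTK sample command, which tags every event with a partition number. A minimal sketch; the 70/30 ratio and the seed are arbitrary illustrations, not values prescribed by this app:

    `get_pm_event_window(194)`
    | sample partitions=10 seed=42 ``` tag each event with partition_number 0-9 ```
    | where partition_number < 7 ``` keep roughly 70% of the events for training ```
    ``` fit the model on this subset, then evaluate it on the held-out partition_number >= 7 events ```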
- - -

- - Decide on the best approach to train the model for the problem : Looking for “Good” or “Bad” - -

-
    -

    After you’ve decided which machine learning algorithms to use, you need to determine how to train the machine learning model. Depending on the type of problem, you might want to consider 3 different approaches for training data.

    -

    In the context of predictive maintenance, the decision making process looks like this:

    -
    -
      - -
      - - For jet engine data, the "Train Multiple States" approach is covered.

      -
    -
- - -

- - Training the “Good” -

-
    -

    This approach works by training a model with a known “Good” state so that the model can identify patterns that don’t match the known “Good” state. Use this approach for complex equipment with many different types of potential problems. Due to the complexity of the equipment or system, it is easier to train on a known “Good” state than on a known “Bad” state, because there could be many patterns that indicate equipment in bad condition.

    -
      -

      For example: If you want to monitor the state of an entire system, such as one made up of several electronic components (like a car), there are many different ways that the various parts reflect the status or performance of the system. In this example it’s easier to monitor known good performance of a car rather than the myriad ways that the system can fail or perform badly.

      -
    -
    -
      - -
      - - As the illustration shows, label the “good” state as input for training; the model can then look for anything that doesn’t match the “good” pattern. The model does not need to be trained for all the different situations that indicate bad performance.
      - -
      - - The trained model classifies matching patterns as “1” (yes, this pattern is “Good”) and anything else as “0” (no, this pattern is not good).

      -
    -
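    The MLTK ships algorithms suited to this one-sided training, such as OneClassSVM, which learns only the healthy pattern. A hypothetical sketch; treating early cycles as the known “good” period is an assumption for illustration, as is the model name:

    `get_pm_event_window(194)`
    | where unit_cycle<100 ``` hypothetical: treat early cycles as the known "good" period ```
    | fit OneClassSVM SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp into good_state_sketch ``` learn the healthy pattern only ```
    ``` applying good_state_sketch to later data flags events that do not match the trained pattern ```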
- - - -

- - Training the “Bad” -

-
    -

    This approach works by training a model based on a known “Bad” state so that the model can identify other “Bad” conditions that match that pattern. Use this approach if you can identify a specific condition or pattern of metrics that have a limited number of different state combinations.

    -
      -

      For example, you could use the pattern of an engine going into “hyper spin” as a “Bad” state to train the model with. In order to use this pattern to train the model, you must be able to identify it in the data or it must be already labeled in the data. This approach is used for more simplistic equipment with a limited set of known “Bad” states.

      -
    -

    First, capture the segment of “Bad” state data, then train the model.

    -
    -
      - -
      - - As the illustration shows, label the “bad” state as input for training; the model then looks for that specific pattern to identify data as bad.
      - -
      - - The trained model classifies the data, with matching patterns identified as “1” (yes, they are bad) and anything else as “0” (no, they are not bad patterns).

      -
    -
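    A minimal sketch of this binary labeling; the cycle threshold of 290 is borrowed from the step-by-step exercise later on this page, and the model name is a placeholder:

    `get_pm_event_window(194)`
    | eval is_bad=if(unit_cycle>290, "1", "0") ``` label the known bad segment "1", everything else "0" ```
    | fit LogisticRegression is_bad from SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp into bad_state_sketch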
- - - -

- - Training multiple states -

-
    -

    This approach works by training multiple sets of known states, such as “Good” or “Bad”. Use this approach when you have somewhat limited possible states or conditions and you want to specifically classify those conditions.

    -
      -

      For example, categorizing the movements of an automobile based on car sensor readings as local driving, highway driving, reverse, or stopped. In that case, you can label the various states that you want to identify and train a model accordingly.

      -
    -

    First, capture a segment of data for each state, then train the model.

    -
    -
      - -
      - - For jet engine data, label the state based on the intensity of data deviation, normal to extreme.
      - -
      - - The trained model classifies multiple outcomes: “Good” as “1”, “Alert” as “2”, and “Bad” as “3”.

      -
    -
- - -

- - Preparing data for supervised learning : Labeling data for algorithm training. - -

-
    -

    The key when preparing data for supervised learning is to label the data with training values. If the data doesn’t have a status label, which is typical in datasets like the jet engine dataset, you must define the labels after some analysis to use for training. If the goal is to label multiple states beyond just the failed state, you can define ranges and label each range as a certain state, as in the illustration below.

    -
    -
      - -

      - - This label is added to the training dataset as another column, then the data is used to create a classification algorithm. From the illustration, the data is labeled as the following:
      -
        -
          -
            -
          • Range 0-200 cycles as state=0, normal
          • -
          • Range 201-275 cycle as state=1, warning
          • -
          • Range 276-361 cycle as state=2, critical

          • -
          -
        -
      - -

      - - Looking at the training dataset as a table, the new column “state” contains the labels indicating the state assigned for training purposes. This training dataset is now ready to be used to train a prediction model.
      -
    -
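    Expressed in SPL, the labeling from the illustration is a single eval. A sketch; the field name state and the cycle boundaries are taken from the illustration above:

    `get_pm_event_window(194)`
    | eval state=case(unit_cycle<=200, "0", unit_cycle<=275, "1", unit_cycle<=361, "2") ``` 0=normal, 1=warning, 2=critical, per the illustrated ranges ```
    | table unit_cycle state ``` the new "state" column is the training label ```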
- - - -

- - Machine Learning Algorithms : Different characteristics of ML algorithms - -

-
    -

    - For the jet engine dataset, a time series of multivariate data, several algorithms can be used. The recommended algorithms are 1) Logistic Regression, 2) Gaussian Naive Bayes, and 3) Random Forest. In the evaluation step, different machine learning algorithms can be compared. This app evaluates the Logistic Regression and Random Forest algorithms. -

    -
      - -
      - - The above illustration shows how different machine learning algorithms classify data in different ways along different dimensions. Applying different models to datasets with different characteristics builds experience of which kinds of data match which types of machine learning algorithm.
      -
      -
    -
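    Of the three recommended algorithms, Gaussian Naive Bayes is not exercised in the step-by-step section below, but swapping it in is only a change to the fit command. A sketch using two example sensors and a placeholder model name; the train_model_state label is created exactly as in the step-by-step exercise:

    `get_pm_event_window(194)`
    | trendline sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp) ``` smooth two example sensors ```
    | where unit_cycle>3
    | eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290, "1", unit_cycle>290, "2")
    | fit GaussianNB train_model_state from "sma4(SS_sname_Fuel_Flow_Ratio)" "sma4(SS_sname_HPC_Outlet_Temp)" into _app_pm_super_gnb_sketch ``` same fit pattern as LogisticRegression and RandomForestClassifier ```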
- - -

- - Prediction Model Evaluation : Confusion Matrix to evaluate the accuracy of machine learning algorithm - -

-
    -

    One technique to evaluate the accuracy of a trained model is to create a confusion matrix. A confusion matrix shows the results of a prediction and its actual values from the test data. The results of the matrix analysis show you a count of correct predictions and a count of incorrect predictions to help you evaluate the accuracy.

    -
    -
      - -
      - - Applying a confusion matrix to test data can show results like the above. From this you can conclude that the model is highly accurate at predicting the “0” and “2” states, and less accurate at predicting the “1” state:
      -
        -
          -
            -
          • Actual value of 0 was predicted 226 times as “0”, showing high accuracy.
          • -
          • Actual value of 1 was predicted 46 times as “1”, while the predicted values for that label also included “0” (predicted 3 times) and “2” (predicted 10 times), indicating some inaccuracies in the model.
          • -
          • Actual value of 2 was predicted 71 times as “2”, also showing high accuracy.
          • -
          -
        -
      -
      -
    -
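    Under the hood, a confusion matrix is a cross-tabulation of actual versus predicted labels. The app wraps this in the confusionmatrix macro used in the step-by-step section, but the core can be sketched in plain SPL as a fragment appended after the apply command of any of the searches below:

    | chart count over train_model_state by "predicted(train_model_state)" ``` rows = actual class, columns = predicted class ```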
- - -

- - Prediction Model Evaluation : Classification Statistics to evaluate the accuracy of machine learning algorithms - -

-
    -

    A second technique to evaluate the accuracy of a trained model is to use classification statistics. The classification statistics show the calculated accuracy, precision, recall, and F1 score (a combined measure of precision and recall) of the created prediction model when run against test data. - The Splunk Machine Learning Toolkit provides a “classification statistics” macro that you can use to evaluate a model.

    -
    -
      - -
      - - Tuning of prediction model is evaluated with classification statistics. -
        -
          -
            -
          • Accuracy : the ratio of correctly predicted observations. - The most intuitive performance measure of prediction model.
          • -
          • Precision : the ratio of correct positive observations. - Formula : true positives / (true positives + false positives)
          • -
          • Recall : the ratio of correctly predicted positive events. - Formula : true positives / (true positives + false negatives)
          • -
          • F1 : the harmonic mean of Precision and Recall - Formula : 2 × (precision × recall) / (precision + recall)
          • - -
          -
        -
      -

      -
    -
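    As a cross-check on the macro's output, overall accuracy can be computed directly. A sketch, appended after the apply command in the searches below; the single quotes are how eval references a field name containing parentheses:

    | eval correct=if(train_model_state == 'predicted(train_model_state)', 1, 0) ``` 1 when the prediction matches the label ```
    | stats sum(correct) as hits, count as total
    | eval accuracy=round(hits/total, 4) ``` the ratio of correctly predicted observations ```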
- - -
-
- - - - - - - -

- - STEP BY STEP : - -

-

This step-by-step section guides you through applying the key techniques covered in the "Methodology Review" to the jet engine dataset. The left column provides detailed instructions and the right column provides the results of each step. Use the button to jump directly into a live environment for each step.

-

Using the multivariate jet engine dataset, apply the key supervised learning techniques to create prediction models using two machine learning algorithms, logistic regression and random forest. The steps walk you through the following process:

-
    -
      -
        -
      • 1 | Label data for training prediction model, with state 0, 1, and 2 being normal, warning, and critical.
      • -
      • 2 | Create a prediction model using logistic regression.
      • -
      • 3 | Apply the created prediction model to the dataset.
      • -
      • 4 | Evaluate the accuracy of the created logistic regression model.
      • -
      • 5 | Create another prediction model using the random forest algorithm.
      • -
      • 6 | Evaluate the accuracy of the random forest prediction model.
      • -
      • 7 | Choose the algorithm that provides better accuracy.
      • -
      -
    -
-

- - Preparing data for supervised learning : Labeling data for algorithm training. - -

-
    -

    After exploring the jet engine dataset, especially maintenance cycle dataset number 194, apply a state label to each cycle observation with a status of 0 (normal), 1 (warning), or 2 (critical). Create a prediction model with the labeled data in the next step. -

    -
- -
-
- - - - - - -
- -
    - - STEP 01 - : Select the event window 194 from the jet engine dataset using the `get_pm_event_window(X)` search macro. Then use the "trendline" command to smooth out spikes in the results. The "where" command filters out empty events, and the "table" command helps you view the engine sensor values that are already normalized and smoothed. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -
      - - The result shows pre-processed events from the get_pm_event_window macro; the applied smoothing technique smooths out spikes that are less representative of the metric characteristics. The newly processed metrics start with “sma4”; they are formed by averaging the last 4 values of each metric, which smooths out the spikes. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 01 : Retrieve data and preprocess, standard scale (Normalization) and smoothened spikes - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* - - 0 - 1 - - - -
-
-
- - - - - -
- -
    - - STEP 02 - : Assign a state of 0, 1, or 2 using the eval command to create the field “train_model_state”. Define the ranges of unit_cycle (cycle number), where 0-229 is “0” (normal), 230-290 is “1” (warning), and values greater than 290 are “2” (critical). -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -
      - - The output shows the data in table format sorted from the oldest record, with the new “train_model_state” label added to each line. Each line represents a set of summary statistics for a single flight (a trip). -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 02 : Tagged training labels @ different unit_cycle ranges. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") - - 0 - 1 - - - - - - -
- - - - - -

- - Generate a prediction model : Creating a machine learning model using training data - -

-
    -

    Use the training data from the previous steps to create a logistic regression prediction model.

    -
- -
-
- - - - - -
- -
    - - STEP 03 - : Create a logistic regression prediction model, using the fit command, specifying the training state label “train_model_state”, and creating a model named “_app_pm_super_logis_reg_01”. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| fit LogisticRegression fit_intercept=true "train_model_state" from "sma4(SS_sname_Bleed_Enthalpy)" "sma4(SS_sname_Bypass_Ratio)" "sma4(SS_sname_Corr_Fan_Speed)" "sma4(SS_sname_Fuel_Flow_Ratio)" "sma4(SS_sname_HPC_Outlet_Temp)" "sma4(SS_sname_HPT_Coolant_Bleed)" "sma4(SS_sname_LPC_Outlet_Temp)" "sma4(SS_sname_LPT_Outlet_Temp)" "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_super_logis_reg_01 
      -
      - - The new prediction model “_app_pm_super_logis_reg_01” is created in the system. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 03 : Trained logistic regression prediction model - Predict categorical value. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| fit LogisticRegression fit_intercept=true "train_model_state" from "sma4(SS_sname_Bleed_Enthalpy)" "sma4(SS_sname_Bypass_Ratio)" "sma4(SS_sname_Corr_Fan_Speed)" "sma4(SS_sname_Fuel_Flow_Ratio)" "sma4(SS_sname_HPC_Outlet_Temp)" "sma4(SS_sname_HPT_Coolant_Bleed)" "sma4(SS_sname_LPC_Outlet_Temp)" "sma4(SS_sname_LPT_Outlet_Temp)" "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_super_logis_reg_01 - - 0 - 1 - - - - - -
- - - - - -

- - Applying a machine learning model to data : Logistic Regression to process new data with the created machine learning model. - -

-
    -

    In the previous step, you used the fit command to create a prediction model. Now you can use the apply command to apply the model to your dataset. -

    -
- -
-
- - - - - -
- -
    - - STEP 04 - : Replace the fit command syntax with the apply command and run the data through the _app_pm_super_logis_reg_01 prediction model. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| table unit_cycle train_model_state sma4*
      -| apply _app_pm_super_logis_reg_01 
      -
      - - The result shows a new column of “predicted(train_model_state)” values. This field contains the prediction generated by the prediction model. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 04 : Applied the test data to the trained logistic regression model. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_logis_reg_01 - - 0 - 1 - - - - - -
- - - - - - -

- - Evaluate machine learning model : Logistic Regression - Using new dataset, evaluate model accuracy. - -

-
    -

    Evaluate the accuracy of the model by looking at the confusion matrix. Validate how the actual results compare to the predictions in the matrix. Look for mismatches between the values to understand the volume of prediction errors made by the model. Then use the classificationstatistics function to review the precision and recall values and assess the accuracy of the model.

    -
- -
-
- - - - - - -
- -
    - - STEP 05 - : Use the confusionmatrix macro by specifying the actual field name (train_model_state) and the predicted field name (predicted(train_model_state)). -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| table unit_cycle train_model_state sma4*
      -| apply _app_pm_super_logis_reg_01 
      -| `confusionmatrix("train_model_state","predicted(train_model_state)")` 
      -
      - - The results show some mismatches between the actual and predicted values. Save the results in order to compare with the other algorithm in the next few steps. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 05 : Confusion Matrix Table - The initial logistic regression - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_logis_reg_01 -| `confusionmatrix("train_model_state","predicted(train_model_state)")` - - 0 - 1 - - - - - - - - - - - - - - - -
-
-
- - - - - -
- -
    - - STEP 06 - : Use the classificationstatistics macro by specifying the actual field name (train_model_state) and the predicted field name (predicted(train_model_state)). -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4* 
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| table unit_cycle train_model_state sma4* 
      -| apply _app_pm_super_logis_reg_01 
      -| `classificationstatistics("train_model_state", "predicted(train_model_state)")` 
      -
      - - The classification statistics macro shows the accuracy, precision, recall, and f1 statistics to evaluate the overall accuracy of the model. The f1 score turns out to be 96%, which is very accurate. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 06 : Prediction statistics result - The initial logistic regression. - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_logis_reg_01 -| `classificationstatistics("train_model_state", "predicted(train_model_state)")` - - 0 - 1 - - - -
-
-
- - - - - -

- - Explore different machine learning algorithms : Random Forest - Create different ML algorithms for accuracy - -

-
    -

    You already created a logistic regression model to process the data; now follow these steps to create a different model using the random forest algorithm so that you can compare its accuracy with the logistic regression model. Compare the accuracy using the f1 score. -

    -
- -
-
- - - - - -
- -
    - - STEP 07 - : Replace the apply command part of the search with “fit RandomForestClassifier” to train another prediction model with the random forest algorithm. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| fit RandomForestClassifier  "train_model_state" from "sma4(SS_sname_Bleed_Enthalpy)" "sma4(SS_sname_Bypass_Ratio)" "sma4(SS_sname_Corr_Fan_Speed)" "sma4(SS_sname_Fuel_Flow_Ratio)" "sma4(SS_sname_HPC_Outlet_Temp)" "sma4(SS_sname_HPT_Coolant_Bleed)" "sma4(SS_sname_LPC_Outlet_Temp)" "sma4(SS_sname_LPT_Outlet_Temp)" "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_super_randomforest_01 
      -
      - - A new prediction model with the name “_app_pm_super_randomforest_01” is created. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 07 : Trained another machine learning prediction model - Random Forest - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| fit RandomForestClassifier "train_model_state" from "sma4(SS_sname_Bleed_Enthalpy)" "sma4(SS_sname_Bypass_Ratio)" "sma4(SS_sname_Corr_Fan_Speed)" "sma4(SS_sname_Fuel_Flow_Ratio)" "sma4(SS_sname_HPC_Outlet_Temp)" "sma4(SS_sname_HPT_Coolant_Bleed)" "sma4(SS_sname_LPC_Outlet_Temp)" "sma4(SS_sname_LPT_Outlet_Temp)" "sma4(SS_sname_Phys_Fan_Speed)" into _app_pm_super_randomforest_01 - - 0 - - - - - - -
- - - - - - -

- - Evaluate ML model : Random Forest - Using new dataset, evaluate model accuracy. - -

-
    -

    Evaluate the accuracy of the new model created with random forest algorithm by looking at the confusion matrix. Validate how the actual results compare to the predictions in the matrix. Look for mismatches between the values to understand the volume of prediction errors made by the model.

    -

    Then use the classificationstatistics function to review the precision and recall values and assess the accuracy of the model. Choose the model to use for the actual application of the analytics by reviewing accuracy.

    -
- -
-
- - - - - - -
- -
    - - STEP 08 - : Replace the previous fit command section of the search with “apply _app_pm_super_randomforest_01“ to apply the model to the dataset, then immediately use the confusionmatrix macro to evaluate the predictions of the new model. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| table unit_cycle train_model_state sma4*
      -| apply _app_pm_super_randomforest_01
      -| `confusionmatrix("train_model_state","predicted(train_model_state)")` 
      -
      - - The results show a 100% match between actual and predicted values. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 08 : Confusion matrix for the random forest classifier model - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_randomforest_01 -| `confusionmatrix("train_model_state","predicted(train_model_state)")` - - 0 - - - - - - - - - - - - - -
-
-
- - - - - -
- -
    - - STEP 09 - : Replace the confusionmatrix line with the classificationstatistics macro and specify the actual field name (train_model_state) and the predicted field name (predicted(train_model_state)). -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4* 
      -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1",  unit_cycle>290, "2")
      -| table unit_cycle train_model_state sma4* 
      -| apply _app_pm_super_randomforest_01
      -| `classificationstatistics("train_model_state", "predicted(train_model_state)")` 
      -
      - - The results show 100% accuracy because the model was both trained and evaluated on event window 194. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 09 : Classification Statistics for the random forest classifier model - - `get_pm_event_window(194)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| eval train_model_state=case(unit_cycle<230, "0", unit_cycle>=230 AND unit_cycle<=290,"1", unit_cycle>290, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_randomforest_01 -| `classificationstatistics("train_model_state", "predicted(train_model_state)")` - - 0 - -
-
-
- - - - - - -

- - Apply different engine data : Using random forest prediction model to other dataset. - -

-
    -

    Apply the validated prediction model (random forest) to another engine dataset. Validate how the designed prediction model works on other datasets, then review the model accuracy and statistics.

    -
- -
-
- - - - - -
- -
    - - STEP 10 - : Replace the 194 cycle event window with a different event window, 116, and apply the created random forest prediction model. -
    -
      -
      `get_pm_event_window(116)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4*
      -| eval train_model_state=case(unit_cycle<210, "0", unit_cycle>=210 AND unit_cycle<=270,"1",  unit_cycle>270, "2")
      -| apply _app_pm_super_randomforest_01 
      -
      - - The jet engine event window 116 dataset is used to apply the random forest prediction model. -

      - - - -
    -
-
- -
- -
- - - RESULT STEP 10 : Another event window dataset applied to the prediction model - Random Forest - - `get_pm_event_window(116)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| table unit_cycle sma4* -| apply _app_pm_super_randomforest_01 - - 0 - - - - - - -
- - - - - - - -
- -
    - - STEP 11 - : Evaluate model statistics for event window 116 dataset. -
    -
      -
      `get_pm_event_window(116)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) 
      -| where unit_cycle>3 
      -| table unit_cycle sma4* 
      -| eval train_model_state=case(unit_cycle<210, "0", unit_cycle>=210 AND unit_cycle<=270,"1",  unit_cycle>270, "2")
      -| table unit_cycle train_model_state sma4* 
      -| apply _app_pm_super_randomforest_01
      -| `classificationstatistics("train_model_state", "predicted(train_model_state)")`
      -
      - - Because the model is now applied to a different event window, 116, which it was not trained on, the results are more realistic. The reported statistics (defined and cross-checked in the sketch after this list) are: -
        -
          -
            -
          • Accuracy = 0.94
          • -
          • Precision = 0.94
          • -
          • Recall = 0.94
          • -
          • F1 = 0.94
          • -
          -
        -
      -
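      - For reference, the reported accuracy can be cross-checked without the macro. The following is a minimal sketch, not part of the original exercise; it assumes the apply command emits its prediction into the predicted(train_model_state) field, as shown in the results above, and the trendline clause is abbreviated here:

      `get_pm_event_window(116)`
      | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), ... (the same eleven sma4 fields as in STEP 11)
      | where unit_cycle>3
      | eval train_model_state=case(unit_cycle<210, "0", unit_cycle>=210 AND unit_cycle<=270,"1", unit_cycle>270, "2")
      | table unit_cycle train_model_state sma4*
      | apply _app_pm_super_randomforest_01
      | eval correct=if(train_model_state='predicted(train_model_state)', 1, 0)
      | stats avg(correct) as accuracy

      - Accuracy is the fraction of cycles whose predicted state matches the labeled state. Precision, recall, and F1 additionally weigh false positives and false negatives per class, which is why the classificationstatistics macro reports them separately. -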

      - - - -
    -
-
- -
- -
- - - RESULT STEP 11 : Classification Statistics for event window 116 dataset - - `get_pm_event_window(116)` -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) -| where unit_cycle>3 -| eval train_model_state=case(unit_cycle<210, "0", unit_cycle>=210 AND unit_cycle<=270,"1", unit_cycle>270, "2") -| table unit_cycle train_model_state sma4* -| apply _app_pm_super_randomforest_01 -| `classificationstatistics("train_model_state", "predicted(train_model_state)")` - - 0 - - -
-
-
- - - - - -

- - What's Next? - -

-
    -

    - You completed the third technique by applying the Supervised Learning method. Move on to the next stage to understand how you can operationalize the analysis results. -

    - - NEXT STEP : Learn how to operationalize the predictive analysis result. -
    -
    - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From 787e48384ac0dbe3926a229dd6ef955b08134e62 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:30:48 -0500 Subject: [PATCH 25/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_apply_01.xml --- .../default/data/ui/views/sa_sc_apply_01.xml | 283 ------------------ 1 file changed, 283 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_apply_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_apply_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_apply_01.xml deleted file mode 100644 index 9fc9067..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_apply_01.xml +++ /dev/null @@ -1,283 +0,0 @@ - - - - - - -
    -

    - How to put Splunk to work? -

    - -

    As you complete the analysis of predictive maintenance assets in your environment, you may be wondering how to put the results to immediate use. You can use the prediction results in various ways, such as dashboards, reports, and alerts, and integrate them with other systems. This section provides ideas on how you can put the results of the analysis to work. -

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    Splunk Enterprise is already equipped with creative ways to put Splunk to work without any development at all. The methodology section provides examples of how Splunk can be put to use in your operational environment (dashboards, reports, alerts, and integration).

    -
    - After reading this section, you will be able to : -
      -
        -
      • See examples on how to create dashboards and reports.
      • -
      • See examples on how to define alerts.
      • -
      • See examples on how to integrate Splunk with other software.
      • -
      -
    -
    -
- -
-
- - - - - - -

- - EXAMPLES : - -

-

This technical resources page guides you through examples of what you can do with the results of your predictive analysis. Please review the various contents and resource links here to learn how you can extend Splunk into real operations.

-

- - Operationalize dashboards and reports to your environment - -

-
    -

    Online documentation and videos that help you understand how to implement Splunk into your operations.

    -
- -
-
- - - - - - -
- -
    - - Creating Dashboard : Predictive Maintenance Dashboard -
        -
      • Using the analyzed predictions, create a dashboard to show the status of engines in their different states (a minimal panel sketch follows this list).
      • -
      • Use the Splunk Dashboard Examples App to customize visualizations and various user inputs and controls.
      • -
      • Navigate to the Predictive Maintenance demo dashboard created using the exact steps in this app : Click here
      • -
    -
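    - As a minimal illustration of such a panel, a single-value tile can be expressed in a few lines of Simple XML. This is a hypothetical sketch (the dashboard label and panel title are illustrative); the query abbreviates the Critical Engines search used in this app's dashboards:

    <dashboard>
      <label>Engine Status Overview</label>
      <row>
        <panel>
          <title>Critical Engines</title>
          <single>
            <search>
              <!-- the "..." stands for the rename/table preprocessing shown in the Critical Engines search in this app -->
              <query>index=iot_pm_stash | dedup pkey | ... | apply _app_pm_super_randomforest_01 | rename predicted(train_model_state) as predicted | search predicted=2 | stats count</query>
              <earliest>0</earliest>
            </search>
          </single>
        </panel>
      </row>
    </dashboard>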
-
- -
- -
- - - Predictive Maintenance Dashboard -
-
    - -
    -
    -
- -
-
- - - - -
- - - - -
- -
- - - Creating Reports in Splunk Enterprise -
-
    - -
    -
    -
- -
-
- - - - - -

- - Define alerts and rules for your maintenance operations. -

-
    -

    Learn how you can create real-time alerts based on the predictive analysis of your assets (a hypothetical alert configuration is sketched below) :

    -
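    - The sketch below shows how such an alert could be captured in savedsearches.conf. This is a hypothetical example, not shipped with this app; the stanza name, schedule, and recipient are placeholders, and the search abbreviates the Critical Engines search used in this app's dashboards:

    # savedsearches.conf (hypothetical sketch)
    [Critical Engine Alert]
    # the "..." stands for the sma4 rename/table preprocessing shown in the dashboard searches
    search = index=iot_pm_stash | dedup pkey | ... | apply _app_pm_super_randomforest_01 | rename predicted(train_model_state) as predicted | search predicted=2
    enableSched = 1
    cron_schedule = */15 * * * *
    # trigger whenever at least one engine is predicted critical
    counttype = number of events
    relation = greater than
    quantity = 0
    action.email = 1
    action.email.to = maintenance-team@example.com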
- -
-
- - - - - -
- - - - -
- -
- - - Creating Alerts in Splunk Enterprise -
-
    - -
    -
    -
- -
-
- - - - - - -

- - What's Next? - -

-
    -

    - Now you have completed all the exercises in Splunk Essentials for Predictive Maintenance. Do you feel comfortable applying the concepts in this app to your data? If you need help, please email us to support your use case in making your operations more proactive. Thank you very much for your attention. -

    - - NEXT STEP : Evaluate how you can apply the concepts you learned in this app. -
    -
    - - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
- - -
\ No newline at end of file From 1f9f95ce1ea558512b2eabb67c4f0d33fa53d23c Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:31:11 -0500 Subject: [PATCH 26/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_dashboard.xml --- .../default/data/ui/views/sa_sc_dashboard.xml | 530 ------------------ 1 file changed, 530 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_dashboard.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_dashboard.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_dashboard.xml deleted file mode 100644 index 24d903d..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_dashboard.xml +++ /dev/null @@ -1,530 +0,0 @@ - - - Using Machine Learning to classify different engine status based on trained engine profile - - - -

- - Total Engines In Operations -

- - - - sourcetype="iot_pm_fail" -| table unit_no unit_cycle sensor* -| reverse -| streamstats count(eval(unit_cycle=1)) as events_frame_id -| rename sensor_02 as sname_LPC_Outlet_Temp, sensor_03 as sname_HPC_Outlet_Temp, sensor_04 as sname_LPT_Outlet_Temp, sensor_07 as sname_Total_HPC_Outlet_Pres, sensor_08 as sname_Phys_Fan_Speed, sensor_09 as sname_Phys_Core_Speed, sensor_11 as sname_Static_HPC_Outlet_Pres, sensor_12 as sname_Fuel_Flow_Ratio, sensor_13 as sname_Corr_Fan_Speed, sensor_14 as sname_Corr_Core_Speed, sensor_15 as sname_Bypass_Ratio, sensor_17 as sname_Bleed_Enthalpy, sensor_21 as sname_HPT_Coolant_Bleed, sensor_22 as sname_LPT_Coolant_Bleed -| strcat events_frame_id "-" unit_cycle pkey -| lookup engine_run_status pkey, pkey -| search cycle_percent=* -| collect index=iot_pm_stash -| stats count - 0 - - 1 - - - - - - -
- - -

- - Normal Engines -

- - - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=0 -| stats count - 0 - - 1 - - - - - - - - - - - - - - - - - - - - -
- - -

- - Warning Engines -

- - - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=1 -| stats count - 0 - - 1 - - - - - - - - - - - - - - - - - - - - -
- - -

- - Critical Engines -

- - - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=2 -| stats count - 0 - - 1 - - - - - - - - - - - - - - - - - - - - -
-
- - - Total Engine Status - - - index=iot_pm_stash -| dedup pkey -| apply app_pm_SS -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table unit_cycle predicted(train_model_state) sma4* -| rename predicted(train_model_state) as state -| table state unit_cycle -| sort - state unit_cycle -| replace 0 with Normal, 1 with Warning, 2 with Critical in state -| stats count by state - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Optimal Condition - - Sorted by top cycles - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=0 -| table events_frame_id predicted unit_cycle -| sort predicted unit_cycle -| fields - events_frame_id - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_demo_02 - - - - - Monitor Changes - - Sorted by top cycles - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), 
SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=1 -| table events_frame_id predicted unit_cycle -| sort predicted unit_cycle -| fields - events_frame_id - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_demo_02 - - - - - Requires Immediate Maintenance - - Sorted by top cycles - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=2 -| table events_frame_id predicted unit_cycle -| sort predicted unit_cycle -| fields - events_frame_id - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_demo_02 - - - - - - - Normal Engines Detail - - Click to Drill in - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as 
sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=0 -| table events_frame_id predicted unit_cycle -| sort - unit_cycle -| fields - predicted -| head 20 - 0 - - 1 - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_details?asset=$row.events_frame_id$&unit_cycle=$row.unit_cycle$ - -
-
- - Warning Engines Detail - - Click to Drill in - 161 - - index=iot_pm_stash -| dedup pkey -| apply app_pm_SS -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=1 -| table events_frame_id predicted unit_cycle -| sort - unit_cycle -| fields - predicted -| head 20 - 0 - - 1 - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_details?asset=$row.events_frame_id$&unit_cycle=$row.unit_cycle$ - -
-
- - Critical Engines Detail - - Click to Drill in - 184 - - index=iot_pm_stash -| dedup pkey -| apply iot_pm_ss_tmp -| table events_frame_id unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed -| rename SS_sname_Bleed_Enthalpy as sma4(SS_sname_Bleed_Enthalpy), SS_sname_Bypass_Ratio as sma4(SS_sname_Bypass_Ratio), SS_sname_Corr_Core_Speed as sma4(SS_sname_Corr_Core_Speed), SS_sname_Corr_Fan_Speed as sma4(SS_sname_Corr_Fan_Speed), SS_sname_Fuel_Flow_Ratio as sma4(SS_sname_Fuel_Flow_Ratio), SS_sname_HPC_Outlet_Temp as sma4(SS_sname_HPC_Outlet_Temp), SS_sname_HPT_Coolant_Bleed as sma4(SS_sname_HPT_Coolant_Bleed), SS_sname_LPC_Outlet_Temp as sma4(SS_sname_LPC_Outlet_Temp), SS_sname_LPT_Outlet_Temp as sma4(SS_sname_LPT_Outlet_Temp), SS_sname_Phys_Core_Speed as sma4(SS_sname_Phys_Core_Speed), SS_sname_Phys_Fan_Speed as sma4(SS_sname_Phys_Fan_Speed) -| table events_frame_id unit_cycle sma4* -| apply _app_pm_super_randomforest_01 -| table events_frame_id unit_cycle predicted(train_model_state) unit_cycle -| rename predicted(train_model_state) as predicted -| search predicted=2 -| table events_frame_id predicted unit_cycle -| sort - unit_cycle -| fields - predicted -| head 20 - 0 - - 1 - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_details?asset=$row.events_frame_id$&unit_cycle=$row.unit_cycle$ - -
-
-
-
\ No newline at end of file From df984a84500396cb16125e0d3b0357522d043e09 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:32:12 -0500 Subject: [PATCH 27/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01.xml --- .../default/data/ui/views/sa_sc_demo_01.xml | 417 ------------------ 1 file changed, 417 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01.xml deleted file mode 100644 index 2904d03..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01.xml +++ /dev/null @@ -1,417 +0,0 @@ - - - Factory Pump Operational Status Dashboard - Monitor and analyze health of pumps in operations - - - Pump Operational Metrics - - Operations Pump Assets - - index=_internal -| head 10 -| table _time -| eval A=160 -| eval value = A + random()%5 -| table _time, value - 0 - 1 - - - - - - - - - - - - - - - - - - - - - - Line Production Ratio - - index=_internal -| head 10 -| table _time -| eval A=96 -| eval value = A + random()%5 -| table _time, value - 0 - 1 - - - - - - - - - - - - - - - - - - - - - - - Number of Pumps under maintenance - - index=_internal -| head 10 -| table _time -| eval A=1 -| eval value = A + random()%3 -| table _time, value - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - Business / HR Metrics - - Increased utilizations - - index=_internal -| head 10 -| table _time -| eval A=50000 -| eval value = A + random()%5000 -| table _time, value - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - Gained utilization to dollars - - index=_internal -| head 10 -| table _time -| eval A=130 -| eval value = A + random()%30 -| table _time, value - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - Ops Staff Utilization - - index=_internal -| head 10 -| table _time -| eval A=70 -| eval value = A + random()%5 -| table _time, value - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - -

- - Current Pump Operational Status -
- - - Stage_01 - - -

- -
-
- - - -

- - Current Pump Operational Life Status -

- - - - index=iot_pm_stash -| dedup pkey -| rex field=pkey "(?<pump_id>\d+)" -| table pump_id current_cycle -| sort pump_id - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /app/Splunk_Essentials_Predictive_Maintenance_for_IOT/pm_dashboard - - -
-
- - - -

- - Key Performance Indicators - Corr Speed -

- - - - index=iot_pm_stash -| dedup pkey -| rex field=pkey "(?<pump_id>\d+)" -| table pump_id sname_Corr_Core_Speed - 0 - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -

- - Key Performance Indicators - Fuel Flow Ratio -

- - - - index=iot_pm_stash -| dedup pkey -| rex field=pkey "(?<pump_id>\d+)" -| table pump_id sname_Fuel_Flow_Ratio - 0 - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- - - -

- - Current Pump Operational Conditions - Performance Metrics Relationships -

- - - - index=iot_pm_stash -| dedup pkey -| rex field=pkey "(?<pump_id>\d+)" -| table current_cycle sname* -| fields - sname_Phys_Fan_Speed sname_Corr_Core_Speed sname_Fuel_Flow_Ratio sname_Corr_Fan_Speed sname_Static_HPC_Outlet_Pres sname_HPC_Outlet_Temp sname_Total_HPC* - 0 - - 1 - - - - - - - - - - - - -
-
-
\ No newline at end of file From 37d32acbec4466a7069289330c93c47077e2c252 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:32:34 -0500 Subject: [PATCH 28/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02.xml --- .../default/data/ui/views/sa_sc_demo_02.xml | 245 ------------------ 1 file changed, 245 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02.xml deleted file mode 100644 index 68a3310..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02.xml +++ /dev/null @@ -1,245 +0,0 @@ -
- - Remaining Useful Life Calculator - Created from engine conditional indicators - Input "Current Unit Cycle" and "Current Conditional Indicator Value" for calculation. -
- - - 150 - 150 - - - - -1 - -1 - -
- - - Remaining Useful Life Prediction Results - - - index=iot_pm_stash EVT_TYPE=RUL_STATS -| table events_frame_id, unit_cycle, condition_ind, eol -| eval test_condition_ind=$INPUT_IND_VALUE$ -| search unit_cycle=$INPUT_CYCLE$ condition_ind>$INPUT_IND_VALUE$ -| sort condition_ind -| head 5 -| append - [ search index=iot_pm_stash EVT_TYPE=RUL_STATS - | table events_frame_id, unit_cycle, condition_ind, eol - | eval test_condition_ind=$INPUT_IND_VALUE$ - | search unit_cycle=$INPUT_CYCLE$ condition_ind<$INPUT_IND_VALUE$ - | sort - condition_ind - | head 5 - ] -| stats count as input_data_cnt, avg(eol) as predicted_eol by unit_cycle test_condition_ind -| rename predicted_eol as PREDICTED_END_OF_LIFE, input_data_cnt as INPUT_MODEL_COUNT, unit_cycle as UNIT_AGE, test_condition_ind as INPUT_CONDITION_IND -| eval REMAINING_LIFE=PREDICTED_END_OF_LIFE-UNIT_AGE -| eval LEFT_PERCENT=(REMAINING_LIFE/PREDICTED_END_OF_LIFE)*100 - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- - - Remaining Useful Life Prediction Model - Selected Age and Condition - - - index=iot_pm_stash EVT_TYPE=RUL_STATS - [ search index=iot_pm_stash EVT_TYPE=RUL_STATS - | table events_frame_id, unit_cycle, condition_ind, eol - | search unit_cycle=$INPUT_CYCLE$ condition_ind>$INPUT_IND_VALUE$ - | sort condition_ind - | head 5 - | table events_frame_id ] OR - [ search index=iot_pm_stash EVT_TYPE=RUL_STATS - | table events_frame_id, unit_cycle, condition_ind, eol - | search unit_cycle=$INPUT_CYCLE$ condition_ind<$INPUT_IND_VALUE$ - | sort - condition_ind - | head 5 - | table events_frame_id ] -| chart last(condition_ind) as cond by unit_cycle, events_frame_id -| eval CURRENT_AGE=if(unit_cycle=$INPUT_CYCLE$,1,0) - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - Remaining Useful Life Prediction Model - ALL Assets - - - index=iot_pm_stash EVT_TYPE=RUL_STATS -| chart last(condition_ind) as cond by unit_cycle, events_frame_id useother=0 limit=0 - 0 - 1 - - - - - - - - - - - - Remaining Useful Life Prediction Model - Historical End Of Life Age Distro - - - index=iot_pm_stash EVT_TYPE=RUL_STATS unit_cycle=1 - -| `histogram(eol,100)` - 0 - 1 - - - - - - - - - - - Assets with Different Remaining life Analysis - - - index=iot_pm_stash EVT_TYPE=RUL_STATS -| table events_frame_id unit_cycle condition_ind -| eval pkey=events_frame_id+"-"+unit_cycle -| lookup engine_run_status pkey as pkey OUTPUT pkey as MATCH -| search MATCH=* -| table events_frame_id unit_cycle condition_ind -| head 10 -| map search="search index=iot_pm_stash EVT_TYPE=RUL_STATS unit_cycle=$$unit_cycle$$ - [ search index=iot_pm_stash EVT_TYPE=RUL_STATS - | table events_frame_id, unit_cycle, condition_ind, eol - | search unit_cycle=$$unit_cycle$$ condition_ind>$$condition_ind$$ - | sort condition_ind - | head 5 - | table events_frame_id ] OR - [ search index=iot_pm_stash EVT_TYPE=RUL_STATS - | table events_frame_id, unit_cycle, condition_ind, eol - | search unit_cycle=$$unit_cycle$$ condition_ind<$$condition_ind$$ - | sort - condition_ind - | head 5 - | table events_frame_id ] -| stats count as input_data_cnt, avg(eol) as predicted_eol by unit_cycle | eval condition_ind=$$condition_ind$$ " -| eval REMAINDER_LIFE=predicted_eol-unit_cycle -| table unit_cycle, condition_ind input_data_cnt predicted_eol REMAINDER_LIFE -| eval LEFT_PERCENT=(REMAINDER_LIFE/predicted_eol)*100 -| sort LEFT_PERCENT - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
-
\ No newline at end of file From 0ada081a3e129f01807d5aea6c427a1003b4936f Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:33:07 -0500 Subject: [PATCH 29/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_01.xml --- .../data/ui/views/sa_sc_exploration_01.xml | 738 ------------------ 1 file changed, 738 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_01.xml deleted file mode 100644 index 8937f77..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_01.xml +++ /dev/null @@ -1,738 +0,0 @@ - - - - - - - -
    -

    - Understanding Predictive Maintenance Data-set -

    - -

    Data exploration is the start of the “Analysis” process, a key stage for planning how to proceed with analytics to get the desired outcome. - Before deciding which analytical model should be evaluated for creating a predictive maintenance model, understanding the type of data and the characteristics of the dataset is crucial for engineering the right features from the data. -

    -
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    The goal of the “Explore data” stage is for you to understand the characteristics of the maintenance-related performance dataset and to have a clear methodology for picking out the right features for analysis, so the analyst can decide on the right analytics approach for predicting optimal maintenance.

    -

    - After this section, you will be able to : -

      -
        -
      • Normalize data to find hidden signals in the data.
      • -
      • Be able to capture a full event window, preparing for Machine Learning algorithm training.
      • -
      • Visualize data based on the patterns and characteristics of the data.
      • -
      • Select features for doing accurate predictive maintenance analysis.
      • -
      -
    -

    -
- -
-
- - - - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces a few key data exploration techniques that help practitioners understand the characteristics of the data, which is essential preparation for better machine learning predictive modeling.

-

- - Understanding Features from Data : Exploration of asset metrics - -

-
    -

    Data exploration starts with understanding what metrics mean in the context of your assets and the components of assets. Metrics that indicate the status of different components of an asset are often called “features” in analysis. Understanding the types of features that exist in your data is critical for identifying how to analyze the data. -

    -

    Identify the features in the data based on your understanding of the system : -

      -
        -
      • To be able to effectively analyze the data, you need to identify what the asset does, and the component parts of the asset. The functions performed by the asset generate metrics and indicate the status of the asset components.
      • -
      • For example, a “Fuel flow ratio” metric for an engine indicates the rate of fuel flowing through the engine.
      • -
      • Understand how the metrics change, and what a typical deviation in values is over time. For example, as an engine wears out, the core temperature metric increases.
      • -
      -
    -

    -

    Identifying how these features make sense in the context of your system will allow you to focus on the relevant factors for predictive maintenance.

    -
      -
        - - -
      - - Based on exploring the raw data and identifying features, we can conclude the following basic knowledge about the asset:
      -
            -
          • Jet engines have 3 major moving parts, representing a three-shaft configuration with different pressure systems spinning at different rates that correspond to features in data.
          • -
          • Jet engine sensor data consists of 13 major sensor metrics that measure various speeds, temperatures, pressures, and liquid flow ratios.
          • -
      -
    -
-

- - Data-set preprocessing : Normalization (Standard Scaling) - -

-
    -

    - Normalization (often called “Standard Scaling”) is a data processing technique that converts dataset values on different scales to a single scale. Imagine you have 20 different metrics on unit scales so far apart that it is difficult to see patterns or trends in the data. -

    - -
    -
      - -
      - - As you can see, with metric scales so far apart, directional trend changes get buried in the bigger picture. Each feature metric appears to have a flat trend line.
      -
      -
    -

    By applying normalization (or Standard Scaling), you convert all metrics on different scales to a single common scale, centering each metric on an average value of "0". This allows you to compare the trends between metrics recorded at different scales and amplifies the changes in data values. This pre-processing step is ideal preparation for machine learning models because it preserves clear directional changes in the deviations from one state to another (a minimal sketch of the transform follows this subsection).

    -
      - -
      - - Normalization provides important benefits in the analysis of the initial data. It amplifies data with small deviations and equalizes data with extreme variances, which uncovers clear trends that help distinguish the segments (or windows) in the data. -
      -
      -
    -
-
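    - Concretely, standard scaling replaces each value x of a metric with z = (x - avg(x)) / stdev(x), computed per metric. As a rough sketch of what the fit StandardScaler step used later in this app performs, the same transform for one field can be written in plain SPL (the field name is from this dataset; the fit command remains the recommended approach because it also saves the scaler for reuse):

    sourcetype="iot_pm_fail"
    | eventstats avg(sname_HPC_Outlet_Temp) as mu, stdev(sname_HPC_Outlet_Temp) as sigma
    | eval SS_sname_HPC_Outlet_Temp=(sname_HPC_Outlet_Temp-mu)/sigma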

- - Event Window : Capturing an event window - -

-
    -

    - Event windowing is a technique for capturing a full data cycle of the subject you want to analyze so that you can profile its behavior. After the normalization process, the full data cycles that reflect "a maintenance cycle" become clear. In a jet engine example like the one below, an "event window" is the set of metric readings from the initial maintenance-completed state to the completely deteriorated state of an engine. These windows of events can be used as the inputs to train a machine learning model (a sketch of how windows are numbered follows this subsection). -

    - -
    -
      - -
      - - The results above show a clear event window that reflects a jet engine cycle from newly maintained to a deterioration state of the engine. A set of metrics from each time slice represents a numerical summary of a cycle. In the context of air transportation, this equates to average readings from a flight.
      - - The length of this event window is approximately 120-400+ cycles (or flights) for a jet engine, which covers anywhere from 3 months to 10 months of use.
      -
      -
    -
-
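    - A sketch of how such windows can be numbered in SPL: because unit_cycle restarts at 1 after each maintenance, a running count of those restarts assigns every event a window id. This mirrors the streamstats technique used in this app's dashboard searches; window number 194 is the example window used in the steps below:

    sourcetype="iot_pm_fail"
    | reverse
    | streamstats count(eval(unit_cycle=1)) as events_frame_id
    | search events_frame_id=194
    | table events_frame_id unit_cycle sname*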

- - Feature Exploration : Dataset feature analysis - -

-
    -

    - By exploring the dataset, we can understand which features of the data are related to or correlated with maintenance performance metrics. Features in the dataset are the types of recorded values that capture the behavior of a particular asset in an analysis. Selecting features is a critical step in making a prediction model relevant. It is important to choose both: -

          -
        • Features that are relevant to the problem you are trying to solve, and
        • -
        • Features that best represent the desired analytics result.
        • -
    -

    -

    - The related features reflect the wear and tear trends in the data. Using this data, we can make decisions about which features are suitable for creating a machine learning prediction model. Exploring the data helps us gain insight into : -

          -
        • How different performance metrics deviate and what the trends look like.
        • -
        • How long the equipment lasts, and when the asset starts to deteriorate and when it fails.
        • -
        • Which features in the data reflect higher correlations and deviation related to the wear and tear.
        • -
    -

    -
      - -
      -
      - - Looking at the full event window (a full maintenance cycle of an engine), we can clearly distinguish how the metrics stay within a certain range after maintenance, then deviate as the engine accumulates more cycles/flights of use. We can also identify that some features have a stronger correlation with the usage of the asset than others. This is an important factor to consider when selecting optimal features for training a model in the later stage.
      -
    -
    -
      - -
      -
      - - Looking at the same result using the Box Plots visualization, it is easier to identify potential features for a machine learning model. You can use the Box Plots visualization to see a simple distribution of the data in each feature. The taller the bar, the greater the deviation of values, which means the feature is more relevant or more highly associated with wear and tear. Therefore, the features with taller box plots are the more optimal features to select for analysis.
      - Based on the results of the data exploration, we can select the following features as key for building the prediction model in the next stage: Bleed Enthalpy, Bypass Ratio, Corr Fan Speed, HPT Coolant Bleed, LPC Outlet Temp, LPT Outlet Temp, Phys Fan Speed -
      -
      -
    -
- -
-
- - - - - -

- - STEP BY STEP : - -

-

- This step-by-step section guides you through applying the key techniques covered in the "Methodology Review" to the jet engine dataset. The left column provides detailed instructions and the right column provides the results of each step. Use the button to jump directly into a live environment for each step. -

-

- - Data-set preprocessing : Normalization (Standard Scaling) - -

-
    -

    Using the jet engine test dataset from the last section, review and apply normalization (or standard scaling) to the original sensor data, then discover the patterns in the data. These steps guide you through the process of visualizing your data and normalizing your dataset values to a standard scale. -

      -
    -

    -
- -
-
- - - - - - - - - - -
    - - STEP 01 - : Select the jet engine dataset by searching for the source type iot_pm_fail. Then use the reverse, table, and head 1000 commands to sort and view the first 1000 records of the jet engine performance dataset. The reverse command sorts the order of events from oldest to most recent. -
    -
      -
      sourcetype=iot_pm_fail
      -| reverse
      -| table unit_cycle sname*
      -| head 1000
      - - The output presents the data in table format, sorted from the oldest record. Each line represents a set of summary statistics for a single cycle, or flight (a trip). -

      - - - -
    -
- -
- - - - RESULT STEP 01 : Retreived Raw Data in Table - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - -
-
-
- - - - - - - - - - -
    - - STEP 02 - : To view the data as a line chart, select "Visualization", then select the visualization type "Downsampled line chart". -
    -
      - -
      - - The result shows the line chart of collected sensor values in original scales. -
      -
    -
- -
- - - - RESULT STEP 02 : Viewing all Raw Data (metrics) in original scales - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - - - - - - -
- - - - - - - - - -
    - - STEP 03 - : Add the "fit" command to normalize the differently-scaled data into a single scale. The fit command places the normalization-processed values into new fields starting with “SS_*”. -
    -
      -
      sourcetype="iot_pm_fail"
      -| reverse
      -| table unit_cycle sname*
      -| fit StandardScaler sname_Bleed_Enthalpy sname_Bypass_Ratio sname_Corr_Core_Speed sname_Corr_Fan_Speed sname_Fuel_Flow_Ratio sname_HPC_Outlet_Temp sname_HPT_Coolant_Bleed sname_LPC_Outlet_Temp sname_LPT_Outlet_Temp sname_Phys_Core_Speed sname_Phys_Fan_Speed with_mean=true with_std=true into app_pm_SS
      -| table unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed
      -| head 1000
      - - The result shows clear patterns and cycles in the data, making it easier to distinguish event windows. -

      - - - -
    -
- -
- - - - RESULT STEP 03 : After data values normalization (Standard Scaling) in a single scale - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | fit StandardScaler sname_Bleed_Enthalpy sname_Bypass_Ratio sname_Corr_Core_Speed sname_Corr_Fan_Speed sname_Fuel_Flow_Ratio sname_HPC_Outlet_Temp sname_HPT_Coolant_Bleed sname_LPC_Outlet_Temp sname_LPT_Outlet_Temp sname_Phys_Core_Speed sname_Phys_Fan_Speed with_mean=true with_std=true into app_pm_SS - | table unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed - | head 1000 - - 0 - 1 - - - - - - - - -
- - - - -

- - Event Window : Capturing an event window - -

-
    -

    Now the event windows are clearly visible. An event window is a set of data that shows a full cycle of a jet engine from a maintained state to a worn-out state. This app includes a macro to group and clip the event window for you. Review the "get_pm_event_window" macro definition for your reference. -

    -
- -
-
- - - - - - - - - - - -
    - - STEP 04 - : Using the macro "get_pm_event_window", retrieve event window number 194. The macro `get_pm_event_window(194)` takes an argument of the window number. -
    -
      -
      `get_pm_event_window(194)`
      - - Event window #194 is an event window of a jet engine that flew more than 350 cycles/flights. -
      - - The get_pm_event_window macro already includes the normalization step as part of the macro, which simplifies preprocessing steps in analysis. -

      - - -
    -
    -
- -
- - - - RESULT STEP 04 : Full "Event Window" capture from good state to fail state - - `get_pm_event_window(194)` - - 0 - 1 - - - - - - - - - -
- - - - - -

- - Feature Exploration : Data-set feature analysis - -

-
    -

    Now, let's understand the characteristics of the jet engine data. This step uses some transformation and visualization techniques to select the features that are important when creating a machine learning model.

    -
- -
-
- - - - - - - - - - - -
    - - STEP 05 - : Smooth out the spikes in the data using the trendline command to better render the characteristics of the jet engines. The trendline command applies a smoothing technique that averages metric values so you can better focus on the trends in the data. In this example, specify the sma4 function to average the last 4 values of each metric (a small worked sketch of sma4 follows this step). -
    -
      -
      `get_pm_event_window(194)` |
      - trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed)
      -| where unit_cycle>3
      -| table sma4*
      - - trendline sma4 removes temporal spikes from the trends, which makes the characteristics of features more distinct.
      - - where unit_cycle>3 is used to filter out empty rows.
      - - After applying the smoothing function, the smoothed out fields start with sma4*, so use the table command to specify sma4* and view only those fields in the results.
      -
      - -
    -
- -
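    - As a small worked sketch of the smoothing used above: sma4 is a simple moving average over the four most recent readings, sma4(x) at cycle t = ( x(t-3) + x(t-2) + x(t-1) + x(t) ) / 4, which is why the first three cycles lack a complete average and the step filters them with where unit_cycle>3. An equivalent rolling average for a single field can be computed with streamstats (illustrative only; trendline is the idiomatic command):

    `get_pm_event_window(194)`
    | streamstats window=4 avg(SS_sname_HPC_Outlet_Temp) as manual_sma4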
- - - - RESULT STEP 05 : Applying moving averages to smooth-out temporal spikes - - `get_pm_event_window(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - - 0 - 1 - - - - - - - - - -
- - - - - - - - - - -
    - - STEP 06 - : To use a box plot to visualize all of the features (fields), use the `boxplot` macro to compute statistics on the data dispersion of each feature. -
    -
      -
      `get_pm_event_window(194)` 
      -| trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed)
      -| where unit_cycle>3
      -| table sma4*
      -| `boxplot`
      - - The boxplot macro calculates the min, median, max, exactperc25, and exactperc75 statistics per feature (a hypothetical sketch of such a macro follows this step). Here is the description of each statistic: -
          -
        • max (Maximum) - Maximum value of the feature metrics
        • -
        • exactperc75 - Upper quartile of the feature metrics
        • -
        • med (Median) - Middle value (median) of the feature metrics
        • -
        • exactperc25 - Lower quartile of the feature metrics
        • -
        • min (Minimum) - Minimum value of the feature metrics
        • -

      - - -
    -
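    - The boxplot macro definition ships with this app; as a hypothetical sketch of how such statistics can be produced, the feature columns can be unpivoted with untable and then summarized per feature (this assumes the unit_cycle column is kept as the row key; the app's actual macro may be defined differently):

    ... | table unit_cycle sma4*
    | untable unit_cycle feature value
    | stats min(value) as min, exactperc25(value) as exactperc25, median(value) as med, exactperc75(value) as exactperc75, max(value) as max by feature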
- -
- - - - RESULT STEP 06 : Statistics for boxplot visualization - - `get_pm_event_window(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - | `boxplot` - - 0 - 1 - - -
-
-
- - - - - - - - - - - -
    - - STEP 07 - : To use the Box Plots visualization to clarify feature selection, select "Visualization" and then select the "Boxplot Chart" visualization type. -
    -
      - -
      - - The box plot chart makes it easy to distinguish the jet engine data features that are more sensitive to change (the features with longer bars). -
      - - It is now quite clear which features from the jet engine dataset are most associated with wear and tear. These features will be used in the next stage. -
      -
      - -
    -
- -
- - - - RESULT STEP 07 : Alternative Visualization to better see "Features" in data-set - - `get_pm_event_window(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - | `boxplot` - - 0 - 1 - - - - - - - - -
- - - - - -

- - What's Next? - -

-
    -

    - You completed the Explore Data stage in Splunk Essentials for Predictive Maintenance. Now that you have identified the key elements in the predictive maintenance dataset, move on to the next stage to evaluate the analytics methods that will get you the answers you need. -

    - - NEXT STEP : Evaluate different analytics options. -
    -
    - - - - - - -
- - - - - -
-
-

- Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    If you need immediate assistance, check out our community forum, Splunk Answers. -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From e793d99d7e5db2d045c9ad5a0696c1ec190b8804 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:33:19 -0500 Subject: [PATCH 30/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml --- .../ui/views/sa_sc_exploration_demo_01.xml | 155 ------------------ 1 file changed, 155 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml deleted file mode 100644 index 21e2006..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_exploration_demo_01.xml +++ /dev/null @@ -1,155 +0,0 @@ - - - - - -

- - EXPLORE DATA : Understanding Predictive Maintenance Data-set -

- -
-
- - - RAW Source Engine Sensor Data - - Each records holds summary of engines performance / status metrics on each trips (cycles) - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - -
-
-
- - - Explore Original Data - - Viewing all Raw Data (metrics) in original scales - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - - - - - - - - - - Standard Scaling Data-sets - - After data values normalization (Standard Scaling) in a single scale - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | fit StandardScaler sname_Bleed_Enthalpy sname_Bypass_Ratio sname_Corr_Core_Speed sname_Corr_Fan_Speed sname_Fuel_Flow_Ratio sname_HPC_Outlet_Temp sname_HPT_Coolant_Bleed sname_LPC_Outlet_Temp sname_LPT_Outlet_Temp sname_Phys_Core_Speed sname_Phys_Fan_Speed with_mean=true with_std=true into app_pm_SS - | table unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed - | head 1000 - - 0 - 1 - - - - - - - - - - - - Capturing an Event Frame - - Full "Event Frame" capture from good state to fail state - - `get_pm_event_frame(194)` - - 0 - 1 - - - - - - - - - - - - -

- - Feature Exploration : Data-set feature analysis - -

-
    -

    Now, let's understand the characteristics of the jet engine data. We will use a couple of transformation and visualization techniques to select the features that are important in preparing for machine learning model creation.

    -
- -
-
- - - Feature Explorations - Trends - - Applying moving averages to smooth-out temporal spikes - - `get_pm_event_frame(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - - 0 - 1 - - - - - - - - - - - - Feature Explorations - Trends - - Alternative Visualization to better see "Features" in box plots - - `get_pm_event_frame(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - - 0 - 1 - - - - - - - - - -
From e18783a4509fc0d662878b7dc3f9d018ecd6070e Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:33:39 -0500 Subject: [PATCH 31/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_get_data_01.xml --- .../data/ui/views/sa_sc_get_data_01.xml | 386 ------------------ 1 file changed, 386 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_get_data_01.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_get_data_01.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_get_data_01.xml deleted file mode 100644 index 414aa61..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_get_data_01.xml +++ /dev/null @@ -1,386 +0,0 @@ - - - - - - - -
    -

    - - Getting data into Splunk -

    - -

    For our predictive maintenance analysis journey, let’s start with getting data into Splunk. Splunk software makes it easy to collect and ingest industrial data and make it available for predictive analytics; a minimal collection sketch follows below.

    -
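As one concrete collection path — a universal forwarder monitoring exported sensor files — the stanzas below sketch the idea. This is a minimal sketch only; the file path, index, and parsing settings are hypothetical and would differ per environment:

    # inputs.conf (universal forwarder) -- watch exported engine-cycle files; path is illustrative
    [monitor:///opt/sensor_exports/engine_cycles/*.csv]
    sourcetype = iot_pm_fail
    index = main

    # props.conf (indexer) -- treat each line as one event
    [iot_pm_fail]
    SHOULD_LINEMERGE = false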
- - - - Stage_01 - Stage_02 - Stage_03 - Stage_03-01 - Stage_03-02 - Stage_03-04 - Stage_04 - -
- -
-
- - - - - - - - - - -

- - OBJECTIVE : -

-
    -

    Splunk software lets you consolidate machine data and rapidly use data processed in real time for machine learning. This section covers basic data collection methods for different technologies, with a brief introduction to the features of Splunk that make it easier to do this with your data.

    -

    If you’re already using Splunk software, you can skip this section. If you are new to Splunk, review the linked resources to understand the different ways you can use Splunk software to consolidate data in your environment.

    - -

    - After reading this section, you will be able to - : -

      -
        -
      • Understand how Splunk software integrates data from different systems.
      • -
      • Understand how to make real-time data available in Splunk software.
      • -
      • Create models to easily analyze data in Splunk software.
      • -
      -
    -

    -
    -
- -
-
- - - -

- - METHODOLOGY REVIEW : - -

-

This section introduces the overall data integration technology and helps practitioners understand how data from different industrial systems is brought into Splunk.

-

- - - Getting Data In : - Ingest Industrial IOT data into Splunk - -

-
    -

    The following illustration summarizes how different industrial data is collected through Splunk. Splunk uses its native integration technologies to stream data in real time and leverages partner technologies to integrate specialized industrial data, such as historians; a minimal sketch of one push-based path follows the list below.

    -
    -
      - -
      -
      - - Splunk data collection and integration capability highlights : -
      -
        -
          -
        • Splunk allows all structured and unstructured data to be collected.
        • -
        • Splunk has many partnerships to integrate historians.
        • -
        • Splunk has native support for industry-specific protocols such as OPC UA, OPC DA, OPC A&E, and MQTT.
        • -
          -
        -
      -
    -
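For sources that push rather than write files — for example, a gateway bridging MQTT or OPC traffic — Splunk's HTTP Event Collector (HEC) provides a native ingestion endpoint. A minimal sketch of enabling a token; the stanza name, token value, and index are hypothetical:

    # inputs.conf -- enable an HTTP Event Collector token for an IoT gateway
    [http://iot_gateway]
    token = 00000000-0000-0000-0000-000000000000
    sourcetype = iot_pm_fail
    index = main
    disabled = 0

The gateway then POSTs JSON events to the /services/collector/event endpoint, passing the token in the Authorization header.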
- -
-
- - - -

- - TECHNICAL RESOURCES : - -

-

This technical resources page guides you through resources to get started with using Splunk as your industrial data platform. Please review the content and resource links here. The primary focus of Splunk Essentials for Predictive Maintenance is to demonstrate how data in Splunk can be rapidly utilized to drive maintenance decisions, so the detailed techniques and mechanisms for integrating data are not covered.

-

- - - Documentation and Content for “Getting Data In” - -

-
    -

    Online documentation and videos that help you understand how to integrate data in Splunk.

    -
- -
-
- - - - - - - - - - - - - - - - - - - “Getting Data In” online documentation -
- - -
-
- - - - - - - - - - - - - - - - - - - Training video - “Getting Data In with Forwarders” -
- - -
-
- - - -

- - - Splunk Apps : - splunkbase.splunk.com - -

-
    -

    The Splunk, partner, and community-driven app exchange ecosystem :

    -
- -
-
- - - - - - - - - - - - - - - - - - - Splunk App ecosystem : splunkbase.splunk.com
- - -
-
- - - -

- - What's Next? - -

-
    -

    - You reviewed the - Getting Data In - stage in the - Splunk Essentials for Predictive Maintenance - app. Now that you know how the data can be processed in Splunk, move on to the next stage, "Explore Data", to get predictive analysis started. -

    - - NEXT STEP - : Explore maintenance data from jet engines. -
    -
    - - - - - - -
- - - - - -
-
-

- - Do you need help? Ask an Industrial IOT Predictive Maintenance Expert -

-
    -

    Need help with your environment and requirements? Send us your questions and we will get back to you as soon as possible.

    -

    - If you need immediate assistance, check out our community forum, - Splunk Answers. -

    - Contact Us -
-
-
-
- Predictive Maintenance expert photo -
-
- -
-
-
\ No newline at end of file From d8287fdcee4c08613558406dc4c4fb0ac0ec8c0f Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:34:07 -0500 Subject: [PATCH 32/34] Delete Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_intro_demo.xml --- .../data/ui/views/sa_sc_intro_demo.xml | 288 ------------------ 1 file changed, 288 deletions(-) delete mode 100644 Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_intro_demo.xml diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_intro_demo.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_intro_demo.xml deleted file mode 100644 index f9a2f40..0000000 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_intro_demo.xml +++ /dev/null @@ -1,288 +0,0 @@ - - - - - -

- - - EXPLORE YOUR ASSET DATA : Turbine Engine Predictive Maintenance -

- - - - - -
-
- - - Statistics on number of cycles each engine served until failure - - - sourcetype="iot_pm_fail" -| transaction startswith="unit_cycle=1" -| rename _cd as UID -| eval unit_no="unit_"+unit_no -| stats max(unit_cycle) as last by unit_no _raw -| table unit_no last -| rename unit_no as "Engine ID", last as "Last Number of Cycle" - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Statistics on number of cycles each engine served until failure - ordered by most cycles served per engine - - - sourcetype="iot_pm_fail" -| transaction startswith="unit_cycle=1" -| rename _cd as UID -| eval unit_no="unit_"+unit_no -| stats max(unit_cycle) as last by unit_no _raw -| table unit_no last -| eval scheduled_standard_maintenance_due = 120 -| sort - last -| rename unit_no as "Engine ID", last as "Last Number of Cycle" - 0 - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
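Because every engine in this data set runs until failure, the per-unit cycle counts above can be turned into a remaining-useful-life label for supervised learning. A minimal sketch using the unit_no and unit_cycle fields from the searches above; the 20-cycle "near failure" window is an illustrative assumption, not a value from the app:

    sourcetype="iot_pm_fail"
    | eventstats max(unit_cycle) as last_cycle by unit_no
    | eval remaining_cycles = last_cycle - unit_cycle
    | eval near_failure = if(remaining_cycles <= 20, 1, 0)
    | table unit_no unit_cycle remaining_cycles near_failure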

- - EXPLORE DATA : Understanding Predictive Maintenance Data-set -

- -
-
- - - RAW Source Engine Sensor Data - - Each record holds a summary of engine performance / status metrics for each trip (cycle) - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - -
-
-
- - - Explore Original Data - - Viewing all raw data (metrics) in their original scales - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | head 1000 - - 0 - 1 - - - - - - - - - - - - Standard Scaling Data-sets - - After normalizing data values (Standard Scaling) to a single scale - - sourcetype="iot_pm_fail" - | reverse - | table unit_cycle sname* - | fit StandardScaler sname_Bleed_Enthalpy sname_Bypass_Ratio sname_Corr_Core_Speed sname_Corr_Fan_Speed sname_Fuel_Flow_Ratio sname_HPC_Outlet_Temp sname_HPT_Coolant_Bleed sname_LPC_Outlet_Temp sname_LPT_Outlet_Temp sname_Phys_Core_Speed sname_Phys_Fan_Speed with_mean=true with_std=true into app_pm_SS - | table unit_cycle SS_sname_Bleed_Enthalpy SS_sname_Bypass_Ratio SS_sname_Corr_Core_Speed SS_sname_Corr_Fan_Speed SS_sname_Fuel_Flow_Ratio SS_sname_HPC_Outlet_Temp SS_sname_HPT_Coolant_Bleed SS_sname_LPC_Outlet_Temp SS_sname_LPT_Outlet_Temp SS_sname_Phys_Core_Speed SS_sname_Phys_Fan_Speed - | head 1000 - - 0 - 1 - - - - - - - - - - - - Capturing an Event Frame - - Full "Event Frame" capture from good state to failed state - - `get_pm_event_frame(194)` - - 0 - 1 - - - - - - - - - - - - -
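The `get_pm_event_frame(194)` searches rely on a macro that slices out one engine's complete run, from healthy state to failure. The shipped definition is not included in this diff; one plausible shape, as a hypothetical sketch in macros.conf:

    # macros.conf -- hypothetical reconstruction of the event-frame macro
    [get_pm_event_frame(1)]
    args = unit
    definition = sourcetype="iot_pm_fail" unit_no=$unit$ | apply app_pm_SS | reverse | table unit_cycle SS_sname_*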

- - Feature Exploration : Data-set feature analysis - -

-
    -

    Now, let's understand the characteristics of the jet engine data. We will use a couple of transformation and visualization techniques to select the features that are important when preparing to create machine learning models.

    -
- -
-
- - - Feature Explorations - Trends - - Applying moving averages to smooth out temporal spikes - - `get_pm_event_frame(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - - 0 - 1 - - - - - - - - - - - - Feature Explorations - Trends - - Alternative visualization to better see "features" in box plots - - `get_pm_event_frame(194)` - | trendline sma4(SS_sname_Bleed_Enthalpy), sma4(SS_sname_Bypass_Ratio), sma4(SS_sname_Corr_Core_Speed), sma4(SS_sname_Corr_Fan_Speed), sma4(SS_sname_Fuel_Flow_Ratio), sma4(SS_sname_HPC_Outlet_Temp), sma4(SS_sname_HPT_Coolant_Bleed), sma4(SS_sname_LPC_Outlet_Temp), sma4(SS_sname_LPT_Outlet_Temp), sma4(SS_sname_Phys_Core_Speed), sma4(SS_sname_Phys_Fan_Speed) - | where unit_cycle>3 - | table sma4* - | `boxplot` - - 0 - 1 - - - - - - - - - -
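The `boxplot` macro at the end of the second search renders each feature's distribution as quartiles, which makes spread and outliers easier to compare than overlapping line charts. A rough equivalent of the summary such a macro computes — a sketch, since the macro's actual definition is not shown in this diff:

    `get_pm_event_frame(194)`
    | table unit_cycle SS_sname_*
    | untable unit_cycle feature value
    | stats min(value) as lower, p25(value) as q1, median(value) as med, p75(value) as q3, max(value) as upper by feature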
\ No newline at end of file From e2a644ccd1fe9acb5c17e15d1e57750b9bdb2c7f Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:36:27 -0500 Subject: [PATCH 33/34] Update sa_sc_demo_01_mon_edi_trans.xml --- .../ui/views/sa_sc_demo_01_mon_edi_trans.xml | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01_mon_edi_trans.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01_mon_edi_trans.xml index 018c060..bdbea9a 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01_mon_edi_trans.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_01_mon_edi_trans.xml @@ -2,7 +2,7 @@ Monitoring all EDI X12 / SAP iDocs Transactions from ERP / Data Exchange Platforms - index=supply_chain_edi sourcetype="edi:x12" + sourcetype="edi:x12" | fields + * $INPUT_TIME_GLOBAL.earliest$ $INPUT_TIME_GLOBAL.latest$ @@ -34,7 +34,7 @@ Total Number of EDI/iDocs Requests - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -67,7 +67,7 @@ Transactions with No Response - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -102,7 +102,7 @@ Error / Retry Rates - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -138,7 +138,7 @@ Response Performance - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_count by edi_tr_id edi_code_groupby @@ -183,7 +183,7 @@ Status of all EDI transactions - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -202,7 +202,7 @@ Trends on EDI response status - Visual Line Chart - index=supply_chain_edi 
sourcetype="edi:x12" + sourcetype="edi:x12" | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -225,7 +225,7 @@ Error + Retry Rate with Trends over time - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -245,8 +245,8 @@ Top Suppliers with EDI Processing Issues - index=supply_chain_edi sourcetype="edi:x12" NOT edi_code=997 - [ search index=supply_chain_edi sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 + sourcetype="edi:x12" NOT edi_code=997 + [ search sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | strcat edi_code "-" edi_ack_status edi_event_pattern_combo @@ -299,7 +299,7 @@ Trends on EDI transmission success rates with volume - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -347,7 +347,7 @@ EDI transaction processing performance - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_count by edi_tr_id edi_code_groupby @@ -363,7 +363,7 @@ EDI SLA violation trends - Volume of EDIs with long processing times - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -402,7 +402,7 @@ - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type edi_name | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -433,7 +433,7 @@ Details on EDI transactions above SLA 
- ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -465,4 +465,4 @@
- \ No newline at end of file + From a1b12136c315febc7b1a2931d46d7886bb5bfc13 Mon Sep 17 00:00:00 2001 From: cduffey-splunk <93333731+cduffey-splunk@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:38:30 -0500 Subject: [PATCH 34/34] Update sa_sc_demo_02_po_lifecycle_mgmt.xml --- .../views/sa_sc_demo_02_po_lifecycle_mgmt.xml | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02_po_lifecycle_mgmt.xml b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02_po_lifecycle_mgmt.xml index e40eb25..c5951cc 100644 --- a/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02_po_lifecycle_mgmt.xml +++ b/Supply Chain/App/supply_chain_soln_accelerator/default/data/ui/views/sa_sc_demo_02_po_lifecycle_mgmt.xml @@ -2,7 +2,7 @@ Monitoring all EDI X12 / SAP iDocs Transactions from ERP / Data Exchange Platforms - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | fields + * $INPUT_TIME_GLOBAL.earliest$ $INPUT_TIME_GLOBAL.latest$ @@ -34,7 +34,7 @@ Total Number of POs - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -67,7 +67,7 @@ POs with No Response - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -102,7 +102,7 @@ PO Error / Retry Rates - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -138,7 +138,7 @@ PO Response Performance - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_count by edi_tr_id edi_code_groupby @@ -183,7 +183,7 @@ Total stats on EDI response status (Chart) - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -202,7 +202,7 @@ Trends on EDI 
response status - Visual Line Chart - index=supply_chain_edi sourcetype="edi:x12" edi_code IN (850, 997) + sourcetype="edi:x12" edi_code IN (850, 997) | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -225,7 +225,7 @@ Visualization on error + retry trend - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats last(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -245,8 +245,8 @@ Suppliers with EDI Processing Issues - index=supply_chain_edi sourcetype="edi:x12" NOT edi_code=997 - [ search index=supply_chain_edi sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 + sourcetype="edi:x12" NOT edi_code=997 + [ search sourcetype="edi:x12" source=edi_quantumline_customer_full NOT edi_code=997 | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | strcat edi_code "-" edi_ack_status edi_event_pattern_combo @@ -299,7 +299,7 @@ Trends on EDI transmission success rates w/ occurrences - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -346,7 +346,7 @@ RESULT ANALYSIS 01 : EDI Transactions with response status determined. 
- ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, list(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status, count as evt_count by edi_tr_id edi_code_groupby @@ -362,7 +362,7 @@ RESULT ANALYSIS 02 : Total stats on EDI response status - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -401,7 +401,7 @@ - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | table _time edi_ack_status edi_buyer edi_code edi_code_ack edi_cont_num edi_date edi_flag edi_requestor edi_responder edi_seller edi_sequence edi_time edi_tr_id edi_type edi_name | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) @@ -429,7 +429,7 @@
RESULT ANALYSIS 02 : Total stats on EDI response status - ```index=supply_chain_edi sourcetype="edi:x12" ``` + | eval edi_ack_status_combo=edi_code+"-"+edi_code_ack+"-"+edi_ack_status | eval edi_code_groupby=if(isnull(edi_code_ack), edi_code, edi_code_ack) | stats latest(_time) as time_last, earliest(_time) as time_first,first(_time) as _time, last(edi_requestor) as edi_requestor, last(edi_responder) as edi_responder, first(edi_code) as edi_type, values(edi_code) as edi_code, last(edi_ack_status) as edi_ack_status by edi_tr_id edi_code_groupby @@ -461,4 +461,4 @@
- \ No newline at end of file +
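Both dashboard updates above apply the same refactor: each panel's inline index=supply_chain_edi sourcetype="edi:x12" search is commented out (``` ``` delimits a comment in SPL) and replaced with a post-process of the shared base search declared at the top of the form, which now carries the sourcetype filter and | fields + *. A minimal sketch of the pattern in Simple XML; the id and the example panel are illustrative:

    <form>
      <search id="base_edi">
        <query>sourcetype="edi:x12" | fields + *</query>
        <earliest>$INPUT_TIME_GLOBAL.earliest$</earliest>
        <latest>$INPUT_TIME_GLOBAL.latest$</latest>
      </search>
      <row>
        <panel>
          <chart>
            <search base="base_edi">
              <query>| stats count by edi_ack_status</query>
            </search>
          </chart>
        </panel>
      </row>
    </form>

The base search runs once and each base= panel filters or aggregates its cached results, which is why every panel query in these patches now starts with a pipe.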