diff --git a/Makefile b/Makefile
index d97b94a42..8200e25d9 100644
--- a/Makefile
+++ b/Makefile
@@ -379,9 +379,9 @@ package-clean:
.PHONY: lasp-test
lasp-test: daemon
if [ ! $(SUITE_LASP) ]; then echo "USAGE: make lasp-test SUITE_LASP=suite-most-secure"; exit 1; fi
- if [ "$(LICENSE_lasp_$(subst -,_,$(SUITE_LASP)))" = "" ] ; then echo "Missing license for $(SUITE_LASP)"; exit 1; fi
+ @if [ "$(LICENSE_lasp_$(subst -,_,$(SUITE_LASP)))" = "" ] ; then echo "Missing license for $(SUITE_LASP)"; exit 1; fi
if [ ! -d "tests/lasp/$(SUITE_LASP)" ]; then echo "No such suite in tests/lasp folder"; exit 1; fi
- for PHP in $${PHPS:-8.1 8.0 7.4 7.3 7.2 7.1 7.0 5.6 5.5}; do \
+ @for PHP in $${PHPS:-8.1 8.0 7.4 7.3 7.2 7.1 7.0 5.6 5.5}; do \
echo; echo "# PHP=$${PHP}"; \
NRLAMP_PHP=$${PHP} bin/integration_runner $(INTEGRATION_ARGS) -loglevel debug \
-license $(LICENSE_lasp_$(subst -,_,$(SUITE_LASP))) \
diff --git a/VERSION b/VERSION
index 2b42764c6..a13e7b9c8 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-9.21.0
+10.0.0
diff --git a/agent/php_environment.c b/agent/php_environment.c
index aac910ec9..1a86f4337 100644
--- a/agent/php_environment.c
+++ b/agent/php_environment.c
@@ -382,6 +382,134 @@ static void nr_php_gather_dispatcher_information(nrobj_t* env) {
nro_set_hash_string(env, "Dispatcher", dstring);
}
+void nr_php_process_environment_variable_to_nrobj(const char* prefix,
+ const char* key,
+ const char* value,
+ nrobj_t* kv_hash) {
+ if ((NULL == prefix) || (NULL == kv_hash) || (NULL == key)) {
+ return;
+ }
+
+ if (nr_strlen(prefix) >= nr_strlen(key)) {
+ return;
+ }
+
+ if (0 == nr_strncmp(key, prefix, nr_strlen(prefix))) {
+ nro_set_hash_string(kv_hash, key, value);
+ }
+}
+
+char* nr_php_process_environment_variable_to_string(const char* prefix,
+ const char* key,
+ const char* value,
+ char* kv_string,
+ const char* kv_delimiter,
+ const char* delimiter) {
+ if ((NULL == prefix) || (NULL == key)) {
+ return kv_string;
+ }
+
+ if (nr_strlen(prefix) >= nr_strlen(key)) {
+ return kv_string;
+ }
+
+ if (0 == nr_strncmp(key, prefix, nr_strlen(prefix))) {
+ kv_string = nr_str_append(kv_string, key + nr_strlen(prefix), delimiter);
+ kv_string = nr_str_append(kv_string, value, kv_delimiter);
+ }
+ return kv_string;
+}
+
+/*
+ * Current variables we scan are:
+ * 1. Variables with the key prefix NEW_RELIC_METADATA_
+ * 2. Variables with the key prefix NEW_RELIC_LABEL_
+ * 3. Variable with the key NEW_RELIC_LABELS
+ */
+static void nr_php_get_environment_variables(TSRMLS_D) {
+ nrobj_t* parsed_key_val = NULL;
+ const char* plural_label = NULL;
+
+ /*
+ * `environ` works for non-windows machines.
+ * Otherwise, we'd need to use *__p__environ() as well.
+ */
+ extern char** environ;
+
+ /*
+ * Initialize the metadata hash. If there aren't any variables, we still need
+ * to send the empty hash.
+ */
+ NR_PHP_PROCESS_GLOBALS(metadata) = nro_new_hash();
+
+ /* Initialize the environment labels string to collect labels.
+ * If labels are specified in both the agent configuration file and the
+ * NEW_RELIC_LABELS environment variable or prefixed with the NEW_RELIC_LABEL_
+ * key prefix, the agent MUST use existing configuration precedence rules to
+ * determine which set of labels to send to New Relic. Configuration via
+ * environment variables always trumps file-based configuration, so if
+ * duplicate labels are specified in both the agent configuration file and the
+ * environment variable, the duplicate labels from the environment variable
+ * should be used. By appending the environment variables to the end of the
+ * ini string, we ensure the environmental variables take precedence when it
+ * is entered into the label hash. Additionally, with the environmental and
+ * ini values combined, we are able to take advantage of all the previous
+ * label structures, tests, and string validation when `nr_php_txn_get_labels`
+ * puts the string in a hash.
+ */
+ NR_PHP_PROCESS_GLOBALS(env_labels)
+ = nr_str_append(NR_PHP_PROCESS_GLOBALS(env_labels), NRINI(labels), ";");
+
+ /*
+ * Search environment for the specific keys (not a prefix) that NR supports.
+ */
+
+ /*
+ * Plural labels key.
+ */
+ plural_label = getenv(NR_LABELS_PLURAL_KEY);
+ if (!nr_strempty(plural_label)) {
+ NR_PHP_PROCESS_GLOBALS(env_labels)
+ = nr_str_append(NR_PHP_PROCESS_GLOBALS(env_labels), plural_label, ";");
+ }
+
+ /*
+ * Get the environment to parse the variables for that have prefixes we are
+ * interested in. If we are unable to get the environment don't try to parse
+ * it.
+ */
+ if (NULL == environ) {
+ nrl_warning(NRL_AGENT, "%s: Unable to access environmental variables.",
+ __func__);
+ return;
+ }
+
+ /*
+ * Iterate through the environment variables, searching for a single key or
+ * a set of keys with a prefix that the agent will use.
+ */
+ for (size_t i = 0; environ[i] != NULL; i++) {
+ parsed_key_val = nr_strsplit(environ[i], "=", 0);
+ if ((NULL == parsed_key_val) || (2 != nro_getsize(parsed_key_val))) {
+ nrl_verbosedebug(NRL_AGENT,
+ "%s: Skipping malformed environmental variable %s",
+ __func__, environ[i]);
+ } else {
+ const char* key = nro_get_array_string(parsed_key_val, 1, NULL);
+ const char* value = nro_get_array_string(parsed_key_val, 2, NULL);
+ nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, key, value, NR_PHP_PROCESS_GLOBALS(metadata));
+ NR_PHP_PROCESS_GLOBALS(env_labels)
+ = nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, key, value,
+ NR_PHP_PROCESS_GLOBALS(env_labels), ":", ";");
+ }
+ nro_delete(parsed_key_val);
+ }
+ nrl_verbosedebug(NRL_AGENT,
+ "%s: set NR_PHP_PROCESS_GLOBALS(env_labels) labels %s",
+ __func__, NR_PHP_PROCESS_GLOBALS(env_labels));
+}
nrobj_t* nr_php_get_environment(TSRMLS_D) {
nrobj_t* env;
@@ -391,6 +519,7 @@ nrobj_t* nr_php_get_environment(TSRMLS_D) {
nr_php_gather_machine_information(env);
nr_php_gather_dynamic_modules(env TSRMLS_CC);
nr_php_gather_dispatcher_information(env);
+ nr_php_get_environment_variables(TSRMLS_C);
return env;
}
diff --git a/agent/php_environment.h b/agent/php_environment.h
index 30eba7f9f..333e2b171 100644
--- a/agent/php_environment.h
+++ b/agent/php_environment.h
@@ -9,6 +9,10 @@
#ifndef PHP_ENVIRONMENT_HDR
#define PHP_ENVIRONMENT_HDR
+#define NR_METADATA_KEY_PREFIX "NEW_RELIC_METADATA_"
+#define NR_LABELS_PLURAL_KEY "NEW_RELIC_LABELS"
+#define NR_LABELS_SINGULAR_KEY_PREFIX "NEW_RELIC_LABEL_"
+
/*
* Purpose : Produce the object that describes the invariant parts of the
* execution environment.
@@ -42,4 +46,55 @@ extern nrobj_t* nr_php_get_environment(TSRMLS_D);
*/
void nr_php_parse_rocket_assignment_list(char* s, size_t len, nrobj_t* kv_hash);
+/*
+ * Purpose : Compare the given prefix to a key in a key value pair. If matched,
+ * add the key value pair to the given hash.
+ *
+ * The scanner looks for lines holding "=" style
+ * assignments:
+ *
+ * key = value
+ *
+ * This format is generally seen with system environment variable
+ * output.
+ *
+ * Params : 1. The prefix to scan for.
+ * 2. The key to compare to the prefix.
+ * 3. The value associated with the prefix.
+ * 4. The object that will have the key/value pair added to it.
+ *
+ */
+void nr_php_process_environment_variable_to_nrobj(const char* prefix,
+ const char* key,
+ const char* value,
+ nrobj_t* kv_hash);
+
+/*
+ * Purpose : Compare the given prefix to a key in a key value pair. If matched,
+ *           append the key/value pair to the given string.
+ *
+ * The scanner looks for lines holding "=" style
+ * assignments:
+ *
+ * key = value
+ *
+ * This format is generally seen with system environment variable
+ * output.
+ *
+ * Params : 1. The prefix to scan for.
+ * 2. The key to compare to the prefix.
+ * 3. The value associated with the prefix.
+ * 4. The string that will have the key/value pair added to it.
+ * 5. The delimiter used to separate the key and value in the string.
+ * 6. The delimiter used to separate key/value pairs in the string.
+ *
+ * Returns : String with matching key/value appended.
+ */
+char* nr_php_process_environment_variable_to_string(const char* prefix,
+                                                    const char* key,
+                                                    const char* value,
+                                                    char* kv_string,
+                                                    const char* kv_delimiter,
+                                                    const char* delimiter);
+
#endif /* PHP_ENVIRONMENT_HDR */
diff --git a/agent/php_globals.c b/agent/php_globals.c
index 341dfb5e6..68f64ef04 100644
--- a/agent/php_globals.c
+++ b/agent/php_globals.c
@@ -38,6 +38,8 @@ static void nr_php_per_process_globals_dispose(void) {
nr_free(nr_php_per_process_globals.php_version);
nr_free(nr_php_per_process_globals.upgrade_license_key);
nro_delete(nr_php_per_process_globals.appenv);
+ nro_delete(nr_php_per_process_globals.metadata);
+ nr_free(nr_php_per_process_globals.env_labels);
nr_free(nr_php_per_process_globals.apache_add);
nr_memset(&nr_php_per_process_globals, 0, sizeof(nr_php_per_process_globals));
diff --git a/agent/php_globals.h b/agent/php_globals.h
index a4ff24ceb..8ae65995e 100644
--- a/agent/php_globals.h
+++ b/agent/php_globals.h
@@ -42,6 +42,11 @@ typedef struct _nrphpglobals_t {
mode */
int daemon_special_integration; /* Cause daemon to dump special log entries to
help integration testing. */
+ nrobj_t* metadata; /* P17 metadata taken from environment variables with the
+ * prefix `NEW_RELIC_METADATA_` */
+ char* env_labels; /* Labels taken from environment variables with the
+ * prefix `NEW_RELIC_LABEL_` and from the environment
+ * variable with the key `NEW_RELIC_LABELS` */
#if ZEND_MODULE_API_NO >= ZEND_8_1_X_API_NO /* PHP 8.1+ */
zend_long zend_offset; /* Zend extension offset */
zend_long
diff --git a/agent/php_minit.c b/agent/php_minit.c
index f18b406bc..c1fac0c96 100644
--- a/agent/php_minit.c
+++ b/agent/php_minit.c
@@ -236,6 +236,22 @@ static char* nr_php_check_for_upgrade_license_key(void) {
return 0;
}
+static nr_status_t nr_php_check_8T_DT_config(TSRMLS_D) {
+ /* check if infinite tracing is enabled and DT disabled */
+ if (!nr_strempty(NRINI(trace_observer_host))
+ && !NRINI(distributed_tracing_enabled)) {
+ nrl_warning(
+ NRL_INIT,
+ "Infinite tracing will be DISABLED because distributed tracing is"
+ " disabled and infinite tracing requires distributed tracing to be "
+ "enabled. Please check the"
+ " value of 'newrelic.distributed_tracing_enabled' in the agent "
+ "configuration.");
+ return NR_FAILURE;
+ }
+ return NR_SUCCESS;
+}
+
static char* nr_php_get_agent_specific_info(void) {
const char* php_version;
const char* zend_type;
@@ -553,6 +569,16 @@ PHP_MINIT_FUNCTION(newrelic) {
nr_agent_close_daemon_connection();
}
+ /* Do some sanity checking of configuration settings and handle accordingly */
+
+ /* If infinite tracing (8T) is enabled but distributed tracing (DT) is
+ * disabled this is an unworkable combination because span IDs cannot be
+ * assigned to segments and this causes problems in
+ * axiom/nr_segment.c::nr_segment_to_span_event() Output a warning about this
+ * config issue and also that 8T will be disabled
+ */
+ nr_php_check_8T_DT_config(TSRMLS_C);
+
/*
* Save the original PHP hooks and then apply our own hooks. The agent is
* almost fully operational now. The last remaining initialization that
diff --git a/agent/php_nrini.c b/agent/php_nrini.c
index 5d2f5c180..091509971 100644
--- a/agent/php_nrini.c
+++ b/agent/php_nrini.c
@@ -824,6 +824,7 @@ NR_PHP_UTILIZATION_MH(azure)
NR_PHP_UTILIZATION_MH(gcp)
NR_PHP_UTILIZATION_MH(pcf)
NR_PHP_UTILIZATION_MH(docker)
+NR_PHP_UTILIZATION_MH(kubernetes)
static PHP_INI_MH(nr_daemon_special_curl_verbose_mh) {
int val;
@@ -1936,7 +1937,11 @@ PHP_INI_ENTRY_EX("newrelic.daemon.utilization.detect_docker",
NR_PHP_SYSTEM,
NR_PHP_UTILIZATION_MH_NAME(docker),
nr_enabled_disabled_dh)
-
+PHP_INI_ENTRY_EX("newrelic.daemon.utilization.detect_kubernetes",
+ "1",
+ NR_PHP_SYSTEM,
+ NR_PHP_UTILIZATION_MH_NAME(kubernetes),
+ nr_enabled_disabled_dh)
/*
* This daemon flag is for internal development use only. It should not be
* documented to customers.
diff --git a/agent/php_txn.c b/agent/php_txn.c
index e35630d61..ecc903617 100644
--- a/agent/php_txn.c
+++ b/agent/php_txn.c
@@ -511,15 +511,15 @@ static nr_attribute_config_t* nr_php_create_attribute_config(TSRMLS_D) {
return config;
}
-static nrobj_t* nr_php_txn_get_labels(TSRMLS_D) {
- const char* input;
-
+static nrobj_t* nr_php_txn_get_labels(void) {
/*
- * Specifying labels via an environment variable is not supported.
+ * By appending the environment variables to the end of the ini string, we
+ * ensure the environmental variables take precedence when it is entered into
+ * the label hash. Additionally, with the environmental and ini values
+ * combined, we are able to take advantage of all the previous label
+ * structures, tests, and string validation.
*/
- input = NRINI(labels);
-
- return nr_labels_parse(input);
+ return nr_labels_parse(NR_PHP_PROCESS_GLOBALS(env_labels));
}
static void nr_php_txn_prepared_statement_destroy(void* sql) {
@@ -605,6 +605,25 @@ nrobj_t* nr_php_txn_get_supported_security_policy_settings(nrtxnopt_t* opts) {
return supported_policy_settings;
}
+#define NR_APP_ERROR_DT_ON_TT_OFF_BACKOFF_SECONDS 60
+
+static void nr_php_txn_log_error_dt_on_tt_off(void) {
+ static unsigned n_occur = 0;
+ static time_t last_warn = (time_t)(0);
+ time_t now = time(0);
+
+ n_occur++;
+
+ if ((now - last_warn) > NR_APP_ERROR_DT_ON_TT_OFF_BACKOFF_SECONDS) {
+ last_warn = now;
+ nrl_error(NRL_INIT,
+ "newrelic.transaction_tracer.enabled must be enabled in order "
+ "to use distributed tracing. Occurred %u times.",
+ n_occur);
+ n_occur = 0;
+ }
+}
+
nr_status_t nr_php_txn_begin(const char* appnames,
const char* license TSRMLS_DC) {
nrtxnopt_t opts;
@@ -708,7 +727,8 @@ nr_status_t nr_php_txn_begin(const char* appnames,
info.license = nr_strdup(lic_to_use);
info.settings = NULL; /* Populated through callback. */
info.environment = nro_copy(NR_PHP_PROCESS_GLOBALS(appenv));
- info.labels = nr_php_txn_get_labels(TSRMLS_C);
+ info.metadata = nro_copy(NR_PHP_PROCESS_GLOBALS(metadata));
+ info.labels = nr_php_txn_get_labels();
info.host_display_name = nr_strdup(NRINI(process_host_display_name));
info.lang = nr_strdup("php");
info.version = nr_strdup(nr_version());
@@ -717,11 +737,15 @@ nr_status_t nr_php_txn_begin(const char* appnames,
info.security_policies_token = nr_strdup(NRINI(security_policies_token));
info.supported_security_policies
= nr_php_txn_get_supported_security_policy_settings(&opts);
- info.trace_observer_host = nr_strdup(NRINI(trace_observer_host));
+ /* if DT is disabled we cannot stream 8T events so disable observer host */
+ if (NRINI(distributed_tracing_enabled))
+ info.trace_observer_host = nr_strdup(NRINI(trace_observer_host));
+ else
+ info.trace_observer_host = nr_strdup("");
+ /* observer port setting does not really depend on DT being enabled */
info.trace_observer_port = NRINI(trace_observer_port);
info.span_queue_size = NRINI(span_queue_size);
info.span_events_max_samples_stored = NRINI(span_events_max_samples_stored);
-
NRPRG(app) = nr_agent_find_or_add_app(
nr_agent_applist, &info,
/*
@@ -843,9 +867,7 @@ nr_status_t nr_php_txn_begin(const char* appnames,
if (NRPRG(txn)->options.distributed_tracing_enabled
&& !NRPRG(txn)->options.tt_enabled) {
- nrl_error(NRL_INIT,
- "newrelic.transaction_tracer.enabled must be enabled in order "
- "to use distributed tracing");
+ nr_php_txn_log_error_dt_on_tt_off();
}
#if ZEND_MODULE_API_NO >= ZEND_8_1_X_API_NO
diff --git a/agent/php_vm.c b/agent/php_vm.c
index 948658ac0..19f4bb215 100644
--- a/agent/php_vm.c
+++ b/agent/php_vm.c
@@ -88,7 +88,6 @@ static int nr_php_handle_cufa_fcall(zend_execute_data* execute_data) {
nr_php_opcode_handler_entry_t prev_handler;
const zend_op* prev_opline;
- nrl_verbosedebug(NRL_AGENT, "%s: cannot get function from call", __func__);
/*
* We should have execute_data (and there isn't a realistic case where we
* wouldn't other than memory corruption), so if we don't, we should bail as
diff --git a/agent/scripts/newrelic.cfg.template b/agent/scripts/newrelic.cfg.template
index 3e8967b34..71ddfc98f 100644
--- a/agent/scripts/newrelic.cfg.template
+++ b/agent/scripts/newrelic.cfg.template
@@ -209,8 +209,16 @@
# Info : Enable detection of whether the system is running on Pivotal Cloud
# Foundry.
#
-#newrelic.utilization.detect_pcf = true
+#utilization.detect_pcf = true
+# Setting: utilization.detect_kubernetes
+# Type : boolean
+# Scope : system
+# Default: true
+# Info : Enable detection of whether the system is running in a Kubernetes
+# cluster.
+#
+#utilization.detect_kubernetes = true
# Setting: app_timeout
# Type : time specification string ("5m", "1h20m", etc)
diff --git a/agent/scripts/newrelic.ini.template b/agent/scripts/newrelic.ini.template
index b0b127c58..c3be6efa1 100644
--- a/agent/scripts/newrelic.ini.template
+++ b/agent/scripts/newrelic.ini.template
@@ -335,6 +335,16 @@ newrelic.daemon.logfile = "/var/log/newrelic/newrelic-daemon.log"
;
;newrelic.daemon.utilization.detect_docker = true
+; Setting: newrelic.daemon.utilization.detect_kubernetes
+; Type : boolean
+; Scope : system
+; Default: true
+; Info : Enable detection of whether the system is running in a Kubernetes
+; cluster.
+;
+;newrelic.daemon.utilization.detect_kubernetes = true
+
+
; Setting: newrelic.daemon.app_timeout
; Type : time specification string ("5m", "1h20m", etc)
; Scope : system
diff --git a/agent/tests/test_environment.c b/agent/tests/test_environment.c
index f4a87b4d7..f5cfe32c6 100644
--- a/agent/tests/test_environment.c
+++ b/agent/tests/test_environment.c
@@ -11,6 +11,104 @@
tlib_parallel_info_t parallel_info
= {.suggested_nthreads = -1, .state_size = 0};
+/*
+ * Purpose : Tests if given a prefix a given key/value pair is added to a hash.
+ * Params : 1. prefix: The prefix to check the key against.
+ * 2. key: The key to compare to the prefix.
+ * 3. value: The value that corresponds to the key
+ * 4. validCase: bool to indicate if the case should fail or succeed.
+ * Returns : void
+ */
+static void test_nr_php_process_environment_variable_to_nrobj(
+ const char* prefix,
+ const char* key,
+ const char* value,
+ bool validCase) {
+ nrobj_t* result_hash = NULL;
+ nrobj_t* expect_hash = NULL;
+ const char* r = NULL;
+ nr_status_t err;
+ char* result_str;
+ char* expect_str;
+
+ result_hash = nro_new_hash();
+ nr_php_process_environment_variable_to_nrobj(prefix, key, value, result_hash);
+ r = nro_get_hash_string(result_hash, key, &err);
+ if (validCase) {
+ tlib_pass_if_true("index OK", NR_SUCCESS == err, "success=%d", (int)err);
+ tlib_pass_if_true("pick", 0 == nr_strcmp(r, NULL == value ? "" : value),
+ "r=%s but expected %s for key %s", r, value, key);
+ } else {
+ tlib_pass_if_false("index OK", NR_SUCCESS == err, "success=%d", (int)err);
+ tlib_pass_if_null("NULL terms", r);
+ }
+
+ expect_hash = nro_new_hash();
+ if (validCase) {
+ nro_set_hash_string(expect_hash, key, value);
+ }
+ expect_str = nro_dump(expect_hash);
+ result_str = nro_dump(result_hash);
+ tlib_pass_if_true("contents", 0 == nr_strcmp(expect_str, result_str),
+ "\nresult_str=%s\nexpect_str=%s", result_str, expect_str);
+
+ nr_free(expect_str);
+ nr_free(result_str);
+ nro_delete(expect_hash);
+ nro_delete(result_hash);
+}
+
+/*
+ * Purpose : Tests adding multiple key/value pairs to a hash.
+ *
+ * Returns : void
+ */
+static void test_multi_nr_php_process_environment_variable_to_nrobj() {
+ nrobj_t* result_hash = NULL;
+ nrobj_t* expect_hash = NULL;
+ const char* r = NULL;
+ nr_status_t err;
+ char* result_str;
+ char* expect_str;
+
+ result_hash = nro_new_hash();
+ /*
+ * Add multiple key/value pairs to the hash including ones with duplicate
+ * keys. The last added key should always take precedence over a previous
+ * duplicate key.
+ */
+ nr_php_process_environment_variable_to_nrobj("MYPREFIX", "MYPREFIX_ONE",
+ "one", result_hash);
+ nr_php_process_environment_variable_to_nrobj("MYPREFIX", "MYPREFIX_TWO",
+ "two", result_hash);
+ nr_php_process_environment_variable_to_nrobj("MYPREFIX", "MYPREFIX_ONE",
+ "second_one", result_hash);
+ nr_php_process_environment_variable_to_nrobj("MYPREFIX", "MYPREFIX_ONE",
+ "third_one", result_hash);
+ nr_php_process_environment_variable_to_nrobj("MYPREFIX", "PREFIX_THREE",
+ "three", result_hash);
+
+ r = nro_get_hash_string(result_hash, "MYPREFIX_ONE", &err);
+
+ tlib_pass_if_true("index OK", NR_SUCCESS == err, "success=%d", (int)err);
+ tlib_pass_if_true("pick", 0 == nr_strcmp(r, "third_one"),
+ "r=%s but expected third_one", r);
+
+ expect_hash = nro_new_hash();
+ nro_set_hash_string(expect_hash, "MYPREFIX_ONE", "third_one");
+ nro_set_hash_string(expect_hash, "MYPREFIX_TWO", "two");
+
+ expect_str = nro_dump(expect_hash);
+ result_str = nro_dump(result_hash);
+ tlib_pass_if_true("contents", 0 == nr_strcmp(expect_str, result_str),
+ "\nresult_str=%s\nexpect_str=%s", result_str, expect_str);
+
+ nr_free(expect_str);
+ nr_free(result_str);
+ nro_delete(expect_hash);
+ nro_delete(result_hash);
+}
+
static void test_single_rocket_assignment(const char* key, const char* value) {
nrobj_t* result_env = NULL;
nrobj_t* expect_env = NULL;
@@ -69,6 +167,57 @@ static void test_rocket_assignment_string_to_obj_fn(const char* stimulus,
nro_delete(result_env);
}
+/*
+ * Purpose : Test the nr_php_process_environment_variables_to_nrobj
+ * functionality.
+ *
+ * Returns : Void
+ */
+static void test_nr_php_process_environment_variables_to_nrobj(void) {
+ /* Prefix and Key are same length, should fail because a value with only the
+ * prefix is not valid.
+ */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "NR_METADATA_PREFIX_", "value", false);
+
+ /* Valid prefix, key, value. Pair should be added to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "NEW_RELIC_METADATA_ONE", "metadata_one", true);
+
+ /* Non-matching prefix and key. Should not add pair to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "OTHER", "metadata_two", false);
+
+ /* Non-matching prefix and key. Should not add pair to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "NEW_RELIC_THREE", "metadata_three", false);
+
+ /* Null prefix should fail. Should not add pair to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NULL, "NEW_RELIC_METADATA_FOUR", "metadata_four", false);
+
+ /* Valid prefix, key, value. Pair should be added to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "NEW_RELIC_METADATA_FIVE",
+ "metadata_five with a space", true);
+
+ /* Valid prefix, key, NULL value (acceptable). Pair should be added to hash.
+ */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, "NEW_RELIC_METADATA_SIX", NULL, true);
+
+ /* NULL key, NULL value. Pair should not be added to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(NR_METADATA_KEY_PREFIX,
+ NULL, NULL, false);
+
+ /* NULL key. Pair should not be added to hash. */
+ test_nr_php_process_environment_variable_to_nrobj(
+ NR_METADATA_KEY_PREFIX, NULL, "metadata_seven", false);
+
+ /* Should be able to add multiple valid pairs to hash. */
+ test_multi_nr_php_process_environment_variable_to_nrobj();
+}
+
static void test_rocket_assignments(void) {
nrobj_t* expect_env = NULL;
@@ -161,6 +310,116 @@ static void test_rocket_assignments(void) {
nro_delete(expect_env);
}
+/*
+ * Purpose : Tests if given a prefix a given key/value pair is added to a hash.
+ * Params : 1. prefix: The prefix to check the key against.
+ * 2. key: The key to compare to the prefix.
+ * 3. value: The value that corresponds to the key
+ * 4. expect_str: expected value.
+ * Returns : void
+ */
+static void test_nr_php_process_environment_variable_to_string(
+ const char* prefix,
+ const char* key,
+ const char* value,
+ const char* expect_str) {
+ char* result_str = NULL;
+
+ result_str = nr_php_process_environment_variable_to_string(
+ prefix, key, value, result_str, ":", ";");
+
+ tlib_pass_if_true("contents", 0 == nr_strcmp(expect_str, result_str),
+ "\nresult_str=%s\nexpect_str=%s", result_str, expect_str);
+
+ nr_free(result_str);
+}
+
+/*
+ * Purpose : Tests adding multiple key/value pairs to a hash.
+ *
+ * Returns : void
+ */
+static void test_multi_nr_php_process_environment_variable_to_string() {
+ char* result_str = NULL;
+ char* expect_str = NULL;
+
+ /*
+ * Add multiple key/value pairs to the string including ones with duplicate
+ * keys. The last added key will eventually take precedence over a previous
+ * duplicate key when the string is eventually converted to a hash object.
+ */
+ result_str = nr_php_process_environment_variable_to_string(
+ "MYPREFIX_", "MYPREFIX_ONE", "one", result_str, ":", ";");
+ result_str = nr_php_process_environment_variable_to_string(
+ "MYPREFIX_", "MYPREFIX_TWO", "two", result_str, ":", ";");
+ result_str = nr_php_process_environment_variable_to_string(
+ "MYPREFIX_", "MYPREFIX_ONE", "second_one", result_str, ":", ";");
+ result_str = nr_php_process_environment_variable_to_string(
+ "MYPREFIX_", "MYPREFIX_ONE", "third_one", result_str, ":", ";");
+ result_str = nr_php_process_environment_variable_to_string(
+ "MYPREFIX_", "PREFIX_THREE", "three", result_str, ":", ";");
+
+ expect_str = nr_strdup("ONE:one;TWO:two;ONE:second_one;ONE:third_one");
+ tlib_pass_if_true("contents", 0 == nr_strcmp(expect_str, result_str),
+ "\nresult_str=%s\nexpect_str=%s", result_str, expect_str);
+
+ nr_free(expect_str);
+ nr_free(result_str);
+}
+
+/*
+ * Purpose : Test the nr_php_process_environment_variables_to_string
+ * functionality.
+ *
+ * Returns : Void
+ */
+static void test_nr_php_process_environment_variables_to_string(void) {
+ /* Prefix and Key are same length, should fail because a value with only the
+ * prefix is not valid.
+ */
+
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "NEW_RELIC_LABEL_", "value", NULL);
+
+ /* Valid prefix, key, value. Pair should be added to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "NEW_RELIC_LABEL_ONE", "one", "ONE:one");
+
+ /* Non-matching prefix and key. Should not add pair to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "OTHER", "two", NULL);
+
+ /* Non-matching prefix and key. Should not add pair to string. */
+
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "NR_LABELS_THREE", "three", NULL);
+
+ /* Null prefix should fail. Should not add pair to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NULL, "NEW_RELIC_LABEL_FOUR", "four", NULL);
+
+ /* Valid prefix, key, value. Pair should be added to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "NEW_RELIC_LABEL_FIVE",
+ "metadata_five with a space", "FIVE:metadata_five with a space");
+
+ /* Valid prefix, key, NULL value (acceptable). Pair should be added to string.
+ */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, "NEW_RELIC_LABEL_SIX", NULL, "SIX");
+
+ /* NULL key, NULL value. Pair should not be added to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, NULL, NULL, NULL);
+
+ /* NULL key. Pair should not be added to string. */
+ test_nr_php_process_environment_variable_to_string(
+ NR_LABELS_SINGULAR_KEY_PREFIX, NULL, "seven", NULL);
+
+ /* Should be able to add multiple valid pairs to string. */
+ test_multi_nr_php_process_environment_variable_to_string();
+}
+
void test_main(void* p NRUNUSED) {
#if defined(ZTS) && !defined(PHP7)
void*** tsrm_ls = NULL;
@@ -170,5 +429,9 @@ void test_main(void* p NRUNUSED) {
test_rocket_assignments();
+ test_nr_php_process_environment_variables_to_nrobj();
+
+ test_nr_php_process_environment_variables_to_string();
+
tlib_php_engine_destroy(TSRMLS_C);
}
diff --git a/axiom/cmd_appinfo_transmit.c b/axiom/cmd_appinfo_transmit.c
index f7fcee73b..0d2ebc57a 100644
--- a/axiom/cmd_appinfo_transmit.c
+++ b/axiom/cmd_appinfo_transmit.c
@@ -67,6 +67,26 @@ static uint32_t nr_appinfo_prepend_settings(const nr_app_info_t* info,
return offset;
}
+/*
+ * Send the metadata to the daemon in the format expected by the
+ * collector in the connect command.
+ */
+
+static uint32_t nr_appinfo_prepend_metadata(const nr_app_info_t* info,
+ nr_flatbuffer_t* fb) {
+ char* json;
+ uint32_t offset;
+
+ if ((NULL == info) || (NULL == info->metadata)) {
+ return 0;
+ }
+ json = nro_to_json(info->metadata);
+ offset = nr_flatbuffers_prepend_string(fb, json);
+ nr_free(json);
+
+ return offset;
+}
+
static nr_status_t convert_appenv(const char* key,
const nrobj_t* val,
void* ptr) {
@@ -126,6 +146,7 @@ nr_flatbuffer_t* nr_appinfo_create_query(const char* agent_run_id,
uint32_t supported_security_policies;
uint32_t host_name;
uint32_t trace_observer_host;
+ uint32_t metadata;
char* json_supported_security_policies;
fb = nr_flatbuffers_create(0);
@@ -150,6 +171,8 @@ nr_flatbuffer_t* nr_appinfo_create_query(const char* agent_run_id,
supported_security_policies
= nr_flatbuffers_prepend_string(fb, json_supported_security_policies);
+ metadata = nr_appinfo_prepend_metadata(info, fb);
+
nr_flatbuffers_object_begin(fb, APP_NUM_FIELDS);
nr_flatbuffers_object_prepend_u64(fb, APP_SPAN_QUEUE_SIZE,
info->span_queue_size, 0);
@@ -162,6 +185,7 @@ nr_flatbuffer_t* nr_appinfo_create_query(const char* agent_run_id,
nr_flatbuffers_object_prepend_uoffset(fb, APP_HOST, host_name, 0);
nr_flatbuffers_object_prepend_uoffset(fb, APP_SUPPORTED_SECURITY_POLICIES,
supported_security_policies, 0);
+ nr_flatbuffers_object_prepend_uoffset(fb, APP_METADATA, metadata, 0);
nr_flatbuffers_object_prepend_uoffset(fb, APP_SECURITY_POLICY_TOKEN,
security_policy_token, 0);
nr_flatbuffers_object_prepend_uoffset(fb, APP_DISPLAY_HOST, display_host, 0);
@@ -197,6 +221,7 @@ nr_flatbuffer_t* nr_appinfo_create_query(const char* agent_run_id,
nr_flatbuffers_finish(fb, message);
nr_free(json_supported_security_policies);
+
return fb;
}
@@ -204,7 +229,8 @@ int nr_command_is_flatbuffer_invalid(nr_flatbuffer_t* msg, size_t msglen) {
size_t offset = nr_flatbuffers_read_uoffset(nr_flatbuffers_data(msg), 0);
if (msglen - MIN_FLATBUFFER_SIZE <= offset) {
- nrl_verbosedebug(NRL_DAEMON, "offset is too large, len=%zu", offset);
+ nrl_verbosedebug(NRL_DAEMON, "flatbuffer offset is too large, len=%zu",
+ offset);
return 1;
}
diff --git a/axiom/nr_app.c b/axiom/nr_app.c
index f59b32955..cddefffbb 100644
--- a/axiom/nr_app.c
+++ b/axiom/nr_app.c
@@ -84,6 +84,7 @@ void nr_app_info_destroy_fields(nr_app_info_t* info) {
nro_delete(info->settings);
nro_delete(info->environment);
nro_delete(info->labels);
+ nro_delete(info->metadata);
nr_free(info->host_display_name);
nr_free(info->lang);
nr_free(info->version);
@@ -264,6 +265,7 @@ static nrapp_t* create_new_app(const nr_app_info_t* info) {
app->info.environment = nro_copy(info->environment);
app->info.high_security = info->high_security;
app->info.labels = nro_copy(info->labels);
+ app->info.metadata = nro_copy(info->metadata);
app->info.host_display_name = nr_strdup(info->host_display_name);
app->info.redirect_collector = nr_strdup(info->redirect_collector);
app->info.security_policies_token = nr_strdup(info->security_policies_token);
diff --git a/axiom/nr_app.h b/axiom/nr_app.h
index aa4bceaf9..b3be7ba34 100644
--- a/axiom/nr_app.h
+++ b/axiom/nr_app.h
@@ -62,6 +62,7 @@ typedef struct _nr_app_info_t {
nrobj_t* settings; /* New Relic settings */
nrobj_t* environment; /* Application environment */
nrobj_t* labels; /* Labels for Language Agents */
+ nrobj_t* metadata; /* Environment provided metadata for Language Agents */
char* host_display_name; /* Optional user-provided host name for UI */
char* lang; /* Language */
char* version; /* Version */
diff --git a/axiom/nr_commands_private.h b/axiom/nr_commands_private.h
index 4bda179d6..21a82ea9b 100644
--- a/axiom/nr_commands_private.h
+++ b/axiom/nr_commands_private.h
@@ -80,7 +80,8 @@ enum {
APP_TRACE_OBSERVER_PORT = 14,
APP_SPAN_QUEUE_SIZE = 15,
APP_SPAN_EVENTS_MAX_SAMPLES_STORED = 16,
- APP_NUM_FIELDS = 17,
+ APP_METADATA = 17,
+ APP_NUM_FIELDS = 18,
};
/* Generated from: table AppReply */
diff --git a/axiom/nr_daemon_spawn.c b/axiom/nr_daemon_spawn.c
index d16893b85..66d12569a 100644
--- a/axiom/nr_daemon_spawn.c
+++ b/axiom/nr_daemon_spawn.c
@@ -129,6 +129,8 @@ nr_argv_t* nr_daemon_args_to_argv(const char* name,
args->utilization.pcf ? "true" : "false");
nr_argv_append_flag(argv, "--define", "utilization.detect_docker=%s",
args->utilization.docker ? "true" : "false");
+ nr_argv_append_flag(argv, "--define", "utilization.detect_kubernetes=%s",
+ args->utilization.kubernetes ? "true" : "false");
/* diagnostic and testing flags */
if (args->integration_mode) {
diff --git a/axiom/nr_distributed_trace.c b/axiom/nr_distributed_trace.c
index 5652bcbe8..a7c7e356e 100644
--- a/axiom/nr_distributed_trace.c
+++ b/axiom/nr_distributed_trace.c
@@ -384,7 +384,7 @@ nrtime_t nr_distributed_trace_inbound_get_timestamp_delta(
extern bool nr_distributed_trace_inbound_has_timestamp(
const nr_distributed_trace_t* dt) {
if (NULL == dt) {
- return 0;
+ return 0;
}
return dt->inbound.timestamp != 0;
@@ -988,14 +988,15 @@ static const char* nr_distributed_trace_convert_w3c_headers_tracestate(
/*
* Keep the other raw tracestate headers
*/
- headers_to_be_forwarded = nr_str_append(headers_to_be_forwarded, value);
+ headers_to_be_forwarded
+ = nr_str_append(headers_to_be_forwarded, value, ",");
/*
* Keep the other tracing vendors
*/
parsed_vendor = nr_strsplit(value, "=", 0);
tracing_vendors = nr_str_append(
- tracing_vendors, nro_get_array_string(parsed_vendor, 1, NULL));
+ tracing_vendors, nro_get_array_string(parsed_vendor, 1, NULL), ",");
nro_delete(parsed_vendor);
}
}
diff --git a/axiom/nr_header.c b/axiom/nr_header.c
index f26204346..f70398e4d 100644
--- a/axiom/nr_header.c
+++ b/axiom/nr_header.c
@@ -360,7 +360,7 @@ nr_hashmap_t* nr_header_outbound_request_create(nrtxn_t* txn,
txn->distributed_trace);
if (tracing_vendors && tracestate_ptr) {
- tracestate_ptr = nr_str_append(tracestate_ptr, tracing_vendors);
+ tracestate_ptr = nr_str_append(tracestate_ptr, tracing_vendors, ",");
}
nr_header_outbound_save(outbound_headers, W3C_TRACESTATE, tracestate_ptr);
diff --git a/axiom/nr_segment.c b/axiom/nr_segment.c
index b10b42975..b7e7a68ef 100644
--- a/axiom/nr_segment.c
+++ b/axiom/nr_segment.c
@@ -352,6 +352,27 @@ static nr_status_t add_agent_attribute_to_span_event(const char* key,
return NR_SUCCESS;
}
+#define NR_APP_LOG_WARNING_SEGMENT_ID_FAILURE_BACKOFF_SECONDS 60
+
+static void nr_segment_log_warning_segment_id_missing(void) {
+ static unsigned n_occur = 0;
+ static time_t last_warn = (time_t)(0);
+ time_t now = time(0);
+
+ n_occur++;
+
+ if ((now - last_warn)
+ > NR_APP_LOG_WARNING_SEGMENT_ID_FAILURE_BACKOFF_SECONDS) {
+ last_warn = now;
+ nrl_warning(
+ NRL_SEGMENT,
+ "cannot create a span event when a segment ID cannot be "
+ "generated; is distributed tracing enabled? Occurred %u times.",
+ n_occur);
+ n_occur = 0;
+ }
+}
+
nr_span_event_t* nr_segment_to_span_event(nr_segment_t* segment) {
nr_span_event_t* event;
char* trace_id;
@@ -378,9 +399,7 @@ nr_span_event_t* nr_segment_to_span_event(nr_segment_t* segment) {
}
if (NULL == nr_segment_ensure_id(segment, segment->txn)) {
- nrl_warning(NRL_SEGMENT,
- "cannot create a span event when a segment ID cannot be "
- "generated; is distributed tracing enabled?");
+ nr_segment_log_warning_segment_id_missing();
return NULL;
}
@@ -433,7 +452,8 @@ nr_span_event_t* nr_segment_to_span_event(nr_segment_t* segment) {
event, NR_SPAN_PARENT_TRANSPORT_TYPE,
nr_distributed_trace_inbound_get_transport_type(
segment->txn->distributed_trace));
- if (nr_distributed_trace_inbound_has_timestamp(segment->txn->distributed_trace)) {
+ if (nr_distributed_trace_inbound_has_timestamp(
+ segment->txn->distributed_trace)) {
nr_span_event_set_parent_transport_duration(
event, nr_distributed_trace_inbound_get_timestamp_delta(
segment->txn->distributed_trace,
diff --git a/axiom/nr_txn.c b/axiom/nr_txn.c
index cdfd61efc..68ad127b6 100644
--- a/axiom/nr_txn.c
+++ b/axiom/nr_txn.c
@@ -291,6 +291,12 @@ void nr_txn_enforce_security_settings(nrtxnopt_t* opts,
NRL_TXN, "Setting newrelic.analytics_events.enabled = false by server");
}
+ if (0 == nr_reply_get_bool(connect_reply, "collect_span_events", 1)) {
+ opts->span_events_enabled = 0;
+ nrl_verbosedebug(NRL_TXN,
+ "Setting newrelic.span_events_enabled = false by server");
+ }
+
// LASP also modifies this setting. Kept seperate for readability.
if (0 == nr_reply_get_bool(connect_reply, "collect_custom_events", 1)) {
opts->custom_events_enabled = 0;
@@ -494,6 +500,12 @@ nrtxn_t* nr_txn_begin(nrapp_t* app,
nt->options.span_events_enabled
= nt->options.span_events_enabled && app->limits.span_events;
+ /*
+ * Enforce SSC and LASP if enabled
+ */
+ nr_txn_enforce_security_settings(&nt->options, app->connect_reply,
+ app->security_policies);
+
/*
* Update the options based on the 8T configuration.
*/
@@ -520,12 +532,6 @@ nrtxn_t* nr_txn_begin(nrapp_t* app,
nt->custom_events = nr_analytics_events_create(app->limits.custom_events);
- /*
- * Enforce SSC and LASP if enabled
- */
- nr_txn_enforce_security_settings(&nt->options, app->connect_reply,
- app->security_policies);
-
/*
* Set the status fields to their defaults.
*/
diff --git a/axiom/nr_utilization.h b/axiom/nr_utilization.h
index ccc471980..fe246c026 100644
--- a/axiom/nr_utilization.h
+++ b/axiom/nr_utilization.h
@@ -12,6 +12,7 @@ typedef struct _nr_utilization_t {
int gcp : 1;
int pcf : 1;
int docker : 1;
+ int kubernetes : 1;
} nr_utilization_t;
static const nr_utilization_t nr_utilization_default = {
@@ -20,6 +21,7 @@ static const nr_utilization_t nr_utilization_default = {
.gcp = 1,
.pcf = 1,
.docker = 1,
+ .kubernetes = 1,
};
#endif /* NR_UTILIZATION_HDR */
diff --git a/axiom/nr_version.c b/axiom/nr_version.c
index a82aec670..e494d94ac 100644
--- a/axiom/nr_version.c
+++ b/axiom/nr_version.c
@@ -29,8 +29,9 @@
* yam 23Aug2021 (9.18)
* zomp 02Mar2022 (9.19)
* allium 14Mar2022 (9.20)
+ * buttercup 26Apr2022 (9.21)
*/
-#define NR_CODENAME "buttercup"
+#define NR_CODENAME "cosmos"
const char* nr_version(void) {
return NR_STR2(NR_VERSION);
diff --git a/axiom/tests/cross_agent_tests/distributed_tracing/distributed_tracing.json b/axiom/tests/cross_agent_tests/distributed_tracing/distributed_tracing.json
index fda3bc3aa..c32912068 100644
--- a/axiom/tests/cross_agent_tests/distributed_tracing/distributed_tracing.json
+++ b/axiom/tests/cross_agent_tests/distributed_tracing/distributed_tracing.json
@@ -64,6 +64,66 @@
["Supportability/DistributedTrace/AcceptPayload/Success", 1]
]
},
+ {
+ "test_name": "high_priority_but_sampled_false",
+ "comment": "this should never happen, but is here to verify your agent only creates a span event if sampled=true, not just based off of priority",
+ "trusted_account_key": "33",
+ "account_id": "33",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "major_version": 0,
+ "minor_version": 1,
+ "transport_type": "HTTP",
+ "inbound_payloads": [
+ {
+ "v": [0, 1],
+ "d": {
+ "ac": "33",
+ "ap": "2827902",
+ "id": "7d3efb1b173fecfa",
+ "tx": "e8b91a159289ff74",
+ "pr": 1.234567,
+ "sa": false,
+ "ti": 1518469636035,
+ "tr": "d6b4ba0c3a712ca",
+ "ty": "App"
+ }
+ }
+ ],
+ "intrinsics": {
+ "target_events": ["Transaction"],
+ "common":{
+ "exact": {
+ "traceId": "d6b4ba0c3a712ca",
+ "priority": 1.234567,
+ "sampled": false
+ },
+ "expected": ["guid"],
+ "unexpected": ["grandparentId", "cross_process_id", "nr.tripId", "nr.pathHash", "nr.referringPathHash", "nr.guid", "nr.referringTransactionGuid", "nr.alternatePathHashes"]
+ },
+ "Transaction": {
+ "exact": {
+ "parent.type": "App",
+ "parent.app": "2827902",
+ "parent.account": "33",
+ "parent.transportType": "HTTP",
+ "parentId": "e8b91a159289ff74",
+ "parentSpanId": "7d3efb1b173fecfa"
+ },
+ "expected": ["parent.transportDuration"]
+ },
+ "unexpected_events": ["Span"]
+ },
+ "expected_metrics": [
+ ["DurationByCaller/App/33/2827902/HTTP/all", 1],
+ ["DurationByCaller/App/33/2827902/HTTP/allWeb", 1],
+ ["TransportDuration/App/33/2827902/HTTP/all", 1],
+ ["TransportDuration/App/33/2827902/HTTP/allWeb", 1],
+ ["Supportability/DistributedTrace/AcceptPayload/Success", 1]
+ ]
+ },
{
"test_name": "multiple_accept_calls",
"trusted_account_key": "33",
diff --git a/axiom/tests/cross_agent_tests/distributed_tracing/trace_context.json b/axiom/tests/cross_agent_tests/distributed_tracing/trace_context.json
index a1f587c6c..9c066addd 100644
--- a/axiom/tests/cross_agent_tests/distributed_tracing/trace_context.json
+++ b/axiom/tests/cross_agent_tests/distributed_tracing/trace_context.json
@@ -70,6 +70,27 @@
"tracestate": "33@nr=0-0-33-2827902-b4a07f08064ee8f9-e8b91a159289ff74-0-0.123456-1518469636035"
}
],
+ "outbound_payloads": [
+ {
+ "exact": {
+ "traceparent.version": "00",
+ "traceparent.trace_id": "37375fc353f345b5801b166e31b76136",
+ "traceparent.trace_flags": "00",
+ "tracestate.tenant_id": "33",
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
+ "tracestate.parent_account_id": "33",
+ "tracestate.sampled": "0",
+ "tracestate.priority": "0.12346"
+ },
+ "expected": [
+ "traceparent.parent_id",
+ "tracestate.transaction_id",
+ "tracestate.timestamp",
+ "tracestate.parent_application_id"
+ ]
+ }
+ ],
"intrinsics": {
"target_events": ["Transaction"],
"common":{
@@ -171,6 +192,7 @@
"raises_exception": false,
"force_sampled_true": false,
"span_events_enabled": true,
+ "transaction_events_enabled": true,
"transport_type": "HTTP",
"inbound_headers": [
{
@@ -185,11 +207,11 @@
"traceparent.trace_id": "7a933b0e517e8c1f6bc6a7466be6f2a0",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -268,11 +290,11 @@
"traceparent.trace_id": "2c7a33d956d44531b48ec6f2e535e5c4",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -368,8 +390,7 @@
}
},
"expected_metrics": [
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/all", 1],
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/allWeb", 1]
+ ["Supportability/TraceContext/Create/Success", 1]
]
},
{
@@ -617,11 +638,11 @@
"traceparent.trace_id": "e22175eb1d68b6de32bf70e38458ccc3",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -695,11 +716,11 @@
"traceparent.trace_id": "099ae207600a34ecdd5902aba9c8c6c3",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -715,11 +736,11 @@
"traceparent.trace_id": "099ae207600a34ecdd5902aba9c8c6c3",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -793,11 +814,11 @@
"traceparent.trace_id": "44673569f54fad422c3795b6cd4aef69",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "65",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "11",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -958,11 +979,11 @@
"traceparent.trace_id": "ccaa36c833b26ce54bafa6c4102fd740",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -1276,8 +1297,8 @@
"traceparent.version": "00",
"traceparent.trace_id": "5f2796876f44a3c898994ce2668e2222",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0"
},
"expected": [
"traceparent.trace_flags",
@@ -1359,8 +1380,8 @@
"traceparent.version": "00",
"traceparent.trace_id": "5f2796876f44a3c898994ce2668e2222",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33"
},
"expected": [
@@ -1439,8 +1460,8 @@
"exact": {
"traceparent.version": "00",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33"
},
"expected": [
@@ -1459,8 +1480,7 @@
}
],
"expected_metrics": [
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/all", 1],
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/allWeb", 1]
+ ["Supportability/TraceContext/Create/Success", 1]
]
},
{
@@ -1481,8 +1501,8 @@
"exact": {
"traceparent.version": "00",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33"
},
"expected": [
@@ -1499,8 +1519,7 @@
}
],
"expected_metrics": [
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/all", 1],
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/allWeb", 1]
+ ["Supportability/TraceContext/Create/Success", 1]
]
},
{
@@ -1525,11 +1544,11 @@
"traceparent.version": "00",
"traceparent.trace_id": "87b1c9a429205b25e5b687d890d4821f",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "99",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.trace_flags",
@@ -1582,6 +1601,71 @@
["Supportability/TraceContext/Accept/Success", 1]
]
},
+ {
+ "test_name": "priority_not_converted_to_scientific_notation",
+ "trusted_account_key": "33",
+ "account_id": "99",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "transaction_events_enabled": true,
+ "transport_type": "HTTP",
+ "inbound_headers": [
+ {
+ "traceparent": "00-87b1c9a429205b25e5b687d890d4821f-afe162ae3117a892-00",
+ "tracestate": "33@nr=0-0-11-30299-afe162ae3117a892-0b752e7f02c85205-0-0.000012-1518469636035"
+ }
+ ],
+ "outbound_payloads": [
+ {
+ "exact": {
+ "traceparent.version": "00",
+ "traceparent.trace_id": "87b1c9a429205b25e5b687d890d4821f",
+ "tracestate.tenant_id": "33",
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
+ "tracestate.parent_account_id": "99",
+ "tracestate.sampled": "0",
+ "tracestate.priority": "0.00001"
+ },
+ "expected": [
+ "traceparent.trace_flags",
+ "traceparent.parent_id",
+ "tracestate.transaction_id",
+ "tracestate.parent_application_id",
+ "tracestate.timestamp"
+ ] }
+ ],
+ "intrinsics": {
+ "target_events": ["Transaction"],
+ "common":{
+ "exact": {
+ "traceId": "87b1c9a429205b25e5b687d890d4821f",
+ "priority": 0.000012,
+ "sampled": false
+ },
+ "expected": ["guid"],
+ "unexpected": ["grandparentId", "cross_process_id", "nr.tripId", "nr.pathHash", "nr.referringPathHash", "nr.guid", "nr.referringTransactionGuid", "nr.alternatePathHashes"]
+ },
+ "Transaction": {
+ "exact": {
+ "parent.type": "App",
+ "parent.app": "30299",
+ "parent.account": "11",
+ "parent.transportType": "HTTP",
+ "parentId": "0b752e7f02c85205",
+ "parentSpanId": "afe162ae3117a892"
+ },
+ "expected": ["parent.transportDuration"]
+ }
+ },
+ "expected_metrics": [
+ ["DurationByCaller/App/11/30299/HTTP/all", 1],
+ ["DurationByCaller/App/11/30299/HTTP/allWeb", 1],
+ ["Supportability/TraceContext/Accept/Success", 1]
+ ]
+ },
{
"test_name": "w3c_and_newrelic_headers_present",
"comment": "outbound newrelic headers are built from w3c headers, ignoring inbound newrelic headers",
@@ -1606,16 +1690,15 @@
"newrelic.v": [0, 1],
"newrelic.d.ty": "App",
"newrelic.d.ac": "33",
- "newrelic.d.ap": "2827902",
"newrelic.d.tr": "da8bc8cc6d062849b0efcf3c169afb5a",
"newrelic.d.pr": 1.23456,
"newrelic.d.sa": true
},
"expected": [
- "newrelic.d.pr",
- "newrelic.d.ap",
- "newrelic.d.tx",
- "newrelic.d.ti",
+ "newrelic.d.pr",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
"newrelic.d.id"],
"unexpected": ["newrelic.d.tk"]
}
@@ -1663,8 +1746,8 @@
"test_name": "w3c_and_newrelic_headers_present_error_parsing_traceparent",
"comment": [
"If the traceparent header is present on an inbound request, conforming agents MUST",
- "use this header to continue the trace. The newrelic header MUST be used _only_ when",
- "traceparent is _missing_."
+ "ignore any newrelic header. If the traceparent header is invalid, a new trace MUST",
+ "be started. The newrelic header MUST be used _only_ when traceparent is _missing_."
],
"trusted_account_key": "33",
"account_id": "33",
@@ -1678,7 +1761,7 @@
{
"traceparent": "garbage",
"tracestate": "33@nr=0-0-33-2827902-7d3efb1b173fecfa-e8b91a159289ff74-1-1.23456-1518469636035",
- "newrelic": "{\"v\":[0,1],\"d\":{\"ty\":\"Mobile\",\"ac\":\"123\",\"ap\":\"51424\",\"id\":\"5f474d64b9cc9b2a\",\"tr\":\"6e2fea0b173fdad0\",\"pr\":0.1234,\"sa\":true,\"ti\":1482959525577,\"tx\":\"27856f70d3d314b7\"}}"
+ "newrelic": "{\"v\":[0,1],\"d\":{\"ty\":\"Mobile\",\"ac\":\"33\",\"ap\":\"51424\",\"id\":\"5f474d64b9cc9b2a\",\"tr\":\"6e2fea0b173fdad0\",\"pr\":0.1234,\"sa\":true,\"ti\":1482959525577,\"tx\":\"27856f70d3d314b7\"}}"
}
],
"outbound_payloads": [
@@ -1687,25 +1770,24 @@
"newrelic.v": [0, 1],
"newrelic.d.ty": "App",
"newrelic.d.ac": "33",
- "newrelic.d.ap": "2827902",
"newrelic.d.sa": true
},
"notequal": {
"newrelic.d.tr": "6e2fea0b173fdad0"
},
"expected": [
- "newrelic.d.pr",
- "newrelic.d.ap",
- "newrelic.d.tx",
- "newrelic.d.ti",
- "newrelic.d.id",
+ "newrelic.d.pr",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
+ "newrelic.d.id",
"newrelic.d.tr"
],
"unexpected": ["newrelic.d.tk"]
}
],
"intrinsics": {
- "target_events": ["Transaction", "Span"],
+ "target_events": ["Span"],
"common":{
"expected": ["guid", "traceId", "priority", "sampled"],
"unexpected": ["grandparentId", "cross_process_id", "nr.tripId", "nr.pathHash", "nr.referringPathHash", "nr.guid", "nr.referringTransactionGuid", "nr.alternatePathHashes", "parent.type", "parent.app", "parent.account", "parentId", "parentSpanId", "parent.transportDuration", "tracingVendors"]
@@ -1715,8 +1797,6 @@
}
},
"expected_metrics": [
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/all", 1],
- ["DurationByCaller/Unknown/Unknown/Unknown/HTTP/allWeb", 1],
["Supportability/TraceContext/TraceParent/Parse/Exception", 1]
]
},
@@ -1743,15 +1823,14 @@
"newrelic.v": [0, 1],
"newrelic.d.ty": "App",
"newrelic.d.ac": "33",
- "newrelic.d.ap": "2827902",
"newrelic.d.tr": "da8bc8cc6d062849b0efcf3c169afb5a",
"newrelic.d.sa": true
},
"expected": [
- "newrelic.d.pr",
- "newrelic.d.ap",
- "newrelic.d.tx",
- "newrelic.d.ti",
+ "newrelic.d.pr",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
"newrelic.d.id"
],
"unexpected": ["newrelic.d.tk"]
@@ -1816,18 +1895,16 @@
"traceparent.trace_id": "00000000000000006e2fea0b173fdad0",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.123432,
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.12343",
"newrelic.v": [0, 1],
"newrelic.d.ty": "App",
"newrelic.d.ac": "33",
- "newrelic.d.ap": "2827902",
"newrelic.d.tr": "6E2fEA0B173FDAD0",
- "newrelic.d.sa": true,
- "newrelic.d.pr": 1.1234321
+ "newrelic.d.sa": true
},
"expected": [
"traceparent.parent_id",
@@ -1835,10 +1912,11 @@
"tracestate.parent_application_id",
"tracestate.span_id",
"tracestate.transaction_id",
- "newrelic.d.ap",
- "newrelic.d.tx",
- "newrelic.d.ti",
- "newrelic.d.id"
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
+ "newrelic.d.id",
+ "newrelic.d.pr"
],
"unexpected": ["newrelic.d.tk"]
}
@@ -1903,11 +1981,11 @@
"traceparent.trace_id": "e22175eb1d68b6de32bf70e38458ccc3",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -1977,11 +2055,11 @@
"traceparent.trace_id": "e22175eb1d68b6de32bf70e38458ccc3",
"traceparent.trace_flags": "01",
"tracestate.tenant_id": "33",
- "tracestate.version": 0,
- "tracestate.parent_type": 0,
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
"tracestate.parent_account_id": "33",
- "tracestate.sampled": true,
- "tracestate.priority": 1.23456
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456"
},
"expected": [
"traceparent.parent_id",
@@ -2002,5 +2080,241 @@
["Supportability/TraceContext/Accept/Success", 1],
["Supportability/TraceContext/Create/Success", 1]
]
+ },
+ {
+ "test_name": "w3c_and_newrelic_headers_present_emit_both_header_types",
+ "trusted_account_key": "33",
+ "account_id": "33",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "transaction_events_enabled": true,
+ "transport_type": "HTTP",
+ "inbound_headers": [
+ {
+ "traceparent": "00-da8bc8cc6d062849b0efcf3c169afb5a-7d3efb1b173fecfa-01",
+ "tracestate": "33@nr=0-0-33-2827902-7d3efb1b173fecfa-e8b91a159289ff74-1-1.23456-1518469636035",
+ "newrelic": "{\"v\":[0,1],\"d\":{\"ty\":\"Mobile\",\"ac\":\"123\",\"ap\":\"51424\",\"id\":\"5f474d64b9cc9b2a\",\"tr\":\"6e2fea0b173fdad0\",\"pr\":0.1234,\"sa\":true,\"ti\":1482959525577,\"tx\":\"27856f70d3d314b7\"}}"
+ }
+ ],
+ "outbound_payloads": [
+ {
+ "exact": {
+ "traceparent.version": "00",
+ "traceparent.trace_id": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "tracestate.tenant_id": "33",
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
+ "tracestate.parent_account_id": "33",
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456",
+ "newrelic.v": [0, 1],
+ "newrelic.d.ty": "App",
+ "newrelic.d.ac": "33",
+ "newrelic.d.tr": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "newrelic.d.sa": true,
+ "newrelic.d.pr": 1.23456
+ },
+ "expected": [
+ "traceparent.trace_flags",
+ "traceparent.parent_id",
+ "tracestate.span_id",
+ "tracestate.transaction_id",
+ "tracestate.parent_application_id",
+ "tracestate.timestamp",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
+ "newrelic.d.id"
+ ],
+ "unexpected": ["newrelic.d.tk"]
+ }
+ ],
+ "expected_metrics": [
+ ["DurationByCaller/App/33/2827902/HTTP/all", 1],
+ ["DurationByCaller/App/33/2827902/HTTP/allWeb", 1],
+ ["TransportDuration/App/33/2827902/HTTP/all", 1],
+ ["TransportDuration/App/33/2827902/HTTP/allWeb", 1],
+ ["Supportability/TraceContext/Accept/Success", 1],
+ ["Supportability/TraceContext/Create/Success", 1],
+ ["Supportability/DistributedTrace/CreatePayload/Success", 1]
+ ]
+ },
+ {
+ "test_name": "only_w3c_headers_present_emit_both_header_types",
+ "trusted_account_key": "33",
+ "account_id": "33",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "transaction_events_enabled": true,
+ "transport_type": "HTTP",
+ "inbound_headers": [
+ {
+ "traceparent": "00-da8bc8cc6d062849b0efcf3c169afb5a-7d3efb1b173fecfa-01",
+ "tracestate": "33@nr=0-0-33-2827902-7d3efb1b173fecfa-e8b91a159289ff74-1-1.23456-1518469636035"
+ }
+ ],
+ "outbound_payloads": [
+ {
+ "exact": {
+ "traceparent.version": "00",
+ "traceparent.trace_id": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "tracestate.tenant_id": "33",
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
+ "tracestate.parent_account_id": "33",
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456",
+ "newrelic.v": [0, 1],
+ "newrelic.d.ty": "App",
+ "newrelic.d.ac": "33",
+ "newrelic.d.tr": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "newrelic.d.sa": true,
+ "newrelic.d.pr": 1.23456
+ },
+ "expected": [
+ "traceparent.trace_flags",
+ "traceparent.parent_id",
+ "tracestate.span_id",
+ "tracestate.transaction_id",
+ "tracestate.parent_application_id",
+ "tracestate.timestamp",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
+ "newrelic.d.id"
+ ],
+ "unexpected": ["newrelic.d.tk"]
+ }
+ ],
+ "expected_metrics": [
+ ["DurationByCaller/App/33/2827902/HTTP/all", 1],
+ ["DurationByCaller/App/33/2827902/HTTP/allWeb", 1],
+ ["TransportDuration/App/33/2827902/HTTP/all", 1],
+ ["TransportDuration/App/33/2827902/HTTP/allWeb", 1],
+ ["Supportability/TraceContext/Accept/Success", 1],
+ ["Supportability/TraceContext/Create/Success", 1],
+ ["Supportability/DistributedTrace/CreatePayload/Success", 1]
+ ]
+ },
+ {
+ "test_name": "only_newrelic_headers_present_emit_both_header_types",
+ "trusted_account_key": "33",
+ "account_id": "33",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "transaction_events_enabled": true,
+ "transport_type": "HTTP",
+ "inbound_headers": [
+ {
+ "newrelic": "{\"v\":[0,1],\"d\":{\"ty\":\"App\",\"ac\":\"33\",\"ap\":\"2827902\",\"id\":\"5f474d64b9cc9b2a\",\"tr\":\"da8bc8cc6d062849b0efcf3c169afb5a\",\"pr\":1.23456,\"sa\":true,\"ti\":1482959525577,\"tx\":\"27856f70d3d314b7\"}}"
+ }
+ ],
+ "outbound_payloads": [
+ {
+ "exact": {
+ "traceparent.version": "00",
+ "traceparent.trace_id": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "tracestate.tenant_id": "33",
+ "tracestate.version": "0",
+ "tracestate.parent_type": "0",
+ "tracestate.parent_account_id": "33",
+ "tracestate.sampled": "1",
+ "tracestate.priority": "1.23456",
+ "newrelic.v": [0, 1],
+ "newrelic.d.ty": "App",
+ "newrelic.d.ac": "33",
+ "newrelic.d.tr": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "newrelic.d.sa": true,
+ "newrelic.d.pr": 1.23456
+ },
+ "expected": [
+ "traceparent.trace_flags",
+ "traceparent.parent_id",
+ "tracestate.span_id",
+ "tracestate.transaction_id",
+ "tracestate.parent_application_id",
+ "tracestate.timestamp",
+ "newrelic.d.ap",
+ "newrelic.d.tx",
+ "newrelic.d.ti",
+ "newrelic.d.id"
+ ],
+ "unexpected": ["newrelic.d.tk"]
+ }
+ ],
+ "expected_metrics": [
+ ["DurationByCaller/App/33/2827902/HTTP/all", 1],
+ ["DurationByCaller/App/33/2827902/HTTP/allWeb", 1],
+ ["TransportDuration/App/33/2827902/HTTP/all", 1],
+ ["TransportDuration/App/33/2827902/HTTP/allWeb", 1],
+ ["Supportability/DistributedTrace/AcceptPayload/Success", 1],
+ ["Supportability/TraceContext/Create/Success", 1],
+ ["Supportability/DistributedTrace/CreatePayload/Success", 1]
+ ]
+ },
+ {
+ "test_name": "inbound_payload_from_agent_in_serverless_mode",
+ "comment": [
+ "Test a payload that originates from a serverless agent. The only",
+ "difference in the payload between a serverless and non-serverless agent",
+ "is the `appId` in the tracestate header will be 'Unknown'."
+ ],
+ "trusted_account_key": "33",
+ "account_id": "33",
+ "web_transaction": true,
+ "raises_exception": false,
+ "force_sampled_true": false,
+ "span_events_enabled": true,
+ "transaction_events_enabled": true,
+ "transport_type": "HTTP",
+ "inbound_headers": [
+ {
+ "traceparent": "00-da8bc8cc6d062849b0efcf3c169afb5a-7d3efb1b173fecfa-01",
+ "tracestate": "33@nr=0-0-33-Unknown-7d3efb1b173fecfa-e8b91a159289ff74-1-1.23456-1518469636035"
+ }
+ ],
+ "intrinsics": {
+ "target_events": ["Transaction", "Span"],
+ "common":{
+ "exact": {
+ "traceId": "da8bc8cc6d062849b0efcf3c169afb5a",
+ "priority": 1.23456,
+ "sampled": true
+ },
+ "expected": ["guid"],
+ "unexpected": ["grandparentId", "cross_process_id", "nr.tripId", "nr.pathHash", "nr.referringPathHash", "nr.guid", "nr.referringTransactionGuid", "nr.alternatePathHashes"]
+ },
+ "Transaction": {
+ "exact": {
+ "parent.type": "App",
+ "parent.app": "Unknown",
+ "parent.account": "33",
+ "parent.transportType": "HTTP",
+ "parentId": "e8b91a159289ff74",
+ "parentSpanId": "7d3efb1b173fecfa"
+ },
+ "expected": ["parent.transportDuration"]
+ },
+ "Span": {
+ "exact": {
+ "parentId": "7d3efb1b173fecfa",
+ "trustedParentId": "7d3efb1b173fecfa"
+ },
+ "expected": ["transactionId"],
+ "unexpected": ["parent.transportDuration", "parent.type", "parent.app", "parent.account", "parent.transportType", "tracingVendors"]
+ }
+ },
+ "expected_metrics": [
+ ["DurationByCaller/App/33/Unknown/HTTP/all", 1],
+ ["DurationByCaller/App/33/Unknown/HTTP/allWeb", 1],
+ ["TransportDuration/App/33/Unknown/HTTP/all", 1],
+ ["TransportDuration/App/33/Unknown/HTTP/allWeb", 1]
+ ]
}
]
diff --git a/axiom/tests/cross_agent_tests/sql_obfuscation/sql_obfuscation.json b/axiom/tests/cross_agent_tests/sql_obfuscation/sql_obfuscation.json
index 69dd2b8c4..1d32e1c1f 100644
--- a/axiom/tests/cross_agent_tests/sql_obfuscation/sql_obfuscation.json
+++ b/axiom/tests/cross_agent_tests/sql_obfuscation/sql_obfuscation.json
@@ -646,5 +646,33 @@
"cassandra",
"sqlite"
]
+ },
+ {
+ "name": "in_clause_digits",
+ "sql": "select * from foo where bar IN (123, 456, 789)",
+ "obfuscated": [
+ "select * from foo where bar IN (?, ?, ?)"
+ ],
+ "dialects": [
+ "mysql",
+ "postgres",
+ "oracle",
+ "cassandra",
+ "mssql"
+ ]
+ },
+ {
+ "name": "in_clause_strings",
+ "sql": "select * from foo where bar IN ('asdf', 'fdsa')",
+ "obfuscated": [
+ "select * from foo where bar IN (?, ?)"
+ ],
+ "dialects": [
+ "mysql",
+ "postgres",
+ "oracle",
+ "cassandra",
+ "mssql"
+ ]
}
]
diff --git a/axiom/tests/cross_agent_tests/utilization_vendor_specific/aws.json b/axiom/tests/cross_agent_tests/utilization_vendor_specific/aws.json
index 97bc04771..4d183c768 100644
--- a/axiom/tests/cross_agent_tests/utilization_vendor_specific/aws.json
+++ b/axiom/tests/cross_agent_tests/utilization_vendor_specific/aws.json
@@ -2,7 +2,7 @@
{
"testname": "aws api times out, no vendor hash or supportability metric reported",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": null,
"instanceType": null,
@@ -21,7 +21,7 @@
{
"testname": "instance type, instance-id, availability-zone are all happy",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "i-test.19characters",
"instanceType": "test.type",
@@ -41,7 +41,7 @@
{
"testname": "instance type with invalid characters",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "test.id",
"instanceType": "",
@@ -60,7 +60,7 @@
{
"testname": "instance type too long",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "test.id",
"instanceType": "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
@@ -79,7 +79,7 @@
{
"testname": "instance id with invalid characters",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "",
"instanceType": "test.type",
@@ -98,7 +98,7 @@
{
"testname": "instance id too long",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
"instanceType": "test.type",
@@ -117,7 +117,7 @@
{
"testname": "availability zone with invalid characters",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "test.id",
"instanceType": "test.type",
@@ -136,7 +136,7 @@
{
"testname": "availability zone too long",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "test.id",
"instanceType": "test.type",
@@ -155,7 +155,7 @@
{
"testname": "UTF-8 high codepoints",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "滈 橀槶澉 鞻饙騴 鱙鷭黂 甗糲 紁羑 嗂 蛶觢豥 餤駰鬳 釂鱞鸄",
"instanceType": "test.type",
@@ -175,7 +175,7 @@
{
"testname": "comma with multibyte characters",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "滈 橀槶澉 鞻饙騴 鱙鷭黂 甗糲, 紁羑 嗂 蛶觢豥 餤駰鬳 釂鱞鸄",
"instanceType": "test.type",
@@ -194,7 +194,7 @@
{
"testname": "Exclamation point response",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "bang!",
"instanceType": "test.type",
@@ -213,7 +213,7 @@
{
"testname": "Valid punctuation in response",
"uri": {
- "http://169.254.169.254/2016-09-02/dynamic/instance-identity/document": {
+ "http://169.254.169.254/latest/dynamic/instance-identity/document": {
"response": {
"instanceId": "test.id",
"instanceType": "a-b_c.3... and/or 503 867-5309",
diff --git a/axiom/tests/reference/test_daemon.cmp b/axiom/tests/reference/test_daemon.cmp
index d8bfcd284..a7adbee48 100644
--- a/axiom/tests/reference/test_daemon.cmp
+++ b/axiom/tests/reference/test_daemon.cmp
@@ -26,6 +26,8 @@ verbosedebug: exec[24]='--define'
verbosedebug: exec[25]='utilization.detect_pcf=false'
verbosedebug: exec[26]='--define'
verbosedebug: exec[27]='utilization.detect_docker=true'
-verbosedebug: exec[28]=''
+verbosedebug: exec[28]='--define'
+verbosedebug: exec[29]='utilization.detect_kubernetes=false'
+verbosedebug: exec[30]=''
info: stdout should be redirected to the log file
info: stderr should be redirected to the log file
diff --git a/axiom/tests/test_app.c b/axiom/tests/test_app.c
index 1fd24bc22..b4466d016 100644
--- a/axiom/tests/test_app.c
+++ b/axiom/tests/test_app.c
@@ -25,6 +25,8 @@
#define TEST_AGENT_RUN_ID "12345678"
#define TEST_LABELS_JSON \
"{\"Data Center\":\"US-East\",\"Server Color\":\"Beige\"}"
+#define TEST_METADATA_JSON \
+ "{\"NEW_RELIC_METADATA_ZIP\":\"zap\",\"NEW_RELIC_METADATA_ONE\":\"one\"}"
typedef struct _test_app_state_t {
bool cmd_appinfo_succeed;
@@ -201,6 +203,7 @@ static void test_find_or_add_app(void) {
info.settings = nro_create_from_json("[\"my_settings\"]");
info.environment = nro_create_from_json("[\"my_environment\"]");
info.labels = nro_create_from_json(TEST_LABELS_JSON);
+ info.metadata = nro_create_from_json(TEST_METADATA_JSON);
info.host_display_name = nr_strdup("my_host_display_name");
info.high_security = 0;
info.redirect_collector = nr_strdup("collector.newrelic.com");
@@ -251,6 +254,7 @@ static void test_find_or_add_app(void) {
test_obj_as_json("new app", app->info.environment,
"[\"my_environment\"]");
test_obj_as_json("new app", app->info.labels, TEST_LABELS_JSON);
+ test_obj_as_json("new app", app->info.metadata, TEST_METADATA_JSON);
tlib_pass_if_str_equal("new app", info.host_display_name,
app->info.host_display_name);
tlib_pass_if_str_equal("new app", info.redirect_collector,
@@ -310,6 +314,7 @@ static void test_find_or_add_app(void) {
test_obj_as_json("find app", app->info.environment,
"[\"my_environment\"]");
test_obj_as_json("find app", app->info.labels, TEST_LABELS_JSON);
+ test_obj_as_json("find app", app->info.metadata, TEST_METADATA_JSON);
tlib_pass_if_str_equal("find app", info.host_display_name,
app->info.host_display_name);
tlib_pass_if_str_equal("new app", info.redirect_collector,
@@ -343,6 +348,7 @@ static void test_find_or_add_app_high_security_mismatch(void) {
info.settings = nro_create_from_json("[\"my_settings\"]");
info.environment = nro_create_from_json("[\"my_environment\"]");
info.labels = nro_create_from_json(TEST_LABELS_JSON);
+ info.metadata = nro_create_from_json(TEST_METADATA_JSON);
info.high_security = 0;
info.redirect_collector = nr_strdup("collector.newrelic.com");
@@ -504,6 +510,7 @@ static void test_agent_find_or_add_app(void) {
info.settings = NULL;
info.environment = nro_create_from_json("[\"my_environment\"]");
info.labels = nro_create_from_json(TEST_LABELS_JSON);
+ info.metadata = nro_create_from_json(TEST_METADATA_JSON);
info.high_security = 555;
info.redirect_collector = nr_strdup("collector.newrelic.com");
info.security_policies_token = nr_strdup("");
@@ -549,6 +556,7 @@ static void test_agent_find_or_add_app(void) {
tlib_pass_if_null("new app", app->info.settings);
test_obj_as_json("new app", app->info.environment, "[\"my_environment\"]");
test_obj_as_json("new app", app->info.labels, TEST_LABELS_JSON);
+ test_obj_as_json("new app", app->info.metadata, TEST_METADATA_JSON);
tlib_pass_if_str_equal("new app", info.redirect_collector,
app->info.redirect_collector);
tlib_pass_if_str_equal("new app", system_host_name, app->host_name);
@@ -657,6 +665,49 @@ static void test_agent_find_or_add_app(void) {
* applist. */
}
+ /*
+ * Test : New app, but null metadata
+ */
+ p->cmd_appinfo_succeed = false;
+ p->cmd_appinfo_called = 0;
+ nr_free(info.appname);
+ info.appname = nr_strdup("appname_null_metadata");
+ nro_delete(info.metadata);
+ app = nr_agent_find_or_add_app(applist, &info, settings_callback_fn, 0);
+ tlib_pass_if_null("new app NULL metadata", app);
+ tlib_pass_if_int_equal("new app NULL metadata", 4, applist->num_apps);
+ tlib_pass_if_int_equal("new app NULL metadata", 1, p->cmd_appinfo_called);
+ app = applist->apps[3];
+ tlib_pass_if_not_null("new app NULL metadata", app);
+ if (0 != app) {
+ test_obj_as_json("new app NULL metadata", app->info.metadata, "null");
+
+ /* No unlock here because the app actually came in unlocked from the
+ * applist. */
+ }
+
+ /*
+ * Test : New app, but empty metadata
+ */
+ p->cmd_appinfo_succeed = false;
+ p->cmd_appinfo_called = 0;
+ nr_free(info.appname);
+ info.appname = nr_strdup("appname_empty_metadata");
+ nro_delete(info.metadata);
+ info.metadata = nro_create_from_json("{}");
+ app = nr_agent_find_or_add_app(applist, &info, settings_callback_fn, 0);
+ tlib_pass_if_null("new app empty metadata", app);
+ tlib_pass_if_int_equal("new app empty metadata", 5, applist->num_apps);
+ tlib_pass_if_int_equal("new app empty metadata", 1, p->cmd_appinfo_called);
+ app = applist->apps[4];
+ tlib_pass_if_not_null("new app empty metadata", app);
+ if (0 != app) {
+ test_obj_as_json("new app empty metadata", app->info.metadata, "{}");
+
+ /* No unlock here because the app actually came in unlocked from the
+ * applist. */
+ }
+
/*
* Test : Unable to add application due to full applist.
*/
@@ -676,7 +727,7 @@ static void test_agent_find_or_add_app(void) {
*/
p->cmd_appinfo_succeed = true;
p->cmd_appinfo_called = 0;
- applist->num_apps = 3;
+ applist->num_apps = 5;
nr_free(info.appname);
info.appname = nr_strdup("appname_security");
info.high_security = 1;
@@ -719,6 +770,7 @@ static void test_verify_id(void) {
info.settings = NULL;
info.environment = nro_create_from_json("[\"my_environment\"]");
info.labels = nro_create_from_json(TEST_LABELS_JSON);
+ info.metadata = nro_create_from_json(TEST_METADATA_JSON);
info.high_security = 0;
info.redirect_collector = nr_strdup("collector.newrelic.com");
diff --git a/axiom/tests/test_cmd_appinfo.c b/axiom/tests/test_cmd_appinfo.c
index 004110fc2..2eef9447d 100644
--- a/axiom/tests/test_cmd_appinfo.c
+++ b/axiom/tests/test_cmd_appinfo.c
@@ -53,6 +53,7 @@ static void test_create_empty_query(void) {
test_pass_if_empty_vector(&app, APP_HOST);
test_pass_if_empty_vector(&app, APP_TRACE_OBSERVER_HOST);
test_pass_if_empty_vector(&app, APP_FIELD_LABELS);
+ test_pass_if_empty_vector(&app, APP_METADATA);
high_security
= nr_flatbuffers_table_read_i8(&app, APP_FIELD_HIGH_SECURITY, 42);
@@ -85,6 +86,7 @@ static void test_create_query(void) {
info.settings = nro_create_from_json(settings_json);
info.environment = nro_create_from_json("{\"my_environment\":\"hi\"}");
info.labels = nro_create_from_json("{\"my_labels\":\"hello\"}");
+ info.metadata = nro_create_from_json("{\"NEWRELIC_METADATA_ZIP\":\"zap\"}");
info.host_display_name = nr_strdup("my_host_display_name");
info.lang = nr_strdup("my_lang");
info.version = nr_strdup("my_version");
@@ -128,6 +130,9 @@ static void test_create_query(void) {
tlib_pass_if_str_equal(
__func__, "[{\"label_type\":\"my_labels\",\"label_value\":\"hello\"}]",
(const char*)nr_flatbuffers_table_read_bytes(&app, APP_FIELD_LABELS));
+ tlib_pass_if_str_equal(
+ __func__, "{\"NEWRELIC_METADATA_ZIP\":\"zap\"}",
+ (const char*)nr_flatbuffers_table_read_bytes(&app, APP_METADATA));
tlib_pass_if_str_equal(__func__, "[[\"my_environment\",\"hi\"]]",
(const char*)nr_flatbuffers_table_read_bytes(
&app, APP_FIELD_ENVIRONMENT));
diff --git a/axiom/tests/test_strings.c b/axiom/tests/test_strings.c
index ca46e7420..ea1576e36 100644
--- a/axiom/tests/test_strings.c
+++ b/axiom/tests/test_strings.c
@@ -1197,23 +1197,35 @@ static void test_toupper(void) {
static void test_str_append(void) {
char* str = NULL;
-
/*
* Test : Bad parameters.
*/
- tlib_pass_if_null("null dest and src strings", nr_str_append(NULL, NULL));
- tlib_pass_if_str_equal("null src string", nr_str_append("dest", NULL),
+ tlib_pass_if_null("null dest and src strings",
+ nr_str_append(NULL, NULL, ","));
+ tlib_pass_if_str_equal("null src string", nr_str_append("dest", NULL, ","),
"dest");
- str = nr_str_append(str, "string1");
+ str = nr_str_append(str, "string1", ",");
tlib_pass_if_str_equal("null dest string", str, "string1");
-
/*
* Test : Valid destination and source strings.
*/
- str = nr_str_append(str, "string2");
+ str = nr_str_append(str, "string2", ",");
tlib_pass_if_str_equal("valid dest and src strings", str, "string1,string2");
nr_free(str);
+
+ /*
+ * Test : Delimiters.
+ */
+ str = nr_str_append(str, "string1", NULL);
+ str = nr_str_append(str, "string2", ":");
+ tlib_pass_if_str_equal("valid dest and src strings", str, "string1:string2");
+ nr_free(str);
+
+ str = nr_str_append(str, "string1", ",");
+ str = nr_str_append(str, "string2", NULL);
+ tlib_pass_if_str_equal("valid dest and src strings", str, "string1string2");
+ nr_free(str);
}
tlib_parallel_info_t parallel_info = {.suggested_nthreads = 2, .state_size = 0};
diff --git a/axiom/tests/test_txn.c b/axiom/tests/test_txn.c
index 92f785119..e2bc180c1 100644
--- a/axiom/tests/test_txn.c
+++ b/axiom/tests/test_txn.c
@@ -63,6 +63,28 @@ typedef struct {
int line;
} hash_is_subset_of_data_t;
+static bool need_to_stringify(const nrobj_t* val, const nrobj_t* obj) {
+ nrotype_t expected_type;
+ nrotype_t found_type;
+
+ if (NULL == val || NULL == obj) {
+ return false;
+ }
+
+ expected_type = nro_type(val);
+ found_type = nro_type(obj);
+
+ if (NR_OBJECT_STRING != expected_type || NR_OBJECT_INVALID == found_type) {
+ return false;
+ }
+
+ if (expected_type != found_type) {
+ return true;
+ }
+
+ return false;
+}
+
static nr_status_t hash_is_subset_of(const char* key,
const nrobj_t* val,
void* ptr) {
@@ -72,7 +94,14 @@ static nr_status_t hash_is_subset_of(const char* key,
* types.
*/
char* expected = nro_to_json(val);
- char* found = nro_to_json(nro_get_hash_value(data->set, key, NULL));
+ char* found;
+ const nrobj_t* found_obj = nro_get_hash_value(data->set, key, NULL);
+
+ if (need_to_stringify(val, found_obj)) {
+ found = nro_stringify(found_obj);
+ } else {
+ found = nro_to_json(found_obj);
+ }
test_pass_if_true_file_line(
data->testname, 0 == nr_strcmp(expected, found), data->file, data->line,
@@ -1776,6 +1805,15 @@ static void test_begin(void) {
test_created_txn("app turns off error events", rv, &correct);
nr_txn_destroy(&rv);
+ /*
+ * Test : App turns off span events
+ */
+ nro_set_hash_boolean(app->connect_reply, "collect_span_events", 0);
+ correct.span_events_enabled = 0;
+ rv = nr_txn_begin(app, opts, attribute_config);
+ test_created_txn("app turns off span events", rv, &correct);
+ nr_txn_destroy(&rv);
+
/*
* Test : High security off
*/
diff --git a/axiom/util_object.c b/axiom/util_object.c
index e84b95f62..4327fed5f 100644
--- a/axiom/util_object.c
+++ b/axiom/util_object.c
@@ -1906,3 +1906,44 @@ char* nro_dump(const nrobj_t* obj) {
return str;
}
+
+static char* stringify_boolean(const nrobj_t* obj) {
+ nr_status_t err;
+ nrbuf_t* buf;
+ char* ret;
+ int ival = nro_get_ival(obj, &err);
+
+ buf = nr_buffer_create(1024, 1024);
+
+ add_obj_jfmt(buf, "%d", ival);
+ nr_buffer_add(buf, "\0", 1);
+ ret = nr_strdup((const char*)nr_buffer_cptr(buf));
+ nr_buffer_destroy(&buf);
+
+ return ret;
+}
+
+char* nro_stringify(const nrobj_t* found) {
+ nrotype_t found_type = nro_type(found);
+ nrbuf_t* buf;
+ char* tmp;
+ char* ret;
+
+ buf = nr_buffer_create(1024, 1024);
+
+ if (NR_OBJECT_BOOLEAN == found_type) {
+ tmp = stringify_boolean(found);
+ } else {
+ tmp = nro_to_json(found);
+ }
+
+ nr_buffer_add_escape_json(buf, tmp);
+  nr_buffer_add(buf, "\0", 1);
+
+ ret = nr_strdup((const char*)nr_buffer_cptr(buf));
+
+ nr_buffer_destroy(&buf);
+ nr_free(tmp);
+
+ return ret;
+}
diff --git a/axiom/util_object.h b/axiom/util_object.h
index 007acda84..c183990c7 100644
--- a/axiom/util_object.h
+++ b/axiom/util_object.h
@@ -274,4 +274,10 @@ extern nrobj_t* nro_create_from_json_unterminated(const char* json, int len);
*/
extern char* nro_dump(const nrobj_t* obj);
+/*
+ * Stringify the resulting value from a test to satisfy 'exact' fields
+ * of outbound payloads.
+ */
+extern char* nro_stringify(const nrobj_t* found);
+
#endif /* UTIL_OBJECT_HDR */
diff --git a/axiom/util_strings.c b/axiom/util_strings.c
index dd42ef8d1..54e13e7cf 100644
--- a/axiom/util_strings.c
+++ b/axiom/util_strings.c
@@ -423,8 +423,10 @@ int nr_str_char_count(const char* s, char c) {
return count;
}
-char* nr_str_append(char* dest, const char* src) {
+char* nr_str_append(char* dest, const char* src, const char* delimiter) {
char* tmp = NULL;
+ const char* delim
+ = (NULL != delimiter) ? delimiter : ""; // Treat NULL delimiter as no delimiter
if (NULL == src) {
return dest;
@@ -434,7 +436,7 @@ char* nr_str_append(char* dest, const char* src) {
dest = nr_strdup(src);
} else {
tmp = dest;
- dest = nr_formatf("%s,%s", dest, src);
+ dest = nr_formatf("%s%s%s", dest, delim, src);
nr_free(tmp);
}
diff --git a/axiom/util_strings.h b/axiom/util_strings.h
index a09fb1fd5..c1d61e0b4 100644
--- a/axiom/util_strings.h
+++ b/axiom/util_strings.h
@@ -240,14 +240,16 @@ extern int nr_strncspn(const char* s1, int s1len, const char* s2, int s2len);
extern int nr_str_char_count(const char* s, char c);
/*
- * Purpose : Append a string to the end of another string separated by a comma.
+ * Purpose : Append a string to the end of another string separated by a
+ * delimiter.
*
* Params : 1. The destination string.
* 2. The source string.
+ * 3. The delimiter to separate the strings; NULL treated as empty string.
*
* Returns : A newly allocated string containing both.
*/
-extern char* nr_str_append(char* dest, const char* src);
+extern char* nr_str_append(char* dest, const char* src, const char* delimiter);
/*
* Purpose : Test for an alphanumeric character using the "C" locale. In the "C"
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 410c33295..2ff148906 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -6,6 +6,7 @@ version: '3.8'
services:
# The Database
mysqldb:
+ platform: linux/amd64
image: mysql:5.6
restart: always
environment:
diff --git a/files/Dockerfile b/files/Dockerfile
index 983115ccf..23aac3b3f 100644
--- a/files/Dockerfile
+++ b/files/Dockerfile
@@ -101,10 +101,20 @@ RUN if [ -z "$(grep '^8\.' /etc/debian_version)" ]; then \
# install composer
WORKDIR /usr/src
-RUN php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
-RUN php -r "if (hash_file('sha384', 'composer-setup.php') === '906a84df04cea2aa72f40b5f787e49f22d4c2f19492ac310e8cba5b96ac8b64115ac402c8cd292b8a03482574915d1a8') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
-RUN php composer-setup.php
-RUN php -r "unlink('composer-setup.php');"
+
+# based on https://getcomposer.org/doc/faqs/how-to-install-composer-programmatically.md
+RUN \
+ EXPECTED_CHECKSUM="$(php -r 'copy("https://composer.github.io/installer.sig", "php://stdout");')" \
+ && php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" \
+ && ACTUAL_CHECKSUM="$(php -r "echo hash_file('sha384', 'composer-setup.php');")" \
+ && if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]; \
+ then \
+ >&2 echo 'ERROR: Invalid installer checksum'; \
+ rm composer-setup.php; \
+ exit 1; \
+ fi \
+ && php composer-setup.php \
+ && php -r "unlink('composer-setup.php');"
#
# The explain plan in the sql tests contain partition/filtered properties
diff --git a/protocol/flatbuffers/protocol.fbs b/protocol/flatbuffers/protocol.fbs
index 49474c97b..a48cfb962 100644
--- a/protocol/flatbuffers/protocol.fbs
+++ b/protocol/flatbuffers/protocol.fbs
@@ -32,6 +32,7 @@ table App {
trace_observer_port: uint16; // added for PHP agent release 9.11
span_queue_size: uint64; // added for PHP agent release 9.11
span_events_max_samples_stored: uint64; // added for PHP agent release 9.21
+ metadata: string; // pre-computed json, added for PHP agent release 10.0
}
enum AppStatus : byte { Unknown = 0, Disconnected = 1, InvalidLicense = 2,
diff --git a/src/daemon/main.go b/src/daemon/main.go
index 607f0f88a..d4a2e230f 100644
--- a/src/daemon/main.go
+++ b/src/daemon/main.go
@@ -239,6 +239,7 @@ type Config struct {
DetectGCP bool `config:"utilization.detect_gcp"` // Whether to detect if this is running on GCP in utilization
DetectPCF bool `config:"utilization.detect_pcf"` // Whether to detect if this is running on PCF in utilization
DetectDocker bool `config:"utilization.detect_docker"` // Whether to detect if this is in a Docker container in utilization
+ DetectKubernetes bool `config:"utilization.detect_kubernetes"` // Whether to detect if this is in a Kubernetes cluster
LogicalProcessors int `config:"utilization.logical_processors"` // Customer provided number of logical processors for pricing control.
TotalRamMIB int `config:"utilization.total_ram_mib"` // Customer provided total RAM in mebibytes for pricing control.
BillingHostname string `config:"utilization.billing_hostname"` // Customer provided hostname for pricing control.
@@ -259,6 +260,7 @@ func (cfg *Config) MakeUtilConfig() utilization.Config {
DetectGCP: cfg.DetectGCP,
DetectPCF: cfg.DetectPCF,
DetectDocker: cfg.DetectDocker,
+ DetectKubernetes: cfg.DetectKubernetes,
LogicalProcessors: cfg.LogicalProcessors,
TotalRamMIB: cfg.TotalRamMIB,
BillingHostname: cfg.BillingHostname,
@@ -338,11 +340,12 @@ func main() {
if cfg.Utilization {
util := utilization.Gather(utilization.Config{
- DetectAWS: true,
- DetectAzure: true,
- DetectGCP: true,
- DetectPCF: true,
- DetectDocker: true,
+ DetectAWS: true,
+ DetectAzure: true,
+ DetectGCP: true,
+ DetectPCF: true,
+ DetectDocker: true,
+ DetectKubernetes: true,
})
str, err := json.MarshalIndent(util, "", "\t")
if err != nil {
diff --git a/src/flatbuffersdata/data.go b/src/flatbuffersdata/data.go
index a3ecfa3fd..9997a8ad6 100644
--- a/src/flatbuffersdata/data.go
+++ b/src/flatbuffersdata/data.go
@@ -19,6 +19,7 @@ func MarshalAppInfo(info *newrelic.AppInfo) ([]byte, error) {
settingsJSON, _ := json.Marshal(info.Settings)
envJSON, _ := json.Marshal(info.Environment)
labelsJSON, _ := json.Marshal(info.Labels)
+ metadataJSON, _ := json.Marshal(info.Metadata)
buf := flatbuffers.NewBuilder(0)
@@ -30,6 +31,7 @@ func MarshalAppInfo(info *newrelic.AppInfo) ([]byte, error) {
settings := buf.CreateString(string(settingsJSON))
env := buf.CreateString(string(envJSON))
labels := buf.CreateString(string(labelsJSON))
+	metadata := buf.CreateString(string(metadataJSON))
host := buf.CreateString(string(info.Hostname))
traceObserverHost := buf.CreateString(info.TraceObserverHost)
@@ -41,6 +43,7 @@ func MarshalAppInfo(info *newrelic.AppInfo) ([]byte, error) {
protocol.AppAddRedirectCollector(buf, collector)
protocol.AppAddEnvironment(buf, env)
protocol.AppAddLabels(buf, labels)
+ protocol.AppAddMetadata(buf, metadata)
protocol.AppAddSettings(buf, settings)
protocol.AppAddHost(buf, host)
protocol.AppAddTraceObserverHost(buf, traceObserverHost)
diff --git a/src/integration_runner/main.go b/src/integration_runner/main.go
index 6971462b0..43f7dbe3f 100644
--- a/src/integration_runner/main.go
+++ b/src/integration_runner/main.go
@@ -134,6 +134,7 @@ var (
HighSecurity: false,
Environment: nil,
Labels: nil,
+ Metadata: nil,
Settings:
// Ensure that we get Javascript agent code in the reply
map[string]interface{}{"newrelic.browser_monitoring.debug": false, "newrelic.browser_monitoring.loader": "rum"},
@@ -575,10 +576,10 @@ func discoverTests(pattern string, searchPaths []string) []string {
return testFiles
}
-func injectIntoConnectReply(reply []byte, newRunID, crossProcessId string) []byte {
+func injectIntoConnectReply(reply collector.RPMResponse, newRunID, crossProcessId string) []byte {
var x map[string]interface{}
- json.Unmarshal(reply, &x)
+ json.Unmarshal(reply.Body, &x)
x["agent_run_id"] = newRunID
x["cross_process_id"] = crossProcessId
@@ -590,7 +591,7 @@ func injectIntoConnectReply(reply []byte, newRunID, crossProcessId string) []byt
type IntegrationDataHandler struct {
sync.Mutex // Protects harvests
harvests map[string]*newrelic.Harvest // Keyed by tc.Name (which is used as AgentRunID)
- reply []byte // Constant after creation
+ reply collector.RPMResponse // Constant after creation
rawSecurityPolicies []byte // policies from connection attempt, needed for AppInfo reply
}
@@ -651,11 +652,12 @@ func startDaemon(network, address string, securityToken string, securityPolicies
client, _ := newrelic.NewClient(&newrelic.ClientConfig{})
connectPayload := TestApp.ConnectPayload(utilization.Gather(
utilization.Config{
- DetectAWS: true,
- DetectAzure: true,
- DetectGCP: true,
- DetectPCF: true,
- DetectDocker: true,
+ DetectAWS: true,
+ DetectAzure: true,
+ DetectGCP: true,
+ DetectPCF: true,
+ DetectDocker: true,
+ DetectKubernetes: true,
}))
policies := newrelic.AgentPolicies{}
diff --git a/src/newrelic/analytics_events.go b/src/newrelic/analytics_events.go
index 879b79d49..50b13c7bf 100644
--- a/src/newrelic/analytics_events.go
+++ b/src/newrelic/analytics_events.go
@@ -61,6 +61,14 @@ func (events *analyticsEvents) Split() (*analyticsEvents, *analyticsEvents) {
return e1, e2
}
+// NumFailedAttempts returns the number of failed attempts to this endpoint.
+// The value is the number of times the agent attempted to call the given endpoint before it was successful.
+// This metric MUST NOT be generated if only one attempt was made.
+// Does not include the successful attempt.
+func (events *analyticsEvents) NumFailedAttempts() float64 {
+ return float64(events.failedHarvests)
+}
+
// NumSeen returns the total number of analytics events observed.
func (events *analyticsEvents) NumSeen() float64 {
return float64(events.numSeen)
diff --git a/src/newrelic/app.go b/src/newrelic/app.go
index 16a35a6e4..b813c1319 100644
--- a/src/newrelic/app.go
+++ b/src/newrelic/app.go
@@ -31,11 +31,12 @@ func (id AgentRunID) String() string {
type AppState int
const (
- AppStateUnknown AppState = iota
- AppStateConnected
- AppStateInvalidLicense
- AppStateDisconnected
- AppStateInvalidSecurityPolicies
+ AppStateUnknown AppState = iota
+ AppStateConnected
+ AppStateDisconnected
+ AppStateRestart
+ AppStateInvalidLicense
+ AppStateInvalidSecurityPolicies
)
// An AppKey uniquely identifies an application.
@@ -64,6 +65,7 @@ type AppInfo struct {
Environment JSONString
HighSecurity bool
Labels JSONString
+ Metadata JSONString
RedirectCollector string
SecurityPolicyToken string
SupportedSecurityPolicies AgentPolicies
@@ -100,6 +102,7 @@ type RawConnectPayload struct {
HighSecurity bool `json:"high_security"`
Labels JSONString `json:"labels"`
Environment JSONString `json:"environment"`
+ Metadata JSONString `json:"metadata"`
Identifier string `json:"identifier"`
Util *utilization.Data `json:"utilization,omitempty"`
SecurityPolicies map[string]SecurityPolicy `json:"security_policies,omitempty"`
@@ -123,6 +126,8 @@ type ConnectReply struct {
SamplingTarget int `json:"sampling_target"`
EventHarvestConfig collector.EventHarvestConfig `json:"event_harvest_config"`
SpanEventHarvestConfig collector.SpanEventHarvestConfig `json:"span_event_harvest_config"`
+ RequestHeadersMap map[string]string `json:"request_headers_map"`
+ MaxPayloadSizeInBytes int `json:"max_payload_size_in_bytes"`
}
// An App represents the state of an application.
@@ -246,6 +251,11 @@ func (info *AppInfo) ConnectPayloadInternal(pid int, util *utilization.Data) *Ra
} else {
data.Labels = JSONString("[]")
}
+ if len(info.Metadata) > 0 {
+ data.Metadata = info.Metadata
+ } else {
+ data.Metadata = JSONString("{}")
+ }
return data
}
@@ -293,7 +303,7 @@ func combineEventConfig (ehc collector.EventHarvestConfig, sehc collector.SpanEv
}
func parseConnectReply(rawConnectReply []byte) (*ConnectReply, error) {
- var c ConnectReply
+ c := ConnectReply{MaxPayloadSizeInBytes: limits.DefaultMaxPayloadSizeInBytes}
err := json.Unmarshal(rawConnectReply, &c)
if nil != err {
diff --git a/src/newrelic/app_harvest.go b/src/newrelic/app_harvest.go
index 20943b603..e03882ef9 100644
--- a/src/newrelic/app_harvest.go
+++ b/src/newrelic/app_harvest.go
@@ -43,12 +43,13 @@ func NewAppHarvest(id AgentRunID, app *App, harvest *Harvest, ph chan ProcessorH
if len(app.info.TraceObserverHost) > 0 {
cfg := &infinite_tracing.Config{
- RunId: id.String(),
- License: string(app.info.License),
- Host: app.info.TraceObserverHost,
- Port: app.info.TraceObserverPort,
- Secure: true,
- QueueSize: app.info.SpanQueueSize,
+ RunId: id.String(),
+ License: string(app.info.License),
+ Host: app.info.TraceObserverHost,
+ Port: app.info.TraceObserverPort,
+ Secure: true,
+ QueueSize: app.info.SpanQueueSize,
+ RequestHeadersMap: app.connectReply.RequestHeadersMap,
}
ah.TraceObserver = infinite_tracing.NewTraceObserver(cfg)
}
diff --git a/src/newrelic/app_test.go b/src/newrelic/app_test.go
index 07b30f3a4..ec6600298 100644
--- a/src/newrelic/app_test.go
+++ b/src/newrelic/app_test.go
@@ -6,6 +6,7 @@
package newrelic
import (
+ "fmt"
"testing"
"time"
"strconv"
@@ -34,6 +35,7 @@ func TestConnectPayloadInternal(t *testing.T) {
Environment: JSONString(`[["b", 2]]`),
HighSecurity: false,
Labels: JSONString(`[{"label_type":"c","label_value":"d"}]`),
+ Metadata: JSONString(`{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"}`),
RedirectCollector: "collector.newrelic.com",
Hostname: "some_host",
}
@@ -50,6 +52,7 @@ func TestConnectPayloadInternal(t *testing.T) {
AppName: []string{"one", "two"},
HighSecurity: false,
Labels: JSONString(`[{"label_type":"c","label_value":"d"}]`),
+ Metadata: JSONString(`{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"}`),
Environment: JSONString(`[["b",2]]`),
Identifier: "one;two",
Util: util,
@@ -238,13 +241,19 @@ func TestNeedsConnectAttempt(t *testing.T) {
t.Fatal(now, app.lastConnectAttempt, app.state)
}
- app.state = AppStateInvalidLicense
+ app.state = AppStateDisconnected
app.lastConnectAttempt = now.Add(-limits.AppConnectAttemptBackoff)
if app.NeedsConnectAttempt(now, limits.AppConnectAttemptBackoff) {
t.Fatal(now, app.lastConnectAttempt, app.state)
}
- app.state = AppStateDisconnected
+ app.state = AppStateRestart
+ app.lastConnectAttempt = now.Add(-limits.AppConnectAttemptBackoff)
+ if app.NeedsConnectAttempt(now, limits.AppConnectAttemptBackoff) {
+ t.Fatal(now, app.lastConnectAttempt, app.state)
+ }
+
+ app.state = AppStateInvalidLicense
app.lastConnectAttempt = now.Add(-limits.AppConnectAttemptBackoff)
if app.NeedsConnectAttempt(now, limits.AppConnectAttemptBackoff) {
t.Fatal(now, app.lastConnectAttempt, app.state)
@@ -342,11 +351,11 @@ func TestConnectPayloadEncoded(t *testing.T) {
Environment: JSONString(`[["b", 2]]`),
HighSecurity: false,
Labels: JSONString(`[{"label_type":"c","label_value":"d"}]`),
+ Metadata: JSONString(`{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"}`),
RedirectCollector: "collector.newrelic.com",
Hostname: "some_host",
}
-
// A valid span event max samples stored value configured from the agent should
// propagate through and be sent to the collector
info.AgentEventLimits.SpanEventConfig.Limit = 2323
@@ -364,6 +373,7 @@ func TestConnectPayloadEncoded(t *testing.T) {
`"high_security":false,` +
`"labels":[{"label_type":"c","label_value":"d"}],` +
`"environment":[["b",2]],` +
+ `"metadata":{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"},` +
`"identifier":"one;two",` +
`"utilization":{"metadata_version":1,"logical_processors":22,"total_ram_mib":1000,"hostname":"some_host"},` +
`"event_harvest_config":{"report_period_ms":60000,"harvest_limits":{"error_event_data":100,"analytic_event_data":10000,"custom_event_data":10000,"span_event_data":2323}}` +
@@ -394,6 +404,7 @@ func TestConnectPayloadEncoded(t *testing.T) {
`"high_security":false,` +
`"labels":[{"label_type":"c","label_value":"d"}],` +
`"environment":[["b",2]],` +
+ `"metadata":{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"},` +
`"identifier":"one;two",` +
`"utilization":{"metadata_version":1,"logical_processors":22,"total_ram_mib":1000,"hostname":"some_host"},` +
`"event_harvest_config":{"report_period_ms":60000,"harvest_limits":{"error_event_data":100,"analytic_event_data":10000,"custom_event_data":10000,"span_event_data":`+strconv.Itoa(limits.MaxSpanMaxEvents)+`}}`+
@@ -421,6 +432,35 @@ func TestConnectPayloadEncoded(t *testing.T) {
`"high_security":false,` +
`"labels":[{"label_type":"c","label_value":"d"}],` +
`"environment":[["b",2]],` +
+ `"metadata":{"NEW_RELIC_METADATA_ONE":"one","NEW_RELIC_METADATA_TWO":"two"},` +
+ `"identifier":"one;two",` +
+ `"utilization":{"metadata_version":1,"logical_processors":22,"total_ram_mib":1000,"hostname":"some_host"},` +
+ `"event_harvest_config":{"report_period_ms":60000,"harvest_limits":{"error_event_data":100,"analytic_event_data":10000,"custom_event_data":10000,"span_event_data":1001}}` +
+ `}` +
+ `]`
+
+ b, err = EncodePayload(info.ConnectPayloadInternal(pid, util))
+ if err != nil {
+ t.Error(err)
+ } else if string(b) != expected {
+ t.Errorf("expected: %s\nactual: %s", expected, string(b))
+ }
+
+
+ // an empty JSON for the Metadata should be sent
+ info.Metadata = JSONString(`{}`)
+ expected = `[` +
+ `{` +
+ `"pid":123,` +
+ `"language":"php",` +
+ `"agent_version":"0.1",` +
+ `"host":"some_host",` +
+ `"settings":{"a":"1","b":true},` +
+ `"app_name":["one","two"],` +
+ `"high_security":false,` +
+ `"labels":[{"label_type":"c","label_value":"d"}],` +
+ `"environment":[["b",2]],` +
+ `"metadata":{},` +
`"identifier":"one;two",` +
`"utilization":{"metadata_version":1,"logical_processors":22,"total_ram_mib":1000,"hostname":"some_host"},` +
`"event_harvest_config":{"report_period_ms":60000,"harvest_limits":{"error_event_data":100,"analytic_event_data":10000,"custom_event_data":10000,"span_event_data":1001}}` +
@@ -434,4 +474,55 @@ func TestConnectPayloadEncoded(t *testing.T) {
t.Errorf("expected: %s\nactual: %s", expected, string(b))
}
+ // a NULL JSON for the Metadata should send an empty JSON
+ info.Metadata = nil
+ expected = `[` +
+ `{` +
+ `"pid":123,` +
+ `"language":"php",` +
+ `"agent_version":"0.1",` +
+ `"host":"some_host",` +
+ `"settings":{"a":"1","b":true},` +
+ `"app_name":["one","two"],` +
+ `"high_security":false,` +
+ `"labels":[{"label_type":"c","label_value":"d"}],` +
+ `"environment":[["b",2]],` +
+ `"metadata":{},` +
+ `"identifier":"one;two",` +
+ `"utilization":{"metadata_version":1,"logical_processors":22,"total_ram_mib":1000,"hostname":"some_host"},` +
+ `"event_harvest_config":{"report_period_ms":60000,"harvest_limits":{"error_event_data":100,"analytic_event_data":10000,"custom_event_data":10000,"span_event_data":1001}}` +
+ `}` +
+ `]`
+
+ b, err = EncodePayload(info.ConnectPayloadInternal(pid, util))
+ if err != nil {
+ t.Error(err)
+ } else if string(b) != expected {
+ t.Errorf("expected: %s\nactual: %s", expected, string(b))
+ }
+
+}
+
+func TestMaxPayloadSizeInBytesFromDefault(t *testing.T) {
+ expectedMaxPayloadSizeInBytes := limits.DefaultMaxPayloadSizeInBytes
+ cannedConnectReply := []byte(`{"agent_run_id":"1"}`) // parseConnectReply expects at least agent_run_id in collector reply
+
+ c, err := parseConnectReply(cannedConnectReply)
+ if err != nil {
+ t.Error(err)
+ } else if c.MaxPayloadSizeInBytes != expectedMaxPayloadSizeInBytes {
+ t.Errorf("parseConnectReply(nothing), got [%v], expected [%v]", c.MaxPayloadSizeInBytes, expectedMaxPayloadSizeInBytes)
+ }
+}
+
+func TestMaxPayloadSizeInBytesFromConnectReply(t *testing.T) {
+ expectedMaxPayloadSizeInBytes := 1000
+ cannedConnectReply := []byte(`{"agent_run_id":"1", "max_payload_size_in_bytes":`+fmt.Sprint(expectedMaxPayloadSizeInBytes)+`}`)
+
+ c, err := parseConnectReply(cannedConnectReply)
+ if err != nil {
+ t.Error(err)
+ } else if c.MaxPayloadSizeInBytes != expectedMaxPayloadSizeInBytes {
+ t.Errorf("parseConnectReply(something), got [%v], expected [%v]", c.MaxPayloadSizeInBytes, expectedMaxPayloadSizeInBytes)
+ }
}
diff --git a/src/newrelic/collector/client.go b/src/newrelic/collector/client.go
index ebbdcfe21..b9f47fceb 100644
--- a/src/newrelic/collector/client.go
+++ b/src/newrelic/collector/client.go
@@ -8,7 +8,6 @@ package collector
import (
"crypto/tls"
"encoding/json"
- "errors"
"fmt"
"io/ioutil"
"net"
@@ -23,10 +22,6 @@ import (
"newrelic/version"
)
-var ErrPayloadTooLarge = errors.New("payload too large")
-var ErrUnauthorized = errors.New("unauthorized")
-var ErrUnsupportedMedia = errors.New("unsupported media")
-
type CollectibleFunc func(auditVersion bool) ([]byte, error)
func (fn CollectibleFunc) CollectorJSON(auditVersion bool) ([]byte, error) {
@@ -37,16 +32,127 @@ type Collectible interface {
CollectorJSON(auditVersion bool) ([]byte, error)
}
-type Cmd struct {
- Name string
- Collector string
- License LicenseKey
- RunID string
+// RpmCmd contains fields specific to an individual call made to RPM.
+type RpmCmd struct {
+ Name string
+ Collector string
+ RunID string
+ Data []byte
+ License LicenseKey
+ RequestHeadersMap map[string]string
+ MaxPayloadSize int
+}
+
+// RpmControls contains fields which will be the same for all calls made
+// by the same application.
+type RpmControls struct {
+ Collectible Collectible
AgentLanguage string
AgentVersion string
- Collectible Collectible
+ ua string
+}
+
+// RPMResponse contains a NR endpoint response.
+//
+// Agent Behavior Summary:
+//
+// on connect/preconnect:
+// 410 means shutdown
+// 200, 202 mean success (start run)
+// all other response codes and errors mean try after backoff
+//
+// on harvest:
+// 410 means shutdown
+// 401, 409 mean restart run
+// 408, 429, 500, 503 mean save data for next harvest
+// all other response codes and errors discard the data and continue the current harvest
+type RPMResponse struct {
+ StatusCode int
+ Body []byte
+ // Err indicates whether or not the call was successful: newRPMResponse
+ // should be used to avoid mismatch between StatusCode and Err.
+ Err error
+ disconnectSecurityPolicy bool
+}
+
+func newRPMResponse(StatusCode int) RPMResponse {
+ var err error
+ if StatusCode != 200 && StatusCode != 202 {
+ err = fmt.Errorf("response code: %d: %s", StatusCode, GetStatusCodeMessage(StatusCode))
+ }
+ return RPMResponse{StatusCode: StatusCode, Err: err}
+}
- ua string
+// IsDisconnect indicates that the agent should disconnect.
+func (resp RPMResponse) IsDisconnect() bool {
+ return resp.StatusCode == 410 || resp.disconnectSecurityPolicy
+}
+
+// IsRestartException indicates that the agent should restart.
+// 401 (License Exception) is considered a restart exception according to the spec,
+// and is included here as such, however the PHP agent will not restart on a 401 and instead stop
+func (resp RPMResponse) IsRestartException() bool {
+ return resp.StatusCode == 401 || resp.StatusCode == 409
+}
+
+// This is in place because, to update the license ini value, the PHP app must be shut off
+func (resp RPMResponse) IsInvalidLicense() bool {
+ return resp.StatusCode == 401
+}
+
+// ShouldSaveHarvestData indicates that the agent should save the data and try
+// to send it in the next harvest.
+func (resp RPMResponse) ShouldSaveHarvestData() bool {
+ switch resp.StatusCode {
+ case 408, 429, 500, 503:
+ return true
+ default:
+ return false
+ }
+}
+
+// Not a method of RPMResponse so that it can be called during creation
+func GetStatusCodeMessage(StatusCode int) string {
+ switch StatusCode {
+ case 400:
+ return "Invalid request formatting"
+ case 401:
+ return "Authentication failure"
+ case 403:
+ return "Forbidden"
+ case 404:
+ return "Not found"
+ case 405:
+ return "HTTP method not found"
+ case 407:
+ return "Proxy authentication failure (misconfigured)"
+ case 408:
+ return "Timeout"
+ case 409:
+ return "Conflict: you should reconnect"
+ case 410:
+ return "Gone: you should disconnect"
+ case 411:
+ return "Content-length required"
+ case 413:
+ return "Payload too large"
+ case 414:
+ return "URI too large"
+ case 415:
+ return "Content-type or content-encoding is wrong"
+ case 417:
+ return "Expectation failed"
+ case 429:
+ return "Too many requests"
+ case 431:
+ return "Request headers too large"
+ case 500:
+ return "NR server internal error"
+ case 503:
+ return "NR service unavailable"
+ default:
+ return "Unknown response code"
+ }
}
// The agent languages we give the collector are not necessarily the ideal
@@ -58,33 +164,33 @@ var userAgentMappings = map[string]string{
"": "Native",
}
-func (cmd *Cmd) userAgent() string {
- if cmd.ua == "" {
- lang := cmd.AgentLanguage
- if val, ok := userAgentMappings[cmd.AgentLanguage]; ok {
+func (control *RpmControls) userAgent() string {
+ if control.ua == "" {
+ lang := control.AgentLanguage
+ if val, ok := userAgentMappings[control.AgentLanguage]; ok {
lang = val
}
ver := "unknown"
- if cmd.AgentVersion != "" {
- ver = cmd.AgentVersion
+ if control.AgentVersion != "" {
+ ver = control.AgentVersion
}
- cmd.ua = fmt.Sprintf("NewRelic-%s-Agent/%s NewRelic-GoDaemon/%s", lang,
+ control.ua = fmt.Sprintf("NewRelic-%s-Agent/%s NewRelic-GoDaemon/%s", lang,
ver, version.Number)
}
- return cmd.ua
+ return control.ua
}
type Client interface {
- Execute(cmd Cmd) ([]byte, error)
+ Execute(cmd RpmCmd, cs RpmControls) RPMResponse
}
-type ClientFn func(cmd Cmd) ([]byte, error)
+type ClientFn func(cmd RpmCmd, cs RpmControls) RPMResponse
-func (fn ClientFn) Execute(cmd Cmd) ([]byte, error) {
- return fn(cmd)
+func (fn ClientFn) Execute(cmd RpmCmd, cs RpmControls) RPMResponse {
+ return fn(cmd, cs)
}
type limitClient struct {
@@ -93,7 +199,7 @@ type limitClient struct {
semaphore chan bool
}
-func (l *limitClient) Execute(cmd Cmd) ([]byte, error) {
+func (l *limitClient) Execute(cmd RpmCmd, cs RpmControls) RPMResponse {
var timer <-chan time.Time
if 0 != l.timeout {
@@ -103,10 +209,10 @@ func (l *limitClient) Execute(cmd Cmd) ([]byte, error) {
select {
case <-l.semaphore:
defer func() { l.semaphore <- true }()
- b, err := l.orig.Execute(cmd)
- return b, err
+ resp := l.orig.Execute(cmd, cs)
+ return resp
case <-timer:
- return nil, fmt.Errorf("timeout after %v", l.timeout)
+ return RPMResponse{Err: fmt.Errorf("timeout after %v", l.timeout)}
}
}
@@ -214,88 +320,56 @@ type clientImpl struct {
httpClient *http.Client
}
-func (c *clientImpl) perform(url string, data []byte, userAgent string) ([]byte, error) {
- deflated, err := Compress(data)
+func (c *clientImpl) perform(url string, cmd RpmCmd, cs RpmControls) RPMResponse {
+ deflated, err := Compress(cmd.Data)
if nil != err {
- return nil, err
+ return RPMResponse{Err: err}
+ }
+
+ if l := deflated.Len(); l > cmd.MaxPayloadSize {
+ return RPMResponse{Err: fmt.Errorf("payload size too large: %d greater than %d", l, cmd.MaxPayloadSize)}
}
req, err := http.NewRequest("POST", url, deflated)
+
if nil != err {
- return nil, err
+ return RPMResponse{Err: err}
}
req.Header.Add("Accept-Encoding", "identity, deflate")
req.Header.Add("Content-Type", "application/octet-stream")
- req.Header.Add("User-Agent", userAgent)
+ req.Header.Add("User-Agent", cs.userAgent())
req.Header.Add("Content-Encoding", "deflate")
+ for k, v := range cmd.RequestHeadersMap {
+ req.Header.Add(k, v)
+ }
+
resp, err := c.httpClient.Do(req)
if err != nil {
- return nil, err
+ return RPMResponse{Err: err}
}
defer resp.Body.Close()
- switch resp.StatusCode {
- case 200:
- // Nothing to do.
- case 401:
- return nil, ErrUnauthorized
- case 413:
- return nil, ErrPayloadTooLarge
- case 415:
- return nil, ErrUnsupportedMedia
- default:
- // If the response code is not 200, then the collector may not return
- // valid JSON
- return nil, fmt.Errorf("unexpected collector HTTP status code: %d",
- resp.StatusCode)
- }
+ r := newRPMResponse(resp.StatusCode)
- b, err := ioutil.ReadAll(resp.Body)
- if nil != err {
- return nil, err
- }
- return parseResponse(b)
-}
+ body, err := ioutil.ReadAll(resp.Body)
-func (c *clientImpl) Execute(cmd Cmd) ([]byte, error) {
- data, err := cmd.Collectible.CollectorJSON(false)
- if nil != err {
- return nil, fmt.Errorf("unable to create json payload for '%s': %s",
- cmd.Name, err)
+ // if no previous error (from newRPMResponse based on response status code) then
+ // return any error from the ReadAll()
+ if nil == r.Err {
+ r.Err = err
}
- var audit []byte
-
- if log.Auditing() {
- audit, err = cmd.Collectible.CollectorJSON(true)
- if nil != err {
- log.Errorf("unable to create audit json payload for '%s': %s", cmd.Name, err)
- audit = data
- }
- if nil == audit {
- audit = data
- }
+ // if the response code is 200 parse out the "return_value"
+ // key and return as the body of the response
+ // any other response codes will not have a body to parse
+ if r.StatusCode == 200 {
+ r.Body, r.Err = parseResponse(body)
}
- url := cmd.url(false)
- cleanURL := cmd.url(true)
-
- log.Audit("command='%s' url='%s' payload={%s}", cmd.Name, url, audit)
- log.Debugf("command='%s' url='%s' payload={%s}", cmd.Name, cleanURL, data)
-
- resp, err := c.perform(url, data, cmd.userAgent())
- if err != nil {
- log.Debugf("attempt to perform %s failed: %q, url=%s",
- cmd.Name, err.Error(), cleanURL)
- }
-
- log.Audit("command='%s' url='%s', response={%s}", cmd.Name, url, resp)
- log.Debugf("command='%s' url='%s', response={%s}", cmd.Name, cleanURL, resp)
-
- return resp, err
+ return r
}
type rpmException struct {
@@ -306,41 +380,6 @@ type rpmException struct {
func (e *rpmException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorType, e.Message)
}
-
-func hasType(e error, expected string) bool {
- rpmErr, ok := e.(*rpmException)
- if !ok {
- return false
- }
- return rpmErr.ErrorType == expected
-
-}
-
-const (
- forceRestartType = "NewRelic::Agent::ForceRestartException"
- disconnectType = "NewRelic::Agent::ForceDisconnectException"
- licenseInvalidType = "NewRelic::Agent::LicenseException"
- runtimeType = "RuntimeError"
-)
-
-// These clients exist for testing.
-var (
- DisconnectClient = ClientFn(func(cmd Cmd) ([]byte, error) {
- return nil, SampleDisonnectException
- })
- LicenseInvalidClient = ClientFn(func(cmd Cmd) ([]byte, error) {
- return nil, SampleLicenseInvalidException
- })
- SampleRestartException = &rpmException{ErrorType: forceRestartType}
- SampleDisonnectException = &rpmException{ErrorType: disconnectType}
- SampleLicenseInvalidException = &rpmException{ErrorType: licenseInvalidType}
-)
-
-func IsRestartException(e error) bool { return hasType(e, forceRestartType) }
-func IsLicenseException(e error) bool { return hasType(e, licenseInvalidType) }
-func IsRuntime(e error) bool { return hasType(e, runtimeType) }
-func IsDisconnect(e error) bool { return hasType(e, disconnectType) }
-
func parseResponse(b []byte) ([]byte, error) {
var r struct {
ReturnValue json.RawMessage `json:"return_value"`
@@ -358,3 +397,40 @@ func parseResponse(b []byte) ([]byte, error) {
return r.ReturnValue, nil
}
+
+func (c *clientImpl) Execute(cmd RpmCmd, cs RpmControls) RPMResponse {
+ data, err := cs.Collectible.CollectorJSON(false)
+ if nil != err {
+ return RPMResponse{Err: err}
+ }
+ cmd.Data = data
+
+ var audit []byte
+
+ if log.Auditing() {
+ audit, err = cs.Collectible.CollectorJSON(true)
+ if nil != err {
+ log.Errorf("unable to create audit json payload for '%s': %s", cmd.Name, err)
+ audit = data
+ }
+ if nil == audit {
+ audit = data
+ }
+ }
+
+ url := cmd.url(false)
+ cleanURL := cmd.url(true)
+
+ log.Audit("command='%s' url='%s' payload={%s}", cmd.Name, url, audit)
+ log.Debugf("command='%s' url='%s' max_payload_size_in_bytes='%d' payload={%s}", cmd.Name, cleanURL, cmd.MaxPayloadSize, data)
+
+ resp := c.perform(url, cmd, cs)
+ if err != nil {
+ log.Debugf("attempt to perform %s failed: %q, url=%s",
+ cmd.Name, err.Error(), cleanURL)
+ }
+
+ log.Audit("command='%s' url='%s', response={%s}", cmd.Name, url, string(resp.Body))
+ log.Debugf("command='%s' url='%s', response={%s}", cmd.Name, cleanURL, string(resp.Body))
+ return resp
+}
diff --git a/src/newrelic/collector/client_test.go b/src/newrelic/collector/client_test.go
index 671627db7..447f77936 100644
--- a/src/newrelic/collector/client_test.go
+++ b/src/newrelic/collector/client_test.go
@@ -6,6 +6,11 @@
package collector
import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
"testing"
"newrelic/version"
@@ -74,7 +79,7 @@ func TestUserAgent(t *testing.T) {
expected: "NewRelic-Native-Agent/unknown",
},
} {
- cmd := &Cmd{
+ cmd := &RpmControls{
AgentLanguage: tc.language,
AgentVersion: tc.version,
}
@@ -91,3 +96,79 @@ func TestUserAgent(t *testing.T) {
}
}
}
+
+func TestExecuteWhenMaxPayloadSizeExceeded(t *testing.T) {
+ cmdPayload := "dummy-data"
+ cmd := RpmCmd{
+ MaxPayloadSize: 0,
+ }
+ cs := RpmControls{
+ Collectible: CollectibleFunc(func(auditVersion bool) ([]byte, error) {
+ if auditVersion {
+ return nil, nil
+ }
+ return []byte(cmdPayload), nil
+ }),
+ }
+ testedFn := fmt.Sprintf("client.Execute(payload: %v, MaxPayloadSize: %v)", cmdPayload, cmd.MaxPayloadSize)
+ var wantResponseBody []byte = nil
+ wantErr := errors.New("payload size too large:")
+
+ client := clientImpl{
+ httpClient: &http.Client{
+ Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ // no http.Response because no HTTP request should be made
+ return nil, nil
+ }),
+ },
+ }
+
+ resp := client.Execute(cmd, cs)
+ if resp.Body != nil {
+ t.Errorf("%s, got [%v], want [%v]", testedFn, resp.Body, wantResponseBody)
+ } else if resp.Err == nil {
+ t.Errorf("%s, got [%v], want [%v]", testedFn, resp.Err, wantErr)
+ } else if !strings.HasPrefix(resp.Err.Error(), wantErr.Error()) {
+ t.Errorf("%s, got [%v], want [%v]", testedFn, resp.Err, wantErr)
+ }
+}
+
+func TestExecuteWhenMaxPayloadSizeNotExceeded(t *testing.T) {
+ cmdPayload := "dummy-data"
+ cmd := RpmCmd{
+ MaxPayloadSize: 100,
+ }
+ cs := RpmControls{
+ Collectible: CollectibleFunc(func(auditVersion bool) ([]byte, error) {
+ if auditVersion {
+ return nil, nil
+ }
+ return []byte(cmdPayload), nil
+ }),
+ }
+ testedFn := fmt.Sprintf("client.Execute(payload: %v, MaxPayloadSize: %v)", cmdPayload, cmd.MaxPayloadSize)
+ var wantErr error = nil
+
+ client := clientImpl{
+ httpClient: &http.Client{
+ Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: 200,
+ // perform function calls parseResponse which expects
+ // a valid JSON. Providing minimal valid JSON as HTTP
+ // response body.
+ Body: ioutil.NopCloser(strings.NewReader("{}")),
+ }, nil
+ }),
+ },
+ }
+
+ // This test does not test http.Response.Body parsing.
+ // This test ensures there's no error if payload does
+ // not exceed configured max_payload_size_in_bytes.
+ // That's why the body is ignored here.
+ resp := client.Execute(cmd, cs)
+ if resp.Err != nil {
+ t.Errorf("%s, got [%v], want [%v]", testedFn, resp.Err, wantErr)
+ }
+}
diff --git a/src/newrelic/collector/collector.go b/src/newrelic/collector/collector.go
index 042a2a7d8..738316956 100644
--- a/src/newrelic/collector/collector.go
+++ b/src/newrelic/collector/collector.go
@@ -26,20 +26,20 @@ const (
)
const (
- protocolVersion = "16"
+ protocolVersion = "17"
)
// LicenseKey represents a license key for an account.
type LicenseKey string
-func (cmd *Cmd) String() string {
+func (cmd *RpmCmd) String() string {
if cmd.RunID != "" {
return cmd.Name + " " + cmd.RunID
}
return cmd.Name
}
-func (cmd *Cmd) url(obfuscate bool) string {
+func (cmd *RpmCmd) url(obfuscate bool) string {
var u url.URL
u.Host = cmd.Collector
diff --git a/src/newrelic/collector/collector_test.go b/src/newrelic/collector/collector_test.go
index d09c49719..363b8c9bb 100644
--- a/src/newrelic/collector/collector_test.go
+++ b/src/newrelic/collector/collector_test.go
@@ -6,152 +6,161 @@
package collector
import (
+ "io/ioutil"
+ "net/http"
"net/url"
"newrelic/crossagent"
+ "strings"
"testing"
)
var (
actualData = "my_data"
- call = Cmd{
- Name: CommandErrors,
- Collector: "the-collector.com",
- License: "12345",
- RunID: "db97531",
- Collectible: CollectibleFunc(func(auditVersion bool) ([]byte, error) {
- if auditVersion {
- return nil, nil
- }
- return []byte(actualData), nil
- }),
- }
)
-func TestLicenseInvalid(t *testing.T) {
- r := `{"exception":{"message":"Invalid license key, please contact support@newrelic.com","error_type":"NewRelic::Agent::LicenseException"}}`
- reply, err := parseResponse([]byte(r))
- if reply != nil {
- t.Fatal(string(reply))
- }
- if !IsLicenseException(err) {
- t.Fatal(err)
- }
-}
-
-func TestRedirectSuccess(t *testing.T) {
- reply, err := parseResponse([]byte(`{"return_value":"staging-collector-101.newrelic.com"}`))
- if nil != err {
- t.Fatal(err)
- }
- if string(reply) != `"staging-collector-101.newrelic.com"` {
- t.Fatal(string(reply))
- }
-}
-
-func TestEmptyHash(t *testing.T) {
- reply, err := parseResponse([]byte(`{}`))
- if nil != err {
- t.Fatal(err)
- }
- if nil != reply {
- t.Fatal(string(reply))
- }
-}
-
-func TestReturnValueNull(t *testing.T) {
- reply, err := parseResponse([]byte(`{"return_value":null}`))
- if nil != err {
- t.Fatal(err)
- }
- if "null" != string(reply) {
- t.Fatal(string(reply))
- }
-}
-
-func TestReplyNull(t *testing.T) {
- reply, err := parseResponse(nil)
- if nil == err || err.Error() != `unexpected end of JSON input` {
- t.Fatal(err)
- }
- if nil != reply {
- t.Fatal(string(reply))
- }
-}
-
-func TestConnectSuccess(t *testing.T) {
- inner := `{"agent_run_id":"599551769342729","product_level":40,"js_agent_file":"","cross_process_id":"17833#31785","collect_errors":true,"url_rules":[{"each_segment":false,"match_expression":".*\\.(ace|arj|ini|txt|udl|plist|css|gif|ico|jpe?g|js|png|swf|woff|caf|aiff|m4v|mpe?g|mp3|mp4|mov)$","eval_order":1000,"replace_all":false,"ignore":false,"terminate_chain":true,"replacement":"\/*.\\1"},{"each_segment":true,"match_expression":"^[0-9][0-9a-f_,.-]*$","eval_order":1001,"replace_all":false,"ignore":false,"terminate_chain":false,"replacement":"*"},{"each_segment":false,"match_expression":"^(.*)\/[0-9][0-9a-f_,-]*\\.([0-9a-z][0-9a-z]*)$","eval_order":1002,"replace_all":false,"ignore":false,"terminate_chain":false,"replacement":"\\1\/.*\\2"}],"messages":[{"message":"Reporting to: https:\/\/staging.newrelic.com\/accounts\/17833\/applications\/31785","level":"INFO"}],"data_report_period":60,"collect_traces":true,"sampling_rate":0,"js_agent_loader":"","encoding_key":"d67afc830dab717fd163bfcb0b8b88423e9a1a3b","apdex_t":0.5,"collect_analytics_events":true,"trusted_account_ids":[17833]}`
- outer := `{"return_value":` + inner + `}`
- reply, err := parseResponse([]byte(outer))
- if nil != err {
- t.Fatal(err)
- }
- if string(reply) != inner {
- t.Fatal(string(reply))
- }
-}
-
-func TestClientError(t *testing.T) {
- reply, err := parseResponse([]byte(`{"exception":{"message":"something","error_type":"my_error"}}`))
- if nil == err || err.Error() != "my_error: something" {
- t.Fatal(err)
+func TestResponseCodeError(t *testing.T) {
+ testcases := []struct {
+ code int
+ success bool
+ disconnect bool
+ restart bool
+ saveHarvestData bool
+ }{
+ // success
+ {code: 200, success: true, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 202, success: true, disconnect: false, restart: false, saveHarvestData: false},
+ // disconnect
+ {code: 410, success: false, disconnect: true, restart: false, saveHarvestData: false},
+ // restart
+ {code: 401, success: false, disconnect: false, restart: true, saveHarvestData: false},
+ {code: 409, success: false, disconnect: false, restart: true, saveHarvestData: false},
+ // save data
+ {code: 408, success: false, disconnect: false, restart: false, saveHarvestData: true},
+ {code: 429, success: false, disconnect: false, restart: false, saveHarvestData: true},
+ {code: 500, success: false, disconnect: false, restart: false, saveHarvestData: true},
+ {code: 503, success: false, disconnect: false, restart: false, saveHarvestData: true},
+ // other errors
+ {code: 400, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 403, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 404, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 405, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 407, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 411, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 413, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 414, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 415, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 417, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 431, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ // unexpected weird codes
+ {code: -1, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 1, success: false, disconnect: false, restart: false, saveHarvestData: false},
+ {code: 999999, success: false, disconnect: false, restart: false, saveHarvestData: false},
}
- if nil != reply {
- t.Fatal(string(reply))
+ for _, tc := range testcases {
+ resp := newRPMResponse(tc.code)
+ if tc.success != (nil == resp.Err) {
+ t.Error("error", tc.code, tc.success, resp.Err)
+ }
+ if tc.disconnect != resp.IsDisconnect() {
+ t.Error("disconnect", tc.code, tc.disconnect, resp.Err)
+ }
+ if tc.restart != resp.IsRestartException() {
+ t.Error("restart", tc.code, tc.restart, resp.Err)
+ }
+ if tc.saveHarvestData != resp.ShouldSaveHarvestData() {
+ t.Error("save harvest data", tc.code, tc.saveHarvestData, resp.Err)
+ }
}
}
-func TestForceRestartException(t *testing.T) {
- // NOTE: This string was generated manually, not taken from the actual
- // collector.
- r := `{"exception":{"message":"something","error_type":"NewRelic::Agent::ForceRestartException"}}`
- reply, err := parseResponse([]byte(r))
- if reply != nil {
- t.Fatal(string(reply))
- }
- if !IsRestartException(err) {
- t.Fatal(err)
- }
-}
+type roundTripperFunc func(*http.Request) (*http.Response, error)
-func TestForceDisconnectException(t *testing.T) {
- // NOTE: This string was generated manually, not taken from the actual
- // collector.
- r := `{"exception":{"message":"something","error_type":"NewRelic::Agent::ForceDisconnectException"}}`
- reply, err := parseResponse([]byte(r))
- if reply != nil {
- t.Fatal(string(reply))
- }
- if !IsDisconnect(err) {
- t.Fatal(err)
- }
+func (fn roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return fn(r)
}
-func TestRuntimeError(t *testing.T) {
- // NOTE: This string was generated manually, not taken from the actual
- // collector.
- r := `{"exception":{"message":"something","error_type":"RuntimeError"}}`
- reply, err := parseResponse([]byte(r))
- if reply != nil {
- t.Fatal(string(reply))
+func TestCollectorRequest(t *testing.T) {
+ cmd := RpmCmd{
+ Name: "cmd_name",
+ Collector: "collector.com",
+ RunID: "run_id",
+ Data: nil,
+ License: "the_license",
+ RequestHeadersMap: map[string]string{"zip": "zap"},
+ MaxPayloadSize: 1000 * 1000,
+ }
+ testField := func(name, v1, v2 string) {
+ if v1 != v2 {
+ t.Errorf("Field %s want %s, got %s", name, v2, v1)
+ }
}
- if !IsRuntime(err) {
- t.Fatal(err)
+ cs := RpmControls{
+ Collectible: CollectibleFunc(func(auditVersion bool) ([]byte, error) {
+ if auditVersion {
+ return nil, nil
+ }
+ return []byte(actualData), nil
+ }),
+ AgentVersion: "agent_version",
+ }
+ client := clientImpl{
+ httpClient: &http.Client{
+ Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ testField("method", r.Method, "POST")
+ testField("url", r.URL.String(), "https://collector.com/agent_listener/invoke_raw_method?license_key=the_license&marshal_format=json&method=cmd_name&protocol_version=17&run_id=run_id")
+ testField("Accept-Encoding", r.Header.Get("Accept-Encoding"), "identity, deflate")
+ testField("Content-Type", r.Header.Get("Content-Type"), "application/octet-stream")
+ testField("Content-Encoding", r.Header.Get("Content-Encoding"), "deflate")
+ testField("zip", r.Header.Get("zip"), "zap")
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(strings.NewReader("{\"Body\": 0}")),
+ }, nil
+ }),
+ },
+ }
+ resp := client.Execute(cmd, cs)
+ if nil != resp.Err {
+ t.Error(resp.Err)
}
}
-func TestUnknownError(t *testing.T) {
- r := `{"exception":{"message":"something","error_type":"unknown_type"}}`
- reply, err := parseResponse([]byte(r))
- if reply != nil {
- t.Fatal(string(reply))
- }
- if nil == err || err.Error() != "unknown_type: something" {
- t.Fatal(err)
+func TestCollectorBadRequest(t *testing.T) {
+ cmd := RpmCmd{
+ Name: "cmd_name",
+ Collector: "collector.com",
+ RunID: "run_id",
+ Data: nil,
+ License: "the_license",
+ }
+ cs := RpmControls{
+ Collectible: CollectibleFunc(func(auditVersion bool) ([]byte, error) {
+ if auditVersion {
+ return nil, nil
+ }
+ return []byte(actualData), nil
+ }),
+ AgentVersion: "agent_version",
+ }
+ client := clientImpl{
+ httpClient: &http.Client{
+ Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(strings.NewReader("body")),
+ }, nil
+ }),
+ },
+ }
+ u := ":" // bad url
+ resp := client.perform(u, cmd, cs)
+ if nil == resp.Err {
+ t.Error("missing expected error")
}
}
func TestObfuscateLicense(t *testing.T) {
- cmd := Cmd{
+ cmd := RpmCmd{
Name: "foo_method",
Collector: "example.com",
License: "123abc",
@@ -172,7 +181,7 @@ func TestObfuscateLicense(t *testing.T) {
}
func TestObfuscateLicenseShort(t *testing.T) {
- cmd := Cmd{
+ cmd := RpmCmd{
Name: "foo_method",
Collector: "example.com",
License: "abc",
diff --git a/src/newrelic/collector_integration_test.go b/src/newrelic/collector_integration_test.go
index 54dba00d4..39a572b37 100644
--- a/src/newrelic/collector_integration_test.go
+++ b/src/newrelic/collector_integration_test.go
@@ -32,11 +32,12 @@ func sampleConnectPayload(lic collector.LicenseKey) *RawConnectPayload {
}
return info.ConnectPayload(utilization.Gather(utilization.Config{
- DetectAWS: false,
- DetectAzure: false,
- DetectGCP: false,
- DetectPCF: false,
- DetectDocker: false,
+ DetectAWS: false,
+ DetectAzure: false,
+ DetectGCP: false,
+ DetectPCF: false,
+ DetectDocker: false,
+ DetectKubernetes: false,
}))
}
diff --git a/src/newrelic/commands.go b/src/newrelic/commands.go
index 67509e5aa..86e5b1fd8 100644
--- a/src/newrelic/commands.go
+++ b/src/newrelic/commands.go
@@ -256,6 +256,7 @@ func UnmarshalAppInfo(tbl flatbuffers.Table) *AppInfo {
RedirectCollector: string(app.RedirectCollector()),
Environment: JSONString(copySlice(app.Environment())),
Labels: JSONString(copySlice(app.Labels())),
+ Metadata: JSONString(copySlice(app.Metadata())),
Hostname: string(app.Host()),
HostDisplayName: string(app.DisplayHost()),
SecurityPolicyToken: string(app.SecurityPolicyToken()),
@@ -269,10 +270,10 @@ func UnmarshalAppInfo(tbl flatbuffers.Table) *AppInfo {
info.initSettings(app.Settings())
- // Of the four Event Limits (span, custom, analytic and error),
- // only span events is configurable from the agent.
- // If this changes in the future, the other values can be added here.
- info.AgentEventLimits.SpanEventConfig.Limit = int(app.SpanEventsMaxSamplesStored())
+ // Of the four Event Limits (span, custom, analytic and error),
+ // only span events is configurable from the agent.
+ // If this changes in the future, the other values can be added here.
+ info.AgentEventLimits.SpanEventConfig.Limit = int(app.SpanEventsMaxSamplesStored())
return info
}
diff --git a/src/newrelic/harvest.go b/src/newrelic/harvest.go
index 4fec6aaf7..868edb75e 100644
--- a/src/newrelic/harvest.go
+++ b/src/newrelic/harvest.go
@@ -6,6 +6,7 @@
package newrelic
import (
+ "strconv"
"time"
"newrelic/collector"
@@ -28,6 +29,7 @@ type Harvest struct {
SpanEvents *SpanEvents
commandsProcessed int
pidSet map[int]struct{}
+ httpErrorSet map[int]float64
}
func NewHarvest(now time.Time, hl collector.EventConfigs) *Harvest {
@@ -42,6 +44,7 @@ func NewHarvest(now time.Time, hl collector.EventConfigs) *Harvest {
SpanEvents: NewSpanEvents(hl.SpanEventConfig.Limit),
commandsProcessed: 0,
pidSet: make(map[int]struct{}),
+ httpErrorSet: make(map[int]float64),
}
}
@@ -67,6 +70,47 @@ func createTraceObserverMetrics(to *infinite_tracing.TraceObserver, metrics *Met
}
}
+func (h *Harvest) createHttpErrorMetrics() {
+ if h.empty() {
+ // No agent data received, do not create derived metrics. This allows
+ // upstream to detect inactivity sooner.
+ return
+ }
+
+ for code, val := range h.httpErrorSet {
+ h.Metrics.AddCount("Supportability/Agent/Collector/HTTPError/"+strconv.Itoa(code), "", val, Forced)
+ }
+}
+
+// Update the Http error counts
+func (h *Harvest) IncrementHttpErrors(statusCode int) {
+ if h.empty() {
+ // No agent data received, do not create derived metrics. This allows
+ // upstream to detect inactivity sooner.
+ return
+ }
+ counter, isPresent := h.httpErrorSet[statusCode]
+
+ if isPresent {
+ h.httpErrorSet[statusCode] = counter + 1
+ } else {
+ h.httpErrorSet[statusCode] = 1
+ }
+}
+
+func (h *Harvest) createEndpointAttemptsMetric(endpoint string, val float64) {
+ if h.empty() {
+ // No agent data received, do not create derived metrics. This allows
+ // upstream to detect inactivity sooner.
+ return
+ }
+
+ if val > 0 {
+ h.Metrics.AddCount("Supportability/Agent/Collector/"+endpoint+"/Attempts", "", val, Forced)
+ }
+
+}
+
func (h *Harvest) createFinalMetrics(harvestLimits collector.EventHarvestConfig, to *infinite_tracing.TraceObserver) {
if h.empty() {
// No agent data received, do not create derived metrics. This allows
@@ -87,17 +131,21 @@ func (h *Harvest) createFinalMetrics(harvestLimits collector.EventHarvestConfig,
// Custom Events Supportability Metrics
h.Metrics.AddCount("Supportability/Events/Customer/Seen", "", h.CustomEvents.NumSeen(), Forced)
h.Metrics.AddCount("Supportability/Events/Customer/Sent", "", h.CustomEvents.NumSaved(), Forced)
+ h.createEndpointAttemptsMetric(h.CustomEvents.Cmd(), h.CustomEvents.NumFailedAttempts())
// Transaction Events Supportability Metrics
// Note that these metrics used to have different names:
// Supportability/RequestSampler/requests
// Supportability/RequestSampler/samples
+
h.Metrics.AddCount("Supportability/AnalyticsEvents/TotalEventsSeen", "", h.TxnEvents.NumSeen(), Forced)
h.Metrics.AddCount("Supportability/AnalyticsEvents/TotalEventsSent", "", h.TxnEvents.NumSaved(), Forced)
+ h.createEndpointAttemptsMetric(h.TxnEvents.Cmd(), h.TxnEvents.NumFailedAttempts())
// Error Events Supportability Metrics
h.Metrics.AddCount("Supportability/Events/TransactionError/Seen", "", h.ErrorEvents.NumSeen(), Forced)
h.Metrics.AddCount("Supportability/Events/TransactionError/Sent", "", h.ErrorEvents.NumSaved(), Forced)
+ h.createEndpointAttemptsMetric(h.ErrorEvents.Cmd(), h.ErrorEvents.NumFailedAttempts())
if h.Metrics.numDropped > 0 {
h.Metrics.AddCount("Supportability/MetricsDropped", "", float64(h.Metrics.numDropped), Forced)
@@ -106,6 +154,7 @@ func (h *Harvest) createFinalMetrics(harvestLimits collector.EventHarvestConfig,
// Span Events Supportability Metrics
h.Metrics.AddCount("Supportability/SpanEvent/TotalEventsSeen", "", h.SpanEvents.analyticsEvents.NumSeen(), Forced)
h.Metrics.AddCount("Supportability/SpanEvent/TotalEventsSent", "", h.SpanEvents.analyticsEvents.NumSaved(), Forced)
+ h.createEndpointAttemptsMetric(h.SpanEvents.Cmd(), h.SpanEvents.analyticsEvents.NumFailedAttempts())
// Certificate supportability metrics.
if collector.CertPoolState == collector.SystemCertPoolMissing {
@@ -119,7 +168,11 @@ func (h *Harvest) createFinalMetrics(harvestLimits collector.EventHarvestConfig,
h.Metrics.AddCount("Supportability/EventHarvest/ErrorEventData/HarvestLimit", "", float64(harvestLimits.EventConfigs.ErrorEventConfig.Limit), Forced)
h.Metrics.AddCount("Supportability/EventHarvest/SpanEventData/HarvestLimit", "", float64(harvestLimits.EventConfigs.SpanEventConfig.Limit), Forced)
+ h.createEndpointAttemptsMetric(h.Metrics.Cmd(), h.Metrics.NumFailedAttempts())
+
createTraceObserverMetrics(to, h.Metrics)
+
+ h.createHttpErrorMetrics()
}
type FailedHarvestSaver interface {
diff --git a/src/newrelic/harvest_test.go b/src/newrelic/harvest_test.go
index 94a6d0f5f..5e6e0cd9b 100644
--- a/src/newrelic/harvest_test.go
+++ b/src/newrelic/harvest_test.go
@@ -56,10 +56,36 @@ func TestCreateFinalMetricsWithLotsOfMetrics(t *testing.T) {
},
},
}
+ // Harvest will fail and retry depending on the response code from the backend
+ // The specs say to create a metric ONLY IF the total attempts are greater than 1,
+ // which means the metric is only sent if the harvest failed at some point and the
+ // collector call had to be re-attempted
+
+ // For PHP, only metricEvents, ErrorEvents, CustomEvents, TxnEvents, and SpanEvents are retried,
+ // therefore only those will ever have a count higher than 0 failed attempts. For more details, see:
+ // The Data Collection Limits section of Collector-Response-Handling.md in the agent-specs.
+
+ // TxnEvents will succeed on the first attempt and should not generate a metric
+
+ // Have CustomEvents fail once
+ harvest.CustomEvents.FailedHarvest(harvest)
+
+ // Have ErrorEvents fail twice for a total of 2
+ harvest.ErrorEvents.FailedHarvest(harvest)
+ harvest.ErrorEvents.FailedHarvest(harvest)
+
+ // Have SpanEvents fail three times for a total of 3
+ harvest.SpanEvents.FailedHarvest(harvest)
+ harvest.SpanEvents.FailedHarvest(harvest)
+ harvest.SpanEvents.FailedHarvest(harvest)
+
harvest.createFinalMetrics(limits, nil)
var expectedJSON = `["12345",1447203720,1417136520,` +
`[[{"name":"Instance/Reporting"},[1,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Agent/Collector/custom_event_data/Attempts"},[1,0,0,0,0,0]],` + // Check for endpoint attempt supportability metrics
+ `[{"name":"Supportability/Agent/Collector/error_event_data/Attempts"},[2,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Agent/Collector/span_event_data/Attempts"},[3,0,0,0,0,0]],` +
`[{"name":"Supportability/AnalyticsEvents/TotalEventsSeen"},[8,0,0,0,0,0]],` +
`[{"name":"Supportability/AnalyticsEvents/TotalEventsSent"},[8,0,0,0,0,0]],` +
`[{"name":"Supportability/EventHarvest/AnalyticEventData/HarvestLimit"},[2,0,0,0,0,0]],` +
@@ -67,12 +93,12 @@ func TestCreateFinalMetricsWithLotsOfMetrics(t *testing.T) {
`[{"name":"Supportability/EventHarvest/ErrorEventData/HarvestLimit"},[1,0,0,0,0,0]],` +
`[{"name":"Supportability/EventHarvest/ReportPeriod"},[1234,0,0,0,0,0]],` +
`[{"name":"Supportability/EventHarvest/SpanEventData/HarvestLimit"},[4,0,0,0,0,0]],` +
- `[{"name":"Supportability/Events/Customer/Seen"},[4,0,0,0,0,0]],` +
- `[{"name":"Supportability/Events/Customer/Sent"},[4,0,0,0,0,0]],` +
- `[{"name":"Supportability/Events/TransactionError/Seen"},[7,0,0,0,0,0]],` +
- `[{"name":"Supportability/Events/TransactionError/Sent"},[7,0,0,0,0,0]],` +
- `[{"name":"Supportability/SpanEvent/TotalEventsSeen"},[3,0,0,0,0,0]],` +
- `[{"name":"Supportability/SpanEvent/TotalEventsSent"},[3,0,0,0,0,0]]]]`
+ `[{"name":"Supportability/Events/Customer/Seen"},[8,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/Customer/Sent"},[8,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/TransactionError/Seen"},[28,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/TransactionError/Sent"},[28,0,0,0,0,0]],` +
+ `[{"name":"Supportability/SpanEvent/TotalEventsSeen"},[24,0,0,0,0,0]],` +
+ `[{"name":"Supportability/SpanEvent/TotalEventsSent"},[24,0,0,0,0,0]]]]`
json, err := harvest.Metrics.CollectorJSONSorted(AgentRunID(`12345`), end)
if nil != err {
diff --git a/src/newrelic/infinite_tracing/grpc_sender.go b/src/newrelic/infinite_tracing/grpc_sender.go
index 37c23da54..e1c3b3885 100644
--- a/src/newrelic/infinite_tracing/grpc_sender.go
+++ b/src/newrelic/infinite_tracing/grpc_sender.go
@@ -40,6 +40,11 @@ type codec struct {
encoding.Codec
}
+const (
+ licenseMetadataKey = "license_key"
+ runIDMetadataKey = "agent_run_token"
+)
+
var (
supportabilityCodeErr = "Supportability/InfiniteTracing/Span/gRPC/"
codeStrings = func() map[codes.Code]string {
@@ -107,10 +112,11 @@ func (s *grpcSpanBatchSender) clone() (spanBatchSender, error) {
}
func (s *grpcSpanBatchSender) connect() (error, spanBatchSenderStatus) {
+ md := newMetadata(s.RunId, s.License, s.RequestHeadersMap)
+ ctx := metadata.NewOutgoingContext(context.Background(), md)
+
stream, err := s.client.RecordSpanBatch(
- metadata.AppendToOutgoingContext(context.Background(),
- "license_key", s.License,
- "agent_run_token", s.RunId),
+ ctx,
grpc.ForceCodec(&codec{encoding.GetCodec("proto")}))
if err != nil {
@@ -184,6 +190,15 @@ func newSpanBatchStatusFromGrpcErr(err error) spanBatchSenderStatus {
}
}
+// newMetadata creates a grpc metadata with proper keys and values for use when
+// connecting to RecordSpanBatch.
+func newMetadata(runID string, license string, requestHeadersMap map[string]string) metadata.MD {
+ md := metadata.New(requestHeadersMap)
+ md.Set(licenseMetadataKey, license)
+ md.Set(runIDMetadataKey, runID)
+ return md
+}
+
func errToCodeString(err error) string {
code := status.Code(err)
str, ok := codeStrings[code]
diff --git a/src/newrelic/infinite_tracing/grpc_sender_test.go b/src/newrelic/infinite_tracing/grpc_sender_test.go
index a1d4b3922..86424510a 100644
--- a/src/newrelic/infinite_tracing/grpc_sender_test.go
+++ b/src/newrelic/infinite_tracing/grpc_sender_test.go
@@ -198,9 +198,10 @@ func TestSimpleSpan(t *testing.T) {
defer srv.Close()
sender, err := newGrpcSpanBatchSender(&Config{
- Host: srv.host,
- Port: srv.port,
- Secure: false,
+ Host: srv.host,
+ Port: srv.port,
+ Secure: false,
+ RequestHeadersMap: map[string]string{"zip": "zap"},
})
defer sender.conn.Close()
@@ -226,6 +227,23 @@ func TestSimpleSpan(t *testing.T) {
t.Fatalf("unexpected error during sending: %v", err)
}
+ md := <-srv.metadataReceivedChan
+
+ expected := map[string]string{
+ "zip": "zap",
+ }
+
+ for expectedKey, expectedValue := range expected {
+ value, ok := md[expectedKey]
+ if ok && len(value) == 1 {
+ if value[0] != expectedValue {
+ t.Errorf("invalid value for key %s metadata: got %s, want %s", expectedKey, value[0], expectedValue)
+ }
+ } else {
+ t.Errorf("no value for %s metadata", expectedKey)
+ }
+ }
+
received := <-srv.spansReceivedChan
if len(received.Spans) != 1 {
diff --git a/src/newrelic/infinite_tracing/trace_observer.go b/src/newrelic/infinite_tracing/trace_observer.go
index 7817c577f..e79c43aa8 100644
--- a/src/newrelic/infinite_tracing/trace_observer.go
+++ b/src/newrelic/infinite_tracing/trace_observer.go
@@ -95,12 +95,13 @@ type TraceObserver struct {
}
type Config struct {
- Host string
- Port uint16
- Secure bool
- License string
- RunId string
- QueueSize uint64
+ Host string
+ Port uint16
+ Secure bool
+ License string
+ RunId string
+ QueueSize uint64
+ RequestHeadersMap map[string]string
}
type metricIncrement struct {
diff --git a/src/newrelic/limits/limits.go b/src/newrelic/limits/limits.go
index 5da671101..9c084f772 100644
--- a/src/newrelic/limits/limits.go
+++ b/src/newrelic/limits/limits.go
@@ -36,6 +36,7 @@ const (
MaxRegularTraces = 1
MaxForcePersistTraces = 10
MaxSyntheticsTraces = 20
+ DefaultMaxPayloadSizeInBytes = 1000 * 1000
// Failed Harvest Data Rollover Limits
// Use the same harvest failure limit for custom events and txn events
diff --git a/src/newrelic/metrics.go b/src/newrelic/metrics.go
index a149c9664..3a5e7cee0 100644
--- a/src/newrelic/metrics.go
+++ b/src/newrelic/metrics.go
@@ -109,6 +109,14 @@ func (data *metricData) aggregate(src *metricData) {
data.sumSquares += src.sumSquares
}
+// NumFailedAttempts returns the number of failed attempts made to this endpoint.
+// The value is the number of times the agent attempted to call the given endpoint before it was successful.
+// A metric based on this value MUST NOT be generated if only one attempt was made.
+// Does not include the successful attempt.
+func (mt *MetricTable) NumFailedAttempts() float64 {
+ return float64(mt.failedHarvests)
+}
+
func (mt *MetricTable) mergeMetric(nameSlice []byte, nameString, scope string,
m *metric) {
var s map[string]*metric
diff --git a/src/newrelic/processor.go b/src/newrelic/processor.go
index 83eae8094..85aa3837f 100644
--- a/src/newrelic/processor.go
+++ b/src/newrelic/processor.go
@@ -48,15 +48,14 @@ type ConnectAttempt struct {
Key AppKey
Collector string
Reply *ConnectReply
- RawReply []byte
+ RawReply collector.RPMResponse
Err error
RawSecurityPolicies []byte
}
type HarvestError struct {
- Err error
id AgentRunID
- Reply []byte
+ Reply collector.RPMResponse
data FailedHarvestSaver
}
@@ -163,29 +162,42 @@ func ConnectApplication(args *ConnectArgs) ConnectAttempt {
args.Payload, err = EncodePayload(&RawPreconnectPayload{SecurityPolicyToken: args.SecurityPolicyToken, HighSecurity: args.HighSecurity})
if err != nil {
log.Errorf("unable to connect application: %v", err)
+ rep.Err = err
return rep
}
// Prepare preconnect call
collectorHostname := collector.CalculatePreconnectHost(args.License, args.RedirectCollector)
- call := collector.Cmd{
- Name: collector.CommandPreconnect,
- Collector: collectorHostname,
- License: args.License,
+ cs := collector.RpmControls{
AgentLanguage: args.AgentLanguage,
AgentVersion: args.AgentVersion,
Collectible: collector.CollectibleFunc(func(auditVersion bool) ([]byte, error) {
return args.Payload, nil
}),
}
+ cmd := collector.RpmCmd{
+ Name: collector.CommandPreconnect,
+ Collector: collectorHostname,
+ License: args.License,
+ // Use default maximum, because we don't know the collector limit yet
+ MaxPayloadSize: limits.DefaultMaxPayloadSizeInBytes,
+ }
// Make call to preconnect
- rep.RawReply, rep.Err = args.Client.Execute(call)
- if nil != rep.Err {
+ // return value is a struct with the body and any error from attempt
+ // if something fails from this point on the error needs to be
+ // propagated up into the return value (rep.Err) as code downstream
+ // expects this field value to contain any errors which occurred
+ // during the connect attempt and will not inspect the RawReply
+ // field for an error value
+ rep.RawReply = args.Client.Execute(cmd, cs)
+
+ if nil != rep.RawReply.Err {
+ rep.Err = rep.RawReply.Err
return rep
}
- rep.Err = json.Unmarshal(rep.RawReply, &preconnectReply)
+ rep.Err = json.Unmarshal(rep.RawReply.Body, &preconnectReply)
if nil != rep.Err {
return rep
}
@@ -235,26 +247,28 @@ func ConnectApplication(args *ConnectArgs) ConnectAttempt {
args.Payload, err = EncodePayload(&args.PayloadRaw)
if err != nil {
log.Errorf("unable to connect application: %v", err)
+ rep.Err = err
return rep
}
rep.Collector = preconnectReply.Collector
- call.Collector = rep.Collector
+ cmd.Collector = rep.Collector
- call.Collectible = collector.CollectibleFunc(func(auditVersion bool) ([]byte, error) {
+ cs.Collectible = collector.CollectibleFunc(func(auditVersion bool) ([]byte, error) {
return args.Payload, nil
})
- call.Name = collector.CommandConnect
+ cmd.Name = collector.CommandConnect
// Make call to connect
- rep.RawReply, rep.Err = args.Client.Execute(call)
- if nil != rep.Err {
+ rep.RawReply = args.Client.Execute(cmd, cs)
+ if nil != rep.RawReply.Err {
+ rep.Err = rep.RawReply.Err
return rep
}
// Process the connect reply
processConnectMessages(rep.RawReply)
- rep.Reply, rep.Err = parseConnectReply(rep.RawReply)
+ rep.Reply, rep.Err = parseConnectReply(rep.RawReply.Body)
return rep
}
@@ -358,7 +372,7 @@ func (p *Processor) processAppInfo(m AppInfoMessage) {
p.apps[key] = app
}
-func processConnectMessages(reply []byte) {
+func processConnectMessages(reply collector.RPMResponse) {
var msgs struct {
Messages []struct {
Message string `json:"message"`
@@ -366,7 +380,7 @@ func processConnectMessages(reply []byte) {
} `json:"messages"`
}
- err := json.Unmarshal(reply, &msgs)
+ err := json.Unmarshal(reply.Body, &msgs)
if nil != err {
return
}
@@ -391,23 +405,29 @@ func (p *Processor) processConnectAttempt(rep ConnectAttempt) {
return
}
- if nil != rep.Err {
- switch {
- case collector.IsDisconnect(rep.Err):
- app.state = AppStateDisconnected
- case collector.IsLicenseException(rep.Err):
+ app.RawConnectReply = rep.RawReply.Body
+ if rep.RawReply.IsDisconnect() {
+ app.state = AppStateDisconnected
+ log.Warnf("app '%s' connect attempt returned %s; disconnecting", app, rep.RawReply.Err)
+ return
+ } else if rep.RawReply.IsRestartException() {
+ // in accord with the spec, invalid license is a restart exception. Except we want
+ // to shutdown instead of restart.
+ if rep.RawReply.IsInvalidLicense() {
app.state = AppStateInvalidLicense
- default:
- // Try again later.
- app.state = AppStateUnknown
+ log.Warnf("app '%s' connect attempt returned %s; shutting down", app, rep.RawReply.Err)
+ } else {
+ app.state = AppStateRestart
+ log.Warnf("app '%s' connect attempt returned %s; restarting", app, rep.RawReply.Err)
}
-
+ return
+ } else if nil != rep.Err {
+ app.state = AppStateUnknown
log.Warnf("app '%s' connect attempt returned %s", app, rep.Err)
return
}
app.connectReply = rep.Reply
- app.RawConnectReply = rep.RawReply
app.state = AppStateConnected
app.collector = rep.Collector
app.RawSecurityPolicies = rep.RawSecurityPolicies
@@ -446,18 +466,25 @@ type harvestArgs struct {
harvestErrorChannel chan<- HarvestError
client collector.Client
splitLargePayloads bool
+ RequestHeadersMap map[string]string
+ maxPayloadSize int
+
// Used for final harvest before daemon exit
blocking bool
}
func harvestPayload(p PayloadCreator, args *harvestArgs) {
- call := collector.Cmd{
- Name: p.Cmd(),
- Collector: args.collector,
- License: args.license,
+ cmd := collector.RpmCmd{
+ Name: p.Cmd(),
+ Collector: args.collector,
+ License: args.license,
+ RunID: args.id.String(),
+ RequestHeadersMap: args.RequestHeadersMap,
+ MaxPayloadSize: args.maxPayloadSize,
+ }
+ cs := collector.RpmControls{
AgentLanguage: args.agentLanguage,
AgentVersion: args.agentVersion,
- RunID: args.id.String(),
Collectible: collector.CollectibleFunc(func(auditVersion bool) ([]byte, error) {
if auditVersion {
return p.Audit(args.id, args.HarvestStart)
@@ -466,17 +493,16 @@ func harvestPayload(p PayloadCreator, args *harvestArgs) {
}),
}
- reply, err := args.client.Execute(call)
+ reply := args.client.Execute(cmd, cs)
// We don't need to process the response to a harvest command unless an
// error happened. (Note that this may change if we have to support metric
// cache ids).
- if nil == err {
+ if nil == reply.Err {
return
}
args.harvestErrorChannel <- HarvestError{
- Err: err,
Reply: reply,
id: args.id,
data: p,
@@ -636,6 +662,8 @@ func (p *Processor) doHarvest(ph ProcessorHarvest) {
rules: app.connectReply.MetricRules,
harvestErrorChannel: p.harvestErrorChannel,
client: p.cfg.Client,
+ RequestHeadersMap: app.connectReply.RequestHeadersMap,
+ maxPayloadSize: app.connectReply.MaxPayloadSizeInBytes,
// Splitting large payloads is limited to applications that have
// distributed tracing on. That restriction is a saftey measure
// to not overload the backend by sending two payloads instead
@@ -643,7 +671,6 @@ func (p *Processor) doHarvest(ph ProcessorHarvest) {
splitLargePayloads: app.info.Settings["newrelic.distributed_tracing_enabled"] == true,
blocking: ph.Blocking,
}
-
harvestByType(ph.AppHarvest, &args, harvestType)
}
@@ -658,28 +685,21 @@ func (p *Processor) processHarvestError(d HarvestError) {
}
app := h.App
- log.Warnf("app %q with run id %q received %s", app, d.id, d.Err)
+ log.Warnf("app %q with run id %q received %s", app, d.id, d.Reply.Err)
+ h.Harvest.IncrementHttpErrors(d.Reply.StatusCode)
+
+ if d.Reply.ShouldSaveHarvestData() {
+ d.data.FailedHarvest(h.Harvest)
+ }
switch {
- case collector.IsDisconnect(d.Err):
+ case d.Reply.IsDisconnect() || app.state == AppStateDisconnected:
app.state = AppStateDisconnected
p.shutdownAppHarvest(d.id)
- case collector.IsLicenseException(d.Err):
- // I think this is unlikely to ever happen (the invalid license
- // exception should trigger during the connect), but it is included
- // here for defensiveness.
- app.state = AppStateInvalidLicense
- p.shutdownAppHarvest(d.id)
- case collector.IsRestartException(d.Err):
+ case d.Reply.IsRestartException() || app.state == AppStateRestart:
app.state = AppStateUnknown
p.shutdownAppHarvest(d.id)
p.considerConnect(app)
- case (d.Err == collector.ErrPayloadTooLarge) ||
- (d.Err == collector.ErrUnsupportedMedia):
- // Do not call the failed harvest fn, since we do not want to save
- // the data.
- default:
- d.data.FailedHarvest(h.Harvest)
}
}
diff --git a/src/newrelic/processor_test.go b/src/newrelic/processor_test.go
index 77ddb0d9c..67df8a8fa 100644
--- a/src/newrelic/processor_test.go
+++ b/src/newrelic/processor_test.go
@@ -9,6 +9,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "strings"
"testing"
"time"
@@ -17,6 +18,39 @@ import (
"newrelic/utilization"
)
+var ErrPayloadTooLarge = errors.New("payload too large")
+var ErrUnauthorized = errors.New("unauthorized")
+var ErrUnsupportedMedia = errors.New("unsupported media")
+
+type rpmException struct {
+ Message string `json:"message"`
+ ErrorType string `json:"error_type"`
+}
+
+func (e *rpmException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorType, e.Message)
+}
+
+const (
+ forceRestartType = "NewRelic::Agent::ForceRestartException"
+ disconnectType = "NewRelic::Agent::ForceDisconnectException"
+ licenseInvalidType = "NewRelic::Agent::LicenseException"
+ runtimeType = "RuntimeError"
+)
+
+// These clients exist for testing.
+var (
+ DisconnectClient = collector.ClientFn(func(cmd collector.RpmCmd, cs collector.RpmControls) collector.RPMResponse {
+ return collector.RPMResponse{Body: nil, Err: SampleDisonnectException, StatusCode: 410}
+ })
+ LicenseInvalidClient = collector.ClientFn(func(cmd collector.RpmCmd, cs collector.RpmControls) collector.RPMResponse {
+ return collector.RPMResponse{Body: nil, Err: SampleLicenseInvalidException, StatusCode: 401}
+ })
+ SampleRestartException = &rpmException{ErrorType: forceRestartType}
+ SampleDisonnectException = &rpmException{ErrorType: disconnectType}
+ SampleLicenseInvalidException = &rpmException{ErrorType: licenseInvalidType}
+)
+
var (
idOne = AgentRunID("one")
idTwo = AgentRunID("two")
@@ -26,13 +60,14 @@ var (
sampleTrace = &TxnTrace{Data: data}
sampleCustomEvent = []byte("half birthday")
- sampleSpanEvent = []byte("belated birthday")
+ sampleSpanEvent = []byte("belated birthday")
sampleErrorEvent = []byte("forgotten birthday")
)
type ClientReturn struct {
reply []byte
err error
+ code int
}
type ClientParams struct {
@@ -52,14 +87,14 @@ func NewMockedProcessor(numberOfHarvestPayload int) *MockedProcessor {
clientReturn := make(chan ClientReturn, numberOfHarvestPayload)
clientParams := make(chan ClientParams, numberOfHarvestPayload)
- client := collector.ClientFn(func(cmd collector.Cmd) ([]byte, error) {
- data, err := cmd.Collectible.CollectorJSON(false)
+ client := collector.ClientFn(func(cmd collector.RpmCmd, cs collector.RpmControls) collector.RPMResponse {
+ data, err := cs.Collectible.CollectorJSON(false)
if nil != err {
- return nil, err
+ return collector.RPMResponse{Err: err}
}
clientParams <- ClientParams{cmd.Name, data}
r := <-clientReturn
- return r.reply, r.err
+ return collector.RPMResponse{Body: r.reply, Err: r.err, StatusCode: r.code}
})
p := NewProcessor(ProcessorConfig{Client: client})
@@ -91,17 +126,17 @@ func (m *MockedProcessor) DoAppInfo(t *testing.T, id *AgentRunID, expectState Ap
func (m *MockedProcessor) DoConnect(t *testing.T, id *AgentRunID) {
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil, 200}
<-m.clientParams // connect
- m.clientReturn <- ClientReturn{[]byte(`{"agent_run_id":"` + id.String() + `","zip":"zap","event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":5,"span_event_data":5}}}`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{"agent_run_id":"` + id.String() + `","zip":"zap","event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":5,"span_event_data":5}}}`), nil, 200}
<-m.p.trackProgress // receive connect reply
}
func (m *MockedProcessor) DoConnectConfiguredReply(t *testing.T, reply string) {
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil, 200}
<-m.clientParams // connect
- m.clientReturn <- ClientReturn{[]byte(reply), nil}
+ m.clientReturn <- ClientReturn{[]byte(reply), nil, 200}
<-m.p.trackProgress // receive connect reply
}
@@ -131,8 +166,8 @@ var (
h.ErrorEvents.AddEventFromData(sampleErrorEvent, SamplingPriority(0.8))
})
txnSpanEventSample = AggregaterIntoFn(func(h *Harvest) {
- h.SpanEvents.AddEventFromData(sampleSpanEvent, SamplingPriority(0.8))
- })
+ h.SpanEvents.AddEventFromData(sampleSpanEvent, SamplingPriority(0.8))
+ })
txnEventSample1Times = func(times int) AggregaterIntoFn {
return AggregaterIntoFn(func(h *Harvest) {
for i := 0; i < times; i++ {
@@ -227,6 +262,87 @@ func TestProcessorHarvestCleanExit(t *testing.T) {
}
}
+func TestSupportabilityHarvest(t *testing.T) {
+ m := NewMockedProcessor(1)
+
+ m.DoAppInfo(t, nil, AppStateUnknown)
+
+ m.DoConnect(t, &idOne)
+ m.DoAppInfo(t, nil, AppStateConnected)
+
+ m.TxnData(t, idOne, txnErrorEventSample)
+
+ m.processorHarvestChan <- ProcessorHarvest{
+ AppHarvest: m.p.harvests[idOne],
+ ID: idOne,
+ Type: HarvestDefaultData,
+ }
+ <-m.p.trackProgress // receive harvest notice
+ m.clientReturn <- ClientReturn{} /* metrics */
+ //<-m.p.trackProgress // receive harvest
+
+ m.processorHarvestChan <- ProcessorHarvest{
+ AppHarvest: m.p.harvests[idOne],
+ ID: idOne,
+ Type: HarvestDefaultData,
+ }
+ <-m.p.trackProgress // receive harvest notice
+
+ cp := <-m.clientParams
+ // Add timeout error response code for second harvest
+ m.clientReturn <- ClientReturn{nil, ErrUnsupportedMedia, 408}
+ <-m.p.trackProgress // receive harvest error
+
+ harvest := m.p.harvests[idOne]
+ limits := collector.EventHarvestConfig{
+ ReportPeriod: 1234,
+ EventConfigs: collector.EventConfigs{
+ ErrorEventConfig: collector.Event{
+ Limit: 1,
+ },
+ AnalyticEventConfig: collector.Event{
+ Limit: 2,
+ },
+ CustomEventConfig: collector.Event{
+ Limit: 3,
+ },
+ SpanEventConfig: collector.Event{
+ Limit: 4,
+ },
+ },
+ }
+ harvest.createFinalMetrics(limits, nil)
+ // Because MockedProcessor wraps a real processor, we have no way to directly set the time
+ // of harvests. So we extract the time from what we receive
+ time := strings.Split(string(cp.data), ",")[1]
+ var expectedJSON = `["one",` + time + `,1417136520,` +
+ `[[{"name":"Instance/Reporting"},[2,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Agent/Collector/HTTPError/408"},[1,0,0,0,0,0]],` + // Check for HTTPError Supportability metric
+ `[{"name":"Supportability/Agent/Collector/metric_data/Attempts"},[1,0,0,0,0,0]],` + // Metrics were sent first when the 408 error occurred, so check for the metric failure.
+ `[{"name":"Supportability/AnalyticsEvents/TotalEventsSeen"},[0,0,0,0,0,0]],` +
+ `[{"name":"Supportability/AnalyticsEvents/TotalEventsSent"},[0,0,0,0,0,0]],` +
+ `[{"name":"Supportability/EventHarvest/AnalyticEventData/HarvestLimit"},[10002,0,0,0,0,0]],` +
+ `[{"name":"Supportability/EventHarvest/CustomEventData/HarvestLimit"},[8,0,0,0,0,0]],` +
+ `[{"name":"Supportability/EventHarvest/ErrorEventData/HarvestLimit"},[6,0,0,0,0,0]],` +
+ `[{"name":"Supportability/EventHarvest/ReportPeriod"},[5000001234,0,0,0,0,0]],` +
+ `[{"name":"Supportability/EventHarvest/SpanEventData/HarvestLimit"},[4,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/Customer/Seen"},[0,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/Customer/Sent"},[0,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/TransactionError/Seen"},[2,0,0,0,0,0]],` +
+ `[{"name":"Supportability/Events/TransactionError/Sent"},[2,0,0,0,0,0]],` +
+ `[{"name":"Supportability/SpanEvent/TotalEventsSeen"},[0,0,0,0,0,0]],` +
+ `[{"name":"Supportability/SpanEvent/TotalEventsSent"},[0,0,0,0,0,0]]]]`
+
+ json, err := harvest.Metrics.CollectorJSONSorted(AgentRunID(idOne), end)
+ if nil != err {
+ t.Fatal(err)
+ }
+ if got := string(json); got != expectedJSON {
+ t.Errorf("\ngot=%q \nwant=%q", got, expectedJSON)
+ }
+ m.p.quit()
+}
+
func TestProcessorHarvestErrorEvents(t *testing.T) {
m := NewMockedProcessor(1)
@@ -256,7 +372,7 @@ func TestProcessorHarvestSpanEvents(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
- m.DoConnectConfiguredReply(t, `{"agent_run_id":"` + idOne.String() + `","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":7},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
+ m.DoConnectConfiguredReply(t, `{"agent_run_id":"`+idOne.String()+`","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":7},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
m.DoAppInfo(t, nil, AppStateConnected)
m.TxnData(t, idOne, txnSpanEventSample)
@@ -284,7 +400,7 @@ func TestProcessorHarvestSpanEventsZeroReservoir(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
- m.DoConnectConfiguredReply(t, `{"agent_run_id":"` + idOne.String() + `","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":0},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
+ m.DoConnectConfiguredReply(t, `{"agent_run_id":"`+idOne.String()+`","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":0},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
m.DoAppInfo(t, nil, AppStateConnected)
m.TxnData(t, idOne, txnSpanEventSample)
@@ -326,7 +442,7 @@ func TestProcessorHarvestSpanEventsExceedReservoir(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
- m.DoConnectConfiguredReply(t, `{"agent_run_id":"` + idOne.String() + `","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":1},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
+ m.DoConnectConfiguredReply(t, `{"agent_run_id":"`+idOne.String()+`","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":1},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
m.DoAppInfo(t, nil, AppStateConnected)
m.TxnData(t, idOne, txnSpanEventSample)
@@ -349,13 +465,12 @@ func TestProcessorHarvestSpanEventsExceedReservoir(t *testing.T) {
}
-
func TestProcessorHarvestZeroErrorEvents(t *testing.T) {
m := NewMockedProcessor(1)
m.DoAppInfo(t, nil, AppStateUnknown)
- m.DoConnectConfiguredReply(t, `{"agent_run_id":"` + idOne.String() + `","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":7},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
+ m.DoConnectConfiguredReply(t, `{"agent_run_id":"`+idOne.String()+`","zip":"zap","span_event_harvest_config":{"report_period_ms":5000,"harvest_limit":7},"event_harvest_config":{"report_period_ms":5000,"harvest_limits":{"analytics_event_data":5,"custom_event_data":5,"error_event_data":0,"span_event_data":5}}}`)
m.DoAppInfo(t, nil, AppStateConnected)
m.TxnData(t, idOne, txnErrorEventSample)
@@ -521,7 +636,7 @@ func TestForceRestart(t *testing.T) {
t.Fatal(string(cp.data))
}
- m.clientReturn <- ClientReturn{nil, collector.SampleRestartException}
+ m.clientReturn <- ClientReturn{nil, SampleRestartException, 401}
<-m.p.trackProgress // receive harvest error
m.DoConnect(t, &idTwo)
@@ -552,7 +667,7 @@ func TestDisconnectAtPreconnect(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{nil, collector.SampleDisonnectException}
+ m.clientReturn <- ClientReturn{nil, SampleDisonnectException, 410}
<-m.p.trackProgress // receive connect reply
m.DoAppInfo(t, nil, AppStateDisconnected)
@@ -566,7 +681,7 @@ func TestLicenseExceptionAtPreconnect(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{nil, collector.SampleLicenseInvalidException}
+ m.clientReturn <- ClientReturn{nil, SampleLicenseInvalidException, 401}
<-m.p.trackProgress // receive connect reply
m.DoAppInfo(t, nil, AppStateInvalidLicense)
@@ -580,9 +695,9 @@ func TestDisconnectAtConnect(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil, 200}
<-m.clientParams // connect
- m.clientReturn <- ClientReturn{nil, collector.SampleDisonnectException}
+ m.clientReturn <- ClientReturn{nil, SampleDisonnectException, 410}
<-m.p.trackProgress // receive connect reply
m.DoAppInfo(t, nil, AppStateDisconnected)
@@ -611,11 +726,11 @@ func TestDisconnectAtHarvest(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
<-m.clientParams
- m.clientReturn <- ClientReturn{nil, collector.SampleDisonnectException}
+ m.clientReturn <- ClientReturn{nil, SampleDisonnectException, 410}
<-m.p.trackProgress // receive harvest error
<-m.clientParams
- m.clientReturn <- ClientReturn{nil, collector.SampleDisonnectException}
+ m.clientReturn <- ClientReturn{nil, SampleDisonnectException, 410}
<-m.p.trackProgress // receive harvest error
m.DoAppInfo(t, nil, AppStateDisconnected)
@@ -644,10 +759,11 @@ func TestLicenseExceptionAtHarvest(t *testing.T) {
t.Fatal(string(cp.data))
}
- m.clientReturn <- ClientReturn{nil, collector.SampleLicenseInvalidException}
+ m.clientReturn <- ClientReturn{nil, SampleLicenseInvalidException, 401}
<-m.p.trackProgress // receive harvest error
- m.DoAppInfo(t, nil, AppStateInvalidLicense)
+ // Unknown app state triggered immediately following AppStateRestart
+ m.DoAppInfo(t, nil, AppStateUnknown)
m.p.quit()
}
@@ -658,9 +774,9 @@ func TestMalformedConnectReply(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{"redirect_host":"specific_collector.com"}`), nil, 200}
<-m.clientParams // connect
- m.clientReturn <- ClientReturn{[]byte(`{`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`{`), nil, 202}
<-m.p.trackProgress // receive connect reply
m.DoAppInfo(t, nil, AppStateUnknown)
@@ -674,7 +790,7 @@ func TestMalformedCollector(t *testing.T) {
m.DoAppInfo(t, nil, AppStateUnknown)
<-m.clientParams // preconnect
- m.clientReturn <- ClientReturn{[]byte(`"`), nil}
+ m.clientReturn <- ClientReturn{[]byte(`"`), nil, 200}
<-m.p.trackProgress // receive connect reply
m.DoAppInfo(t, nil, AppStateUnknown)
@@ -700,7 +816,7 @@ func TestDataSavedOnHarvestError(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp := <-m.clientParams
- m.clientReturn <- ClientReturn{nil, errors.New("unusual error")}
+ m.clientReturn <- ClientReturn{nil, errors.New("unusual error"), 500}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":1},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -714,7 +830,7 @@ func TestDataSavedOnHarvestError(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp = <-m.clientParams
- m.clientReturn <- ClientReturn{nil, nil}
+ m.clientReturn <- ClientReturn{nil, nil, 202}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":1},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -738,7 +854,7 @@ func TestNoDataSavedOnPayloadTooLarge(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp := <-m.clientParams
- m.clientReturn <- ClientReturn{nil, collector.ErrPayloadTooLarge}
+ m.clientReturn <- ClientReturn{nil, ErrPayloadTooLarge, 413}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":1},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -754,7 +870,7 @@ func TestNoDataSavedOnPayloadTooLarge(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp = <-m.clientParams
- m.clientReturn <- ClientReturn{nil, nil}
+ m.clientReturn <- ClientReturn{nil, nil, 202}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":2},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -778,7 +894,7 @@ func TestNoDataSavedOnErrUnsupportedMedia(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp := <-m.clientParams
- m.clientReturn <- ClientReturn{nil, collector.ErrUnsupportedMedia}
+ m.clientReturn <- ClientReturn{nil, ErrUnsupportedMedia, 415}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":1},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -794,7 +910,7 @@ func TestNoDataSavedOnErrUnsupportedMedia(t *testing.T) {
<-m.p.trackProgress // receive harvest notice
cp = <-m.clientParams
- m.clientReturn <- ClientReturn{nil, nil}
+ m.clientReturn <- ClientReturn{nil, nil, 202}
if string(cp.data) != `["one",{"reservoir_size":10000,"events_seen":1},[[{"x":2},{},{}]]]` {
t.Fatal(string(cp.data))
}
@@ -816,11 +932,11 @@ var (
HighSecurity: true,
Hostname: "agent-hostname",
}
- connectClient = collector.ClientFn(func(cmd collector.Cmd) ([]byte, error) {
+ connectClient = collector.ClientFn(func(cmd collector.RpmCmd, cs collector.RpmControls) collector.RPMResponse {
if cmd.Name == collector.CommandPreconnect {
- return []byte(`{"redirect_host":"specific_collector.com"}`), nil
+ return collector.RPMResponse{Body: []byte(`{"redirect_host":"specific_collector.com"}`), Err: nil, StatusCode: 202}
}
- return []byte(`{"agent_run_id":"12345","zip":"zap"}`), nil
+ return collector.RPMResponse{Body: []byte(`{"agent_run_id":"12345","zip":"zap"}`), Err: nil, StatusCode: 200}
})
)
@@ -829,7 +945,7 @@ func init() {
}
func TestAppInfoInvalid(t *testing.T) {
- p := NewProcessor(ProcessorConfig{Client: collector.LicenseInvalidClient})
+ p := NewProcessor(ProcessorConfig{Client: LicenseInvalidClient})
p.processorHarvestChan = nil
p.trackProgress = make(chan struct{}, 100)
go p.Run()
@@ -851,7 +967,7 @@ func TestAppInfoInvalid(t *testing.T) {
}
func TestAppInfoDisconnected(t *testing.T) {
- p := NewProcessor(ProcessorConfig{Client: collector.DisconnectClient})
+ p := NewProcessor(ProcessorConfig{Client: DisconnectClient})
p.processorHarvestChan = nil
p.trackProgress = make(chan struct{}, 100)
go p.Run()
diff --git a/src/newrelic/protocol/App.go b/src/newrelic/protocol/App.go
index f70327c63..37e8d2a5c 100644
--- a/src/newrelic/protocol/App.go
+++ b/src/newrelic/protocol/App.go
@@ -1,9 +1,4 @@
// Code generated by the FlatBuffers compiler. DO NOT EDIT.
-//
-// Copyright 2020 New Relic Corporation. All rights reserved.
-// SPDX-License-Identifier: Apache-2.0
-//
-
package protocol
@@ -190,8 +185,16 @@ func (rcv *App) MutateSpanEventsMaxSamplesStored(n uint64) bool {
return rcv._tab.MutateUint64Slot(36, n)
}
+func (rcv *App) Metadata() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(38))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
func AppStart(builder *flatbuffers.Builder) {
- builder.StartObject(17)
+ builder.StartObject(18)
}
func AppAddLicense(builder *flatbuffers.Builder, license flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(license), 0)
@@ -244,6 +247,9 @@ func AppAddSpanQueueSize(builder *flatbuffers.Builder, spanQueueSize uint64) {
func AppAddSpanEventsMaxSamplesStored(builder *flatbuffers.Builder, spanEventsMaxSamplesStored uint64) {
builder.PrependUint64Slot(16, spanEventsMaxSamplesStored, 0)
}
+func AppAddMetadata(builder *flatbuffers.Builder, metadata flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(17, flatbuffers.UOffsetT(metadata), 0)
+}
func AppEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
diff --git a/src/newrelic/utilization/addresses.go b/src/newrelic/utilization/addresses.go
new file mode 100644
index 000000000..0fa26b1dd
--- /dev/null
+++ b/src/newrelic/utilization/addresses.go
@@ -0,0 +1,80 @@
+//
+// Copyright 2020 New Relic Corporation. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package utilization
+
+import (
+ "fmt"
+ "net"
+)
+
+// nonlocalIPAddressesByInterface returns every non-loopback, non-unspecified
+// IP address on the host, grouped by the name of the owning interface.
+func nonlocalIPAddressesByInterface() (map[string][]string, error) {
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+ ips := make(map[string][]string, len(ifaces))
+ for _, ifc := range ifaces {
+ addrs, err := ifc.Addrs()
+ if err != nil {
+ continue
+ }
+ for _, addr := range addrs {
+ var ip net.IP
+ switch iptype := addr.(type) {
+ case *net.IPAddr:
+ ip = iptype.IP
+ case *net.IPNet:
+ ip = iptype.IP
+ case *net.TCPAddr:
+ ip = iptype.IP
+ case *net.UDPAddr:
+ ip = iptype.IP
+ }
+ if nil != ip && !ip.IsLoopback() && !ip.IsUnspecified() {
+ ips[ifc.Name] = append(ips[ifc.Name], ip.String())
+ }
+ }
+ }
+ return ips, nil
+}
+
+// utilizationIPs gathers IP address which may help identify this entity. This
+// code chooses all IPs from the interface which contains the IP of a UDP
+// connection with NR. This approach has the following advantages:
+// * Matches the behavior of the Java agent.
+// * Reports fewer IPs to lower linking burden on infrastructure backend.
+// * The UDP connection interface is more likely to contain unique external IPs.
+func utilizationIPs() ([]string, error) {
+ // Port choice designed to match
+ // https://source.datanerd.us/java-agent/java_agent/blob/master/newrelic-agent/src/main/java/com/newrelic/agent/config/Hostname.java#L110
+ conn, err := net.Dial("udp", "newrelic.com:10002")
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+
+ addr, ok := conn.LocalAddr().(*net.UDPAddr)
+
+ if !ok || nil == addr || addr.IP.IsLoopback() || addr.IP.IsUnspecified() {
+ return nil, fmt.Errorf("unexpected connection address: %v", conn.LocalAddr())
+ }
+ outboundIP := addr.IP.String()
+
+ ipsByInterface, err := nonlocalIPAddressesByInterface()
+ if err != nil {
+ return nil, err
+ }
+ for _, ips := range ipsByInterface {
+ for _, ip := range ips {
+ if ip == outboundIP {
+ return ips, nil
+ }
+ }
+ }
+ return nil, nil
+}
diff --git a/src/newrelic/utilization/aws.go b/src/newrelic/utilization/aws.go
index 96f6fa9dc..b666a8ba7 100644
--- a/src/newrelic/utilization/aws.go
+++ b/src/newrelic/utilization/aws.go
@@ -13,7 +13,7 @@ import (
const (
awsHostname = "169.254.169.254"
- awsEndpointPath = "/2016-09-02/dynamic/instance-identity/document"
+ awsEndpointPath = "/latest/dynamic/instance-identity/document"
awsTokenEndpointPath = "/latest/api/token"
awsEndpoint = "http://" + awsHostname + awsEndpointPath
awsTokenEndpoint = "http://" + awsHostname + awsTokenEndpointPath
diff --git a/src/newrelic/utilization/fqdn.go b/src/newrelic/utilization/fqdn.go
new file mode 100644
index 000000000..2a5c68266
--- /dev/null
+++ b/src/newrelic/utilization/fqdn.go
@@ -0,0 +1,38 @@
+//
+// Copyright 2020 New Relic Corporation. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+//go:build go1.8
+// +build go1.8
+
+package utilization
+
+import (
+ "context"
+ "net"
+ "strings"
+)
+
+// lookupAddr performs a reverse DNS lookup of addr, bounded by
+// lookupAddrTimeout so a slow resolver cannot stall utilization gathering.
+func lookupAddr(addr string) ([]string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), lookupAddrTimeout)
+ defer cancel()
+
+ r := &net.Resolver{}
+
+ return r.LookupAddr(ctx, addr)
+}
+
+// getFQDN returns the fully qualified domain name of the first candidate IP
+// that resolves, with any trailing dot removed; it returns "" if none resolve.
+func getFQDN(candidateIPs []string) string {
+ for _, ip := range candidateIPs {
+ names, _ := lookupAddr(ip)
+ if len(names) > 0 {
+ return strings.TrimSuffix(names[0], ".")
+ }
+ }
+ return ""
+}
diff --git a/src/newrelic/utilization/kubernetes.go b/src/newrelic/utilization/kubernetes.go
new file mode 100644
index 000000000..39df61a4a
--- /dev/null
+++ b/src/newrelic/utilization/kubernetes.go
@@ -0,0 +1,68 @@
+//
+// Copyright 2020 New Relic Corporation. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+//
+
+package utilization
+
+import (
+ "errors"
+ "fmt"
+)
+
+// kubernetes records the Kubernetes attributes reported in the utilization
+// payload; currently only the service host from the container environment.
+type kubernetes struct {
+ KubernetesServiceHost string `json:"kubernetes_service_host,omitempty"`
+
+ // Having a custom getter allows the unit tests to mock os.Getenv().
+ environmentVariableGetter func(key string) string
+}
+
+// GatherKubernetes sets v.Kubernetes when the KUBERNETES_SERVICE_HOST
+// environment variable (read via getenv) indicates we are running inside
+// Kubernetes; it returns an error when detection fails.
+func GatherKubernetes(v *vendors, getenv func(string) string) error {
+ k8s := newKubernetes(getenv)
+ if err := k8s.Gather(); err != nil {
+ return fmt.Errorf("Kubernetes not detected: %s", err)
+ }
+
+ if k8s.KubernetesServiceHost != "" {
+ v.Kubernetes = k8s
+ }
+
+ return nil
+}
+
+// newKubernetes builds a kubernetes gatherer that reads env vars via getenv.
+func newKubernetes(getenv func(string) string) *kubernetes {
+ return &kubernetes{
+ environmentVariableGetter: getenv,
+ }
+}
+
+// Gather reads and validates KUBERNETES_SERVICE_HOST.
+func (k8s *kubernetes) Gather() error {
+ k8s.KubernetesServiceHost = k8s.environmentVariableGetter("KUBERNETES_SERVICE_HOST")
+
+ if err := k8s.validate(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// validate normalizes the service host value; it returns an error when the
+// value is malformed, or reports via err that the variable was unset.
+func (k8s *kubernetes) validate() (err error) {
+ k8s.KubernetesServiceHost, err = normalizeValue(k8s.KubernetesServiceHost)
+ if err != nil {
+ return fmt.Errorf("Invalid Kubernetes Service Host: %v", err)
+ }
+
+ if k8s.KubernetesServiceHost == "" {
+ err = errors.New("The environment variable KUBERNETES_SERVICE_HOST was unavailable")
+ }
+ return
+}
diff --git a/src/newrelic/utilization/provider.go b/src/newrelic/utilization/provider.go
index 1e9fb2ef0..09e3bfa57 100644
--- a/src/newrelic/utilization/provider.go
+++ b/src/newrelic/utilization/provider.go
@@ -19,6 +19,7 @@ const (
maxFieldValueSize = 255 // The maximum value size, in bytes.
providerTimeout = 1 * time.Second // The maximum time a HTTP provider
// may block.
+ lookupAddrTimeout = 500 * time.Millisecond
)
// validationError represents a response from a provider endpoint that doesn't
diff --git a/src/newrelic/utilization/utilization_hash.go b/src/newrelic/utilization/utilization_hash.go
index 0fe4054f5..aeba66e7e 100644
--- a/src/newrelic/utilization/utilization_hash.go
+++ b/src/newrelic/utilization/utilization_hash.go
@@ -9,6 +9,7 @@ package utilization
import (
"fmt"
+ "os"
"runtime"
"sync"
@@ -17,7 +18,7 @@ import (
)
const (
- metadataVersion = 3
+ metadataVersion = 5
)
type Config struct {
@@ -26,6 +27,7 @@ type Config struct {
DetectGCP bool
DetectPCF bool
DetectDocker bool
+ DetectKubernetes bool
LogicalProcessors int
TotalRamMIB int
BillingHostname string
@@ -44,9 +46,11 @@ type Data struct {
LogicalProcessors *int `json:"logical_processors"`
RamMiB *uint64 `json:"total_ram_mib"`
Hostname string `json:"hostname"`
+ FullHostname string `json:"full_hostname,omitempty"`
+ Addresses []string `json:"ip_address,omitempty"`
BootID string `json:"boot_id,omitempty"`
- Vendors *vendors `json:"vendors,omitempty"`
Config *override `json:"config,omitempty"`
+ Vendors *vendors `json:"vendors,omitempty"`
}
type docker struct {
@@ -54,15 +58,16 @@ type docker struct {
}
type vendors struct {
- AWS *aws `json:"aws,omitempty"`
- Azure *azure `json:"azure,omitempty"`
- GCP *gcp `json:"gcp,omitempty"`
- PCF *pcf `json:"pcf,omitempty"`
- Docker *docker `json:"docker,omitempty"`
+ AWS *aws `json:"aws,omitempty"`
+ Azure *azure `json:"azure,omitempty"`
+ GCP *gcp `json:"gcp,omitempty"`
+ PCF *pcf `json:"pcf,omitempty"`
+ Docker *docker `json:"docker,omitempty"`
+ Kubernetes *kubernetes `json:"kubernetes,omitempty"`
}
func (v *vendors) isEmpty() bool {
- return v.AWS == nil && v.Azure == nil && v.GCP == nil && v.PCF == nil && v.Docker == nil
+ return nil == v || *v == vendors{}
}
func overrideFromConfig(config Config) *override {
@@ -112,6 +117,14 @@ func Gather(config Config) *Data {
goGather(GatherCPU, uDat)
goGather(GatherMemory, uDat)
+ // Gather IPs before spawning goroutines since the IPs are used in
+ // gathering full hostname.
+ if ips, err := utilizationIPs(); nil == err {
+ uDat.Addresses = ips
+ } else {
+ log.Debugf("Error gathering addresses: %s", err)
+ }
+
// Now things the user can turn off.
if config.DetectDocker {
goGather(GatherDockerID, uDat)
@@ -133,6 +146,18 @@
goGather(GatherPCF, uDat)
}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ uDat.FullHostname = getFQDN(uDat.Addresses)
+ }()
+
+ if config.DetectKubernetes {
+ if errK8s := GatherKubernetes(uDat.Vendors, os.Getenv); errK8s != nil {
+ log.Debugf("%s", errK8s)
+ }
+ }
+
// Now we wait for everything!
wg.Wait()
diff --git a/src/newrelic/utilization/utilization_hash_test.go b/src/newrelic/utilization/utilization_hash_test.go
index 9546248eb..0d159ffb2 100644
--- a/src/newrelic/utilization/utilization_hash_test.go
+++ b/src/newrelic/utilization/utilization_hash_test.go
@@ -30,6 +30,7 @@ func TestJSONMarshalling(t *testing.T) {
AvailabilityZone: "us-west-1",
},
Docker: &docker{ID: "47cbd16b77c50cbf71401"},
+ Kubernetes: &kubernetes{KubernetesServiceHost: "10.96.0.1"},
},
Config: &override{
LogicalProcessors: &configProcessors,
@@ -37,10 +38,13 @@ func TestJSONMarshalling(t *testing.T) {
}
expect := `{
- "metadata_version": 3,
+ "metadata_version": 5,
"logical_processors": 4,
"total_ram_mib": 1024,
"hostname": "localhost",
+ "config": {
+ "logical_processors": 16
+ },
"vendors": {
"aws": {
"instanceId": "8BADFOOD",
@@ -49,10 +53,10 @@ func TestJSONMarshalling(t *testing.T) {
},
"docker": {
"id": "47cbd16b77c50cbf71401"
+ },
+ "kubernetes": {
+ "kubernetes_service_host": "10.96.0.1"
}
- },
- "config": {
- "logical_processors": 16
}
}`
@@ -69,7 +73,7 @@ func TestJSONMarshalling(t *testing.T) {
u.Hostname = ""
u.Config = nil
expect = `{
- "metadata_version": 3,
+ "metadata_version": 5,
"logical_processors": 4,
"total_ram_mib": null,
"hostname": "",
@@ -81,6 +85,9 @@ func TestJSONMarshalling(t *testing.T) {
},
"docker": {
"id": "47cbd16b77c50cbf71401"
+ },
+ "kubernetes": {
+ "kubernetes_service_host": "10.96.0.1"
}
}
}`
@@ -99,18 +106,20 @@ func TestJSONMarshalling(t *testing.T) {
func TestUtilizationHash(t *testing.T) {
configs := []Config{
{
- DetectAWS: true,
- DetectAzure: true,
- DetectGCP: true,
- DetectPCF: true,
- DetectDocker: true,
+ DetectAWS: true,
+ DetectAzure: true,
+ DetectGCP: true,
+ DetectPCF: true,
+ DetectDocker: true,
+ DetectKubernetes: true,
},
{
- DetectAWS: false,
- DetectAzure: false,
- DetectGCP: false,
- DetectPCF: false,
- DetectDocker: false,
+ DetectAWS: false,
+ DetectAzure: false,
+ DetectGCP: false,
+ DetectPCF: false,
+ DetectDocker: false,
+ DetectKubernetes: false,
},
}
for _, c := range configs {
@@ -186,6 +195,8 @@ type utilizationCrossAgentTestcase struct {
RAMMIB *uint64 `json:"input_total_ram_mib"`
LogicalProcessors *int `json:"input_logical_processors"`
Hostname string `json:"input_hostname"`
+ FullHostname string `json:"input_full_hostname"`
+ Addresses []string `json:"input_ip_address"`
BootID string `json:"input_boot_id"`
AWSID string `json:"input_aws_id"`
AWSType string `json:"input_aws_type"`
@@ -206,6 +217,7 @@ type utilizationCrossAgentTestcase struct {
LogicalProcessors json.RawMessage `json:"NEW_RELIC_UTILIZATION_LOGICAL_PROCESSORS"`
RAWMMIB json.RawMessage `json:"NEW_RELIC_UTILIZATION_TOTAL_RAM_MIB"`
Hostname string `json:"NEW_RELIC_UTILIZATION_BILLING_HOSTNAME"`
+ KubernetesHost string `json:"KUBERNETES_SERVICE_HOST"`
} `json:"input_environment_variables"`
}
@@ -250,6 +262,14 @@ func crossAgentVendors(tc utilizationCrossAgentTestcase) *vendors {
v.PCF.validate()
}
+ GatherKubernetes(v, func(key string) string {
+ if key == "KUBERNETES_SERVICE_HOST" {
+ return tc.Config.KubernetesHost
+ }
+
+ return ""
+ })
+
if v.isEmpty() {
return nil
}
@@ -266,18 +286,18 @@ func compactJSON(js []byte) []byte {
func runUtilizationCrossAgentTestcase(t *testing.T, tc utilizationCrossAgentTestcase) {
- // Skip utilitzation cross agent tests for metadata versions newer than
- // the supported one (which is version 3).
- type version struct {
- MetadataVersion uint64 `json:metadata_version`
- }
-
- var v version
+ // check metadata version expected and skip test if beyond the currently supported level
+ var d Data
- err := json.Unmarshal(tc.ExpectedOutput, &v)
+ err := json.Unmarshal(tc.ExpectedOutput, &d)
+ if nil != err {
+ t.Errorf("Unable to decode expected output for test \"%s\" - %s", tc.Name, err)
+ return
+ }
- if err == nil && v.MetadataVersion != 3 {
- t.Skip("Unsupported utilization metdata version: ", v.MetadataVersion)
+ if metadataVersion < d.MetadataVersion {
+ t.Logf("skipping test \"%s\" - test metadata version (%d) > supported (%d)", tc.Name, d.MetadataVersion, metadataVersion)
+ return
}
var ConfigRAWMMIB int
@@ -303,6 +323,8 @@ func runUtilizationCrossAgentTestcase(t *testing.T, tc utilizationCrossAgentTest
BootID: tc.BootID,
Vendors: crossAgentVendors(tc),
Config: overrideFromConfig(cfg),
+ FullHostname: tc.FullHostname,
+ Addresses: tc.Addresses,
}
js, err := json.Marshal(data)
@@ -344,4 +366,9 @@ func TestVendorsIsEmpty(t *testing.T) {
if v.isEmpty() {
t.Fatal("non-empty vendors registers as empty")
}
+
+ var nilVendors *vendors
+ if !nilVendors.isEmpty() {
+ t.Fatal("nil vendors should be empty")
+ }
}
diff --git a/tests/integration/api/metadata/test_linking_metadata_dt.php b/tests/integration/api/metadata/test_linking_metadata_dt.php
index e70799174..afc0dd69b 100644
--- a/tests/integration/api/metadata/test_linking_metadata_dt.php
+++ b/tests/integration/api/metadata/test_linking_metadata_dt.php
@@ -59,4 +59,4 @@
tap_equal($payload->{"d"}->{"tr"}, $metadata['trace.id'], 'trace id');
tap_equal($payload->{"d"}->{"id"}, $metadata['span.id'], 'span id');
-tap_assert(!isset($metadata['entity.guid']), 'entity guid');
+tap_assert(isset($metadata['entity.guid']) && $metadata['entity.guid'] !== '', 'entity guid');
diff --git a/tests/integration/api/metadata/test_linking_metadata_no_dt.php b/tests/integration/api/metadata/test_linking_metadata_no_dt.php
index 97c9e1a3c..753482c8a 100644
--- a/tests/integration/api/metadata/test_linking_metadata_no_dt.php
+++ b/tests/integration/api/metadata/test_linking_metadata_no_dt.php
@@ -45,6 +45,6 @@
tap_equal('SERVICE', $metadata['entity.type'], 'entity type');
tap_equal(gethostname(), $metadata['hostname'], 'host name');
-tap_assert(!isset($metadata['entity.guid']), 'entity guid');
+tap_assert(isset($metadata['entity.guid']) && $metadata['entity.guid'] !== '', 'entity guid');
tap_assert(!isset($metadata['trace.id']), 'trace id');
tap_assert(!isset($metadata['span.id']), 'span id');