Commit 4565d47

Merge branch 'develop' into NR-488604-Data-Ingest-Budgets
2 parents 90fb4e2 + 995819a commit 4565d47

195 files changed: +14516 / −5794 lines changed


scripts/listMdxFreshness.mjs

Lines changed: 33 additions & 48 deletions

@@ -62,63 +62,48 @@ function main() {
   rows.sort((a,b) => b.ageDays - a.ageDays);
   console.log('Sorted files by age (descending)');

-  // Grouping buckets
-  const buckets = [
-    { label: '24+ months', filter: r => r.ageMonths >= 24 },
-    { label: '18-24 months', filter: r => r.ageMonths >= 18 && r.ageMonths < 24 },
-    { label: '12-18 months', filter: r => r.ageMonths >= 12 && r.ageMonths < 18 },
-    { label: '6-12 months', filter: r => r.ageMonths >= 6 && r.ageMonths < 12 },
-    { label: '0-6 months', filter: r => r.ageMonths < 6 },
-  ];
-
-  function renderTable(sectionRows) {
-    if (sectionRows.length === 0) return '_No files_';
-    const header = ['Path','Last Commit','Age (months)','Age (days)'];
-    const lines = [
-      `| ${header.join(' | ')} |`,
-      `| ${header.map(()=> '---').join(' | ')} |`
-    ];
-    for (const r of sectionRows) {
-      lines.push(`| ${r.path} | ${r.lastCommit} | ${r.ageMonths.toFixed(1)} | ${r.ageDays.toFixed(0)} |`);
-    }
-    return lines.join('\n');
+  // Determine age bucket for each row
+  function getAgeBucket(ageMonths) {
+    if (ageMonths >= 24) return '24+ months';
+    if (ageMonths >= 18) return '18-24 months';
+    if (ageMonths >= 12) return '12-18 months';
+    if (ageMonths >= 6) return '6-12 months';
+    return '0-6 months';
   }

-  const summaryCounts = buckets.map(b => ({ label: b.label, count: rows.filter(b.filter).length }));
-
-  const out = [];
-  out.push('# MDX Report');
-  out.push('');
-
-  out.push(`Generated: ${new Date().toISOString()}`);
-  out.push('');
-
-  out.push('## Summary by Age Bucket');
-  out.push('');
-
-  out.push('| Age Bucket | File Count |');
-  out.push('| --- | ---: |');
-  for (const sc of summaryCounts) {
-    out.push(`| ${sc.label} | ${sc.count} |`);
+  // Escape CSV field if needed (contains comma, quote, or newline)
+  function escapeCSV(field) {
+    if (field.includes(',') || field.includes('"') || field.includes('\n')) {
+      return `"${field.replace(/"/g, '""')}"`;
+    }
+    return field;
   }
-  out.push('');
-
-  for (const b of buckets) {
-    const sectionRows = rows.filter(b.filter);
-    out.push(`## ${b.label}`);
-    out.push('');
-    out.push(renderTable(sectionRows));
-    out.push('');
+
+  // Generate CSV output
+  const csvLines = [];
+  csvLines.push('Path,Last Commit,Age (months),Age (days),Age Bucket');
+
+  for (const r of rows) {
+    const bucket = getAgeBucket(r.ageMonths);
+    const line = [
+      escapeCSV(r.path),
+      r.lastCommit,
+      r.ageMonths.toFixed(1),
+      r.ageDays.toFixed(0),
+      bucket
+    ].join(',');
+    csvLines.push(line);
   }
-  const outputTxt = out.join('\n');
+
+  const outputCSV = csvLines.join('\n');
   const outArg = process.argv.find(a => a.startsWith('--out='));
-  const outPath = outArg ? outArg.split('=')[1] : 'mdx-freshness.txt';
+  const outPath = outArg ? outArg.split('=')[1] : 'mdx-freshness.csv';
   try {
-    writeFileSync(outPath, outputTxt, 'utf8');
+    writeFileSync(outPath, outputCSV, 'utf8');
     console.log(`Wrote report to ${outPath}`);
   } catch (e) {
     console.log('Failed writing file, falling back to stdout');
-    console.log(outputTxt);
+    console.log(outputCSV);
   }
   console.log('Done');
 }
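For context, the change above swaps the old Markdown report for plain CSV output, with bucketing moved into `getAgeBucket` and field quoting handled by `escapeCSV`. Below is a minimal standalone sketch that exercises those two helpers exactly as they appear in the diff; the sample rows are hypothetical and not taken from the repository:

```js
// Sketch only: the helpers introduced in this commit, run against made-up rows.
function getAgeBucket(ageMonths) {
  if (ageMonths >= 24) return '24+ months';
  if (ageMonths >= 18) return '18-24 months';
  if (ageMonths >= 12) return '12-18 months';
  if (ageMonths >= 6) return '6-12 months';
  return '0-6 months';
}

function escapeCSV(field) {
  // Quote only when the field contains a comma, double quote, or newline,
  // doubling embedded quotes (RFC 4180-style).
  if (field.includes(',') || field.includes('"') || field.includes('\n')) {
    return `"${field.replace(/"/g, '""')}"`;
  }
  return field;
}

// Hypothetical sample rows.
const rows = [
  { path: 'docs/example, with-comma.mdx', lastCommit: '2023-05-01', ageMonths: 30.4, ageDays: 926 },
  { path: 'docs/recent.mdx', lastCommit: '2025-09-10', ageMonths: 2.1, ageDays: 64 },
];

const csvLines = ['Path,Last Commit,Age (months),Age (days),Age Bucket'];
for (const r of rows) {
  csvLines.push([
    escapeCSV(r.path),
    r.lastCommit,
    r.ageMonths.toFixed(1),
    r.ageDays.toFixed(0),
    getAgeBucket(r.ageMonths),
  ].join(','));
}
console.log(csvLines.join('\n'));
// The first path is quoted because it contains a comma; the second is written as-is.
```

Assuming the rest of the script is unchanged, the report could then be generated with something like `node scripts/listMdxFreshness.mjs --out=mdx-freshness.csv`; the `--out=` flag and the `mdx-freshness.csv` default both appear in the diff above.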

src/content/docs/accounts/accounts-billing/new-relic-one-user-management/user-management-concepts.mdx

Lines changed: 4 additions & 7 deletions

@@ -131,18 +131,15 @@ You can create roles at three different scopes, each serving different purposes:
 
 * **Organization-scoped roles**: You apply these roles for organization-wide functions like managing authentication domains, creating accounts, configuring organization settings, or managing scorecards and teams. Standard roles include:
   * <DNT>**Organization manager**</DNT>: Permissions related to organization settings, including adding accounts, and changing the name of the organization and accounts. This also includes sensitive observability tasks, such as deleting certain entities.
-  * <DNT>**Authentication domain manager**</DNT>: Permissions related to adding and managing users, including configuring authentication domains and customizing groups and roles. Options within this include:
-    * <DNT>**Manage**</DNT>: Can manage all aspects of authentication domains, including configuring domains and adding users.
-    * <DNT>**Read only**</DNT>: Can view authentication domain and user information.
-    * <DNT>**Add users**</DNT>: Can view user information, and add users to the organization, but lacks other auth domain configuration and management abilities.
-    * <DNT>**Read users**</DNT>: Can only view user information.
+  * <DNT>**Authentication domain manager**</DNT>: Permissions related to adding and managing users, including configuring authentication domains and customizing groups and roles.
   * <DNT>**Billing**</DNT>: Lets a user view and manage billing and usage, and data retention. For organizations with multiple accounts, billing is aggregated in the <DNT>**reporting account**</DNT> (usually the first account created in an organization).
-  * <DNT>**Organization product admin**</DNT>: Permissions related to organization-scoped observability features like scorecard and team management. This is the organization-scoped equivalent to <strong>All product admin</strong>.
+  * <DNT>**Organization product admin**</DNT>: Permissions related to organization-scoped observability features like scorecard and team management. This is the organization-scoped equivalent to <strong>All product admin</strong>
+  * <DNT>**Organization read only**</DNT>: Provides read-only access to the New Relic platform organization-scoped features.
 
 * **Account-scoped roles**: You apply these roles for access to platform features within specific accounts, such as configuring APM settings, managing alerts, or running queries. These are the traditional roles most users work with. Standard roles include:
   * <DNT>**All product admin**</DNT>: Includes all New Relic platform permissions except the ability to manage organization-level settings, users, and billing.
   * <DNT>**Standard user**</DNT>: Provides access to our platform features but lacks permissions to configure those features and lacks organization-level and user management permissions.
-  * <DNT>**Read only**</DNT>: Provides read-only access to the New Relic platform.
+  * <DNT>**Read only**</DNT>: Provides read-only access to the New Relic platform account-scoped features.
 
 * **Entity-scoped roles**: You apply these roles for fine-grained access to specific resources like individual dashboards, fleets, or alert policies. This enables precise permission control at the individual resource level. You can create custom entity-scoped roles based on your needs.

src/content/docs/alerts/organize-alerts/change-applied-intelligence-correlation-logic-decisions.mdx

Lines changed: 10 additions & 2 deletions

@@ -412,7 +412,7 @@ The time range is set to 20 minutes by default. You can adjust it between 1-120
 
 #### Step 5: Testing your decision using a simulation [#basic-test-with-simulation]
 
-After adding a filter logic, the system automatically runs a [simulation](#simulations) using the past 7 days of incident data.
+After adding filter logic, the system automatically runs a [simulation](#simulations) using the past 7 days of incident data to help you validate the decision before applying it.
 
 You can also manually trigger the simulation by clicking <DNT>**Simulate**</DNT>, which you may want to do if something is changed in the decision.
 

@@ -1295,9 +1295,17 @@ You can use the correlation assistant to more quickly analyze [incidents](/docs/
 </Collapser>
 </CollapserGroup>
 
+### Simulation vs real-time correlation [#simulation-vs-decisions]
+
+It's important to understand the difference between simulation and real-time correlation in decisions:
+
+* <DNT>**Simulation**</DNT>: Simulation correlation involves analyzing two separate incidents to understand their relationship under simulated conditions. These incidents can originate from either the same underlying issue or from different issues. The focus is on determining potential causative factors or shared characteristics between individual incidents. Simulation helps you test and validate your correlation logic against historical data before applying it in real-time.
+
+* <DNT>**Real-time correlation (decisions)**</DNT>: In contrast, real-time correlation targets distinct issues, with each issue potentially encompassing multiple incidents. The aim is to detect and connect patterns across these multiple incidents to identify underlying issues for more efficient correlation. Real-time correlation leverages live data streams, allowing for prompt identification and response to emerging problems.
+
 ### Using simulation [#simulations]
 
-Simulation will test the logic against the last week of your data and show you how many correlations would have happened. Here's a breakdown of the decision preview information displayed when you simulate:
+Simulation tests your correlation logic by analyzing two separate incidents from the last week of your data, showing you how many correlations would have happened. This allows you to validate your decision logic before it's applied to real-time correlation of issues. Here's a breakdown of the decision preview information displayed when you simulate:
 
 * <DNT>**Potential correlation rate:**</DNT> The percentage of tested incidents this decision would have affected.
 * <DNT>**Total created incidents:**</DNT> The number of incidents tested by this decision.

src/content/docs/apm/agents/nodejs-agent/getting-started/compatibility-requirements-nodejs-agent.mdx

Lines changed: 2 additions & 2 deletions

@@ -302,10 +302,10 @@ supported by the agent.
 | `@hapi/hapi` | 20.1.2 | 21.4.4 | 9.0.0 |
 | `@koa/router` | 12.0.1 | 15.0.0 | 3.2.0 |
 | `@langchain/core` | 0.1.17 | 1.1.1 | 11.13.0 |
-| `@modelcontextprotocol/sdk` | 1.13.0 | 1.24.1 | 13.2.0 |
+| `@modelcontextprotocol/sdk` | 1.13.0 | 1.24.2 | 13.2.0 |
 | `@nestjs/cli` | 9.0.0 | 11.0.14 | 10.1.0 |
 | `@opensearch-project/opensearch` | 2.1.0 | 3.5.1 | 12.10.0 |
-| `@prisma/client` | 5.0.0 | 7.0.1 | 11.0.0 |
+| `@prisma/client` | 5.0.0 | 7.1.0 | 11.0.0 |
 | `@smithy/smithy-client` | 2.0.0 | 4.9.9 | 11.0.0 |
 | `amqplib` | 0.5.0 | 0.10.9 | 2.0.0 |
 | `aws-sdk` | 2.2.48 | 2.1692.0 | 6.2.0 |

src/content/docs/data-apis/manage-data/manage-data-retention.mdx

Lines changed: 17 additions & 0 deletions

@@ -145,6 +145,23 @@ This table shows the default [namespace](/docs/glossary/glossary) retention sett
         8
       </td>
     </tr>
+    <tr>
+      <td>
+        APM
+      </td>
+
+      <td>
+        AI Monitoring
+      </td>
+
+      <td>
+        30
+      </td>
+
+      <td>
+        120
+      </td>
+    </tr>
 
     <tr>
       <td>

src/content/docs/data-apis/understand-data/event-data/events-reported-browser-monitoring.mdx

Lines changed: 9 additions & 0 deletions

@@ -114,6 +114,15 @@ Select an event name in the following table to see its attributes.
         `Span` data is reported for [distributed tracing](/docs/browser/new-relic-browser/browser-pro-features/browser-data-distributed-tracing).
       </td>
     </tr>
+    <tr>
+      <td>
+        [`UserAction`](/attribute-dictionary/?event=UserAction)
+      </td>
+
+      <td>
+        `UserAction` event is captured as the result of a user interaction with the web application. This event includes action information and DOM target identifiers along with several default attributes, such as application and geographic data.
+      </td>
+    </tr>
   </tbody>
 </table>

src/content/docs/distributed-tracing/infinite-tracing-on-premise/bring-your-own-cache.mdx

Lines changed: 26 additions & 26 deletions

@@ -19,21 +19,21 @@ The processor supports any Redis-compatible cache implementation. It has been te
 For production deployments, we recommend using cluster mode (sharded) to ensure high availability and scalability. To enable distributed caching, add the `distributed_cache` configuration to your `tail_sampling` processor section:
 
 ```yaml
-tail_sampling:
-  distributed_cache:
-    connection:
-      address: redis://localhost:6379/0
-      password: 'local'
-    trace_window_expiration: 30s # Default: how long to wait after last span before evaluating
-    in_flight_timeout: 120s # Optional: defaults to trace_window_expiration if not set
-    traces_ttl: 3600s # Optional: default 1 hour
-    cache_ttl: 7200s # Optional: default 2 hours
-    suffix: "itc" # Redis key prefix
-    max_traces_per_batch: 500 # Default: traces processed per evaluation cycle
-    evaluation_interval: 1s # Default: evaluation frequency
-    evaluation_workers: 4 # Default: number of parallel workers (defaults to CPU count)
-    data_compression:
-      format: lz4 # Optional: compression format (none, snappy, zstd, lz4); lz4 recommended
+tail_sampling:
+  distributed_cache:
+    connection:
+      address: redis://localhost:6379/0
+      password: 'local'
+    trace_window_expiration: 30s # Default: how long to wait after last span before evaluating
+    in_flight_timeout: 120s # Optional: defaults to trace_window_expiration if not set
+    traces_ttl: 3600s # Optional: default 1 hour
+    cache_ttl: 7200s # Optional: default 2 hours
+    suffix: "itc" # Redis key prefix
+    max_traces_per_batch: 500 # Default: traces processed per evaluation cycle
+    evaluation_interval: 1s # Default: evaluation frequency
+    evaluation_workers: 4 # Default: number of parallel workers (defaults to CPU count)
+    data_compression:
+      format: lz4 # Optional: compression format (none, snappy, zstd, lz4); lz4 recommended
 ```
 
 <Callout variant="important">

@@ -43,16 +43,16 @@ For production deployments, we recommend using cluster mode (sharded) to ensure
 The `address` parameter must specify a valid Redis-compatible server address using the standard format:
 
 ```shell
-redis[s]://[[username][:password]@][host][:port][/db-number]
+[output] redis[s]://[[username][:password]@][host][:port][/db-number]
 ```
 
 Alternatively, you can embed credentials directly in the `address` parameter:
 
 ```yaml
-tail_sampling:
-  distributed_cache:
-    connection:
-      address: redis://:yourpassword@localhost:6379/0
+tail_sampling:
+  distributed_cache:
+    connection:
+      address: redis://:yourpassword@localhost:6379/0
 ```
 
 The processor is implemented in Go and uses the [go-redis](https://github.com/redis/go-redis/tree/v9) client library.

@@ -114,8 +114,8 @@ Proper Redis instance sizing is critical for optimal performance. Use the config
 
 ### Memory estimation formula
 
-```shell
-Total Memory = (Trace Data) + (Decision Caches) + (Overhead)
+```
+Total Memory = (Trace Data) + (Decision Caches) + (Overhead)
 ```
 
 #### 1. Trace data storage

@@ -133,13 +133,13 @@ Trace data is stored in Redis for the full `traces_ttl` period to support late-a
 
 **Example calculation**: At 10,000 spans/second with 1-hour `traces_ttl`:
 
-```shell
+```
 10,000 spans/sec × 3600 sec × 900 bytes = 32.4 GB
 ```
 
 **With lz4 compression** (we have observed 25% reduction):
 
-```shell
+```
 32.4 GB × 0.75 = 24.3 GB
 ```
 

@@ -164,7 +164,7 @@ When using `distributed_cache`, the decision caches are stored in Redis without
 
 **Example calculation**: 500 traces per batch (default) with 20 spans per trace on average:
 
-```shell
+```
 500 × 20 × 900 bytes = 9 MB per batch
 ```
 

@@ -247,4 +247,4 @@ The processor uses a cascading TTL structure, with each level providing protecti
 4. **`cache_ttl`** (default: 2 hours)
    - Redis key expiration for decision cache entries (sampled/non-sampled)
    - Prevents duplicate evaluation for late-arriving spans
-   - Defined via `distributed_cache.cache_ttl`
+   - Defined via `distributed_cache.cache_ttl`
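The memory-estimation hunks above mostly change code-fence styling, but the arithmetic they contain is easy to reproduce. As a rough sketch (in JavaScript, to match the script earlier in this commit): the 900 bytes/span figure and the ~25% lz4 reduction are the numbers the doc itself uses, while the span rate, TTL, and batch shape below are just the sample inputs from its worked examples:

```js
// Sketch of the Redis sizing arithmetic from the doc's examples above.
const BYTES_PER_SPAN = 900; // per-span estimate used in the doc
const LZ4_FACTOR = 0.75;    // "we have observed 25% reduction"

// Trace data is held in Redis for the full traces_ttl window.
function traceDataBytes(spansPerSecond, tracesTtlSeconds, compressed = false) {
  const raw = spansPerSecond * tracesTtlSeconds * BYTES_PER_SPAN;
  return compressed ? raw * LZ4_FACTOR : raw;
}

const toGB = bytes => (bytes / 1e9).toFixed(1) + ' GB';

console.log(toGB(traceDataBytes(10000, 3600)));       // 32.4 GB (10k spans/sec, 1-hour TTL)
console.log(toGB(traceDataBytes(10000, 3600, true))); // 24.3 GB with lz4

// Per-batch working set: 500 traces × 20 spans × 900 bytes.
console.log((500 * 20 * BYTES_PER_SPAN) / 1e6 + ' MB per batch'); // 9 MB per batch
```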
