Skip to content

Commit

Permalink
Improve stateproof API performance in v2 (#9283)
Browse files Browse the repository at this point in the history
Improve `/api/v1/transactions/{id}/stateproof` API performance.

* Change grpc requests to match performance environments
* Change transaction ID query to clamp consensus timestamp to `[validStart, validStart + 35m]`
* Change record file query to clamp consensus timestamp to `[timestamp, timestamp+10s]`
* Fix and clean up Stackgres upgrade documentation
* Rename `consensusNs` to `consensusTimestamp` in `stateproof.js`

---------

Signed-off-by: Steven Sheehy <[email protected]>
  • Loading branch information
steven-sheehy authored Sep 6, 2024
1 parent 63d8ab6 commit d2f7848
Show file tree
Hide file tree
Showing 5 changed files with 152 additions and 127 deletions.
4 changes: 4 additions & 0 deletions charts/hedera-mirror/values-prod.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,10 @@ grpc:
priorityClassName: medium
prometheusRules:
enabled: true
resources:
requests:
cpu: 1000m
memory: 1024Mi
serviceMonitor:
enabled: true

Expand Down
37 changes: 0 additions & 37 deletions docs/runbook/perform-stackgres-security-upgrade.md

This file was deleted.

45 changes: 45 additions & 0 deletions docs/runbook/stackgres-upgrade.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Stackgres Upgrade

## Problem

After upgrading the Stackgres Helm chart version, we need to perform a security upgrade to ensure all its components
are updated in the current cluster.

## Execution

After successful deployment of the upgraded Stackgres Helm chart, we need to perform the following steps:

1. Determine the namespace and name of each sharded cluster. These values will be used in the next step, once per instance.

```
kubectl get sgshardedclusters -A
NAMESPACE NAME VERSION
mainnet-citus mirror-citus 16.2
```

2. Create a file containing the YAML below, replacing `<namespace>` and the `sgShardedCluster` value with the correct
   values from step one, then execute the command:
```
kubectl apply -n <namespace> -f - <<EOF
apiVersion: stackgres.io/v1
kind: SGShardedDbOps
metadata:
name: stackgres-upgrade
spec:
maxRetries: 1
op: securityUpgrade
priorityClassName: critical
securityUpgrade:
method: InPlace
sgShardedCluster: mirror-citus
EOF
```
3. Verify that the clusters are annotated with the correct Stackgres version.
```
kubectl get sgclusters -n <namespace> -o json | jq '.items[].metadata.annotations."stackgres.io/operatorVersion"'
```
4. Once the operation completes successfully, delete the SGShardedDbOps resource(s):
```
kubectl delete sgshardeddbops -n <namespace> stackgres-upgrade
```
59 changes: 34 additions & 25 deletions hedera-mirror-rest/__tests__/stateproof.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import stateproof from '../stateproof';
import {CompositeRecordFile} from '../stream';
import TransactionId from '../transactionId';
import {opsMap} from '../utils';
import * as utils from '../utils.js';

global.pool = {};

Expand All @@ -38,10 +39,10 @@ const {
canReachConsensus,
downloadRecordStreamFilesFromObjectStorage,
formatCompactableRecordFile,
getAddressBooksAndNodeAccountIdsByConsensusNs,
getAddressBooksAndNodeAccountIdsByConsensusTimestamp,
getQueryParamValues,
getRCDFileInfoByConsensusNs,
getSuccessfulTransactionConsensusNs,
getRCDFileInfoByConsensusTimestamp,
getSuccessfulTransactionConsensusTimestamp,
} = stateproof;

const emptyQueryResult = {
Expand All @@ -59,56 +60,62 @@ const verifyFakeCallCountAndLastCallParamsArg = (fake, expectedCount, expectedLa
expect(actualParams).toEqual(expectedLastCallParams);
};

describe('getSuccessfulTransactionConsensusNs', () => {
const expectedValidConsensusNs = '1234567891000000001';
describe('getSuccessfulTransactionConsensusTimestamp', () => {
const expectedValidConsensusTimestamp = '1234567891000000001';
const validQueryResult = {
rows: [{consensus_timestamp: expectedValidConsensusNs}],
rows: [{consensus_timestamp: expectedValidConsensusTimestamp}],
};
const transactionId = TransactionId.fromString('0.0.1-1234567891-000111222');
const {maxTransactionConsensusTimestampRangeNs} = config.query;
const validStart = BigInt(transactionId.getValidStartNs());

test('with transaction found in db table', async () => {
const fakeQuery = sinon.fake.resolves(validQueryResult);
global.pool = {queryQuietly: fakeQuery};

const consensusNs = await getSuccessfulTransactionConsensusNs(transactionId, 0, false);
expect(consensusNs).toEqual(expectedValidConsensusNs);
const consensusTimestamp = await getSuccessfulTransactionConsensusTimestamp(transactionId, 0, false);
expect(consensusTimestamp).toEqual(expectedValidConsensusTimestamp);
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [
transactionId.getEntityId().getEncodedId(),
transactionId.getValidStartNs(),
validStart,
0,
false,
validStart + maxTransactionConsensusTimestampRangeNs,
]);
});

test('with transaction not found', async () => {
const fakeQuery = sinon.fake.resolves(emptyQueryResult);
global.pool = {queryQuietly: fakeQuery};

await expect(getSuccessfulTransactionConsensusNs(transactionId, 0, false)).rejects.toThrow();
await expect(getSuccessfulTransactionConsensusTimestamp(transactionId, 0, false)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [
transactionId.getEntityId().getEncodedId(),
transactionId.getValidStartNs(),
validStart,
0,
false,
validStart + maxTransactionConsensusTimestampRangeNs,
]);
});

test('with db query error', async () => {
const fakeQuery = sinon.fake.rejects(new Error('db runtime error'));
global.pool = {queryQuietly: fakeQuery};

await expect(getSuccessfulTransactionConsensusNs(transactionId, 0, false)).rejects.toThrow();
await expect(getSuccessfulTransactionConsensusTimestamp(transactionId, 0, false)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [
transactionId.getEntityId().getEncodedId(),
transactionId.getValidStartNs(),
validStart,
0,
false,
validStart + maxTransactionConsensusTimestampRangeNs,
]);
});
});

describe('getRCDFileInfoByConsensusNs', () => {
const consensusNs = '1578342501111222333';
describe('getRCDFileInfoByConsensusTimestamp', () => {
const consensusTimestamp = 1578342501111222333n;
const upperBound = consensusTimestamp + config.query.maxRecordFileCloseIntervalNs;
const expectedRCDFileName = '2020-02-09T18_30_25.001721Z.rcd';
const validQueryResult = {
rows: [{bytes: null, name: expectedRCDFileName, node_account_id: '3', version: 5}],
Expand All @@ -124,29 +131,29 @@ describe('getRCDFileInfoByConsensusNs', () => {
const fakeQuery = sinon.fake.resolves(validQueryResult);
global.pool = {queryQuietly: fakeQuery};

const info = await getRCDFileInfoByConsensusNs(consensusNs);
const info = await getRCDFileInfoByConsensusTimestamp(consensusTimestamp);
expect(info).toEqual(expectedRCDFileInfo);
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, consensusNs);
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [consensusTimestamp, upperBound]);
});

test('with record file not found', async () => {
const fakeQuery = sinon.fake.resolves(emptyQueryResult);
global.pool = {queryQuietly: fakeQuery};

await expect(getRCDFileInfoByConsensusNs(consensusNs)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, consensusNs);
await expect(getRCDFileInfoByConsensusTimestamp(consensusTimestamp)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [consensusTimestamp, upperBound]);
});

test('with db query error', async () => {
const fakeQuery = sinon.fake.rejects(new Error('db runtime error'));
global.pool = {queryQuietly: fakeQuery};

await expect(getRCDFileInfoByConsensusNs(consensusNs)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, consensusNs);
await expect(getRCDFileInfoByConsensusTimestamp(consensusTimestamp)).rejects.toThrow();
verifyFakeCallCountAndLastCallParamsArg(fakeQuery, 1, [consensusTimestamp, upperBound]);
});
});

describe('getAddressBooksAndNodeAccountIdsByConsensusNs', () => {
describe('getAddressBooksAndNodeAccountIdsByConsensusTimestamp', () => {
const nodeAccountId3 = EntityId.parse('0.0.3');
const nodeAccountId4 = EntityId.parse('0.0.4');
const nodeAccountId5 = EntityId.parse('0.0.5');
Expand All @@ -159,7 +166,7 @@ describe('getAddressBooksAndNodeAccountIdsByConsensusNs', () => {
_.map(nodeAccountIds, (id) => id.toString()),
','
);
const transactionConsensusNs = '1234567899000000021';
const transactionConsensusTimestamp = '1234567899000000021';

let queryResultWithNodeAccountIds;
let queryResultWithMemos;
Expand Down Expand Up @@ -208,7 +215,7 @@ describe('getAddressBooksAndNodeAccountIdsByConsensusNs', () => {
global.pool = {queryQuietly: queryStub};

if (expectPass) {
const result = await getAddressBooksAndNodeAccountIdsByConsensusNs(transactionConsensusNs);
const result = await getAddressBooksAndNodeAccountIdsByConsensusTimestamp(transactionConsensusTimestamp);
expect(result.addressBooks).toEqual(
_.map(queryResult.rows, (row) => Buffer.from(row.file_data).toString('base64'))
);
Expand All @@ -224,7 +231,9 @@ describe('getAddressBooksAndNodeAccountIdsByConsensusNs', () => {

expect(queryStub.callCount).toEqual(1);
} else {
await expect(getAddressBooksAndNodeAccountIdsByConsensusNs(transactionConsensusNs)).rejects.toThrow();
await expect(
getAddressBooksAndNodeAccountIdsByConsensusTimestamp(transactionConsensusTimestamp)
).rejects.toThrow();
expect(queryStub.callCount).toEqual(1);
}
};
Expand Down
Loading

0 comments on commit d2f7848

Please sign in to comment.