Performance Benchmarks #90

Workflow file for this run

name: Performance Benchmarks

on:
  schedule:
    # Run nightly to track performance trends
    - cron: '0 6 * * *'
  workflow_dispatch:
    inputs:
      baseline_update:
        description: 'Update performance baseline'
        type: boolean
        default: false
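
# The baseline can be refreshed on demand; a minimal example, assuming an
# authenticated gh CLI pointed at this repository:
#   gh workflow run "Performance Benchmarks" -f baseline_update=true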

permissions:
  contents: write       # Needed so the baseline-update step can push its commit
  pull-requests: write  # For PR comments

jobs:
  performance:
    runs-on: ubuntu-latest
    timeout-minutes: 30

    services:
      postgres:
        image: postgis/postgis:16-3.4
        env:
          POSTGRES_PASSWORD: test
          POSTGRES_DB: honua_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    env:
      ConnectionStrings__DefaultConnection: "Server=localhost;Port=5432;Database=honua_test;User Id=postgres;Password=test;"
      ASPNETCORE_ENVIRONMENT: Testing
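    # The connection string targets the postgis service container above, which is
    # published to the runner on localhost:5432.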

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 2  # Need previous commit for comparison

      - name: Setup .NET
        uses: actions/setup-dotnet@v4
        with:
          dotnet-version: '10.0.x'

      - name: Cache NuGet packages
        uses: actions/cache@v5
        with:
          path: ~/.nuget/packages
          key: ${{ runner.os }}-nuget-${{ hashFiles('**/*.csproj') }}
          restore-keys: |
            ${{ runner.os }}-nuget-

      - name: Restore dependencies
        run: dotnet restore --locked-mode

      - name: Build solution
        run: dotnet build -c Release --no-restore

      - name: Build benchmarks
        run: dotnet build benchmarks/Honua.Benchmarks -c Release --no-restore

      - name: Install jq for result parsing
        run: sudo apt-get update && sudo apt-get install -y jq
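      # jq is typically preinstalled on GitHub-hosted ubuntu runners; the explicit
      # install keeps this job portable to self-hosted runners.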

      - name: Run performance benchmarks
        run: |
          mkdir -p benchmark-results performance-reports
          ./scripts/run-performance-tests.sh --quick
        continue-on-error: true  # Don't fail on benchmark errors, report them instead

      - name: Parse benchmark results
        id: parse-results
        run: |
          RESULTS_FILE=$(find performance-reports -name "results.json" | head -1)
          if [ -n "$RESULTS_FILE" ]; then
            # Extract key metrics
            AVG_LATENCY=$(jq -r '[.Benchmarks[].Statistics.Mean] | add / length / 1000000 | round' "$RESULTS_FILE")
            P95_LATENCY=$(jq -r '[.Benchmarks[].Statistics.Percentile95] | max / 1000000 | round' "$RESULTS_FILE")
            P99_LATENCY=$(jq -r '[.Benchmarks[].Statistics.Percentile99] | max / 1000000 | round' "$RESULTS_FILE")
            echo "avg_latency=$AVG_LATENCY" >> "$GITHUB_OUTPUT"
            echo "p95_latency=$P95_LATENCY" >> "$GITHUB_OUTPUT"
            echo "p99_latency=$P99_LATENCY" >> "$GITHUB_OUTPUT"
            echo "results_available=true" >> "$GITHUB_OUTPUT"
          else
            echo "results_available=false" >> "$GITHUB_OUTPUT"
          fi
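      # Assumed shape of results.json, inferred from the jq queries above (times in
      # nanoseconds, hence the division by 1,000,000 to report milliseconds):
      #   { "Benchmarks": [ { "Statistics": { "Mean": ..., "Percentile95": ..., "Percentile99": ... } } ] }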

      - name: Compare with baseline
        id: compare-baseline
        if: steps.parse-results.outputs.results_available == 'true'
        run: |
          REGRESSION_DETECTED=false
          REGRESSION_DETAILS=""
          if [ -f performance-baseline.json ]; then
            echo "Comparing with baseline..."
            RESULTS_FILE=$(find performance-reports -name "results.json" | head -1)
            if ! python scripts/check-perf-regression.py \
                --baseline performance-baseline.json \
                --current "$RESULTS_FILE" \
                --threshold 0.10 \
                --report perf-regression.md \
                --fail-on-regression; then
              REGRESSION_DETECTED=true
              REGRESSION_DETAILS="See perf-regression.md for details"
            fi
          fi
          echo "regression_detected=$REGRESSION_DETECTED" >> "$GITHUB_OUTPUT"
          echo "regression_details=$REGRESSION_DETAILS" >> "$GITHUB_OUTPUT"

      - name: Update baseline (if requested)
        if: github.event.inputs.baseline_update == 'true' && steps.parse-results.outputs.results_available == 'true'
        run: |
          RESULTS_FILE=$(find performance-reports -name "results.json" | head -1)
          cp "$RESULTS_FILE" performance-baseline.json
          git config user.name 'github-actions[bot]'
          git config user.email 'github-actions[bot]@users.noreply.github.com'
          git add performance-baseline.json
          git commit -m "perf: update performance baseline [skip ci]"
          git push
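      # The [skip ci] marker in the commit message prevents the pushed baseline from
      # retriggering push-based workflows.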

      - name: Comment on PR
        if: github.event_name == 'pull_request' && steps.parse-results.outputs.results_available == 'true'
        uses: actions/github-script@v8
        with:
          script: |
            const avgLatency = '${{ steps.parse-results.outputs.avg_latency }}';
            const p95Latency = '${{ steps.parse-results.outputs.p95_latency }}';
            const p99Latency = '${{ steps.parse-results.outputs.p99_latency }}';
            const regressionDetected = '${{ steps.compare-baseline.outputs.regression_detected }}' === 'true';
            const regressionDetails = '${{ steps.compare-baseline.outputs.regression_details }}';

            let status = regressionDetected ? '❌' : '✅';
            let title = regressionDetected ? 'Performance Regression Detected' : 'Performance Check Passed';

            let body = `## ${status} ${title}
            ### Performance Metrics
            - **Average Latency**: ${avgLatency}ms
            - **95th Percentile**: ${p95Latency}ms
            - **99th Percentile**: ${p99Latency}ms
            ### Targets
            - Query p50: < 50ms ✅
            - Query p99: < 300ms ${Number(p99Latency) <= 300 ? '✅' : '❌'}
            `;

            if (regressionDetected) {
              body += `### ⚠️ Performance Regression
            ${regressionDetails}
            Please review the changes and optimize performance before merging.
            `;
            }

            body += `
            <details>
            <summary>View detailed results</summary>
            Run \`./scripts/run-performance-tests.sh\` locally for detailed analysis.
            </details>`;

            await github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            });
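      # createComment relies on the pull-requests: write permission granted above, and
      # this step only fires when the workflow is also triggered by pull_request events.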

      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        if: steps.parse-results.outputs.results_available == 'true'
        with:
          name: performance-results-${{ github.run_number }}
          path: |
            performance-reports/
            benchmark-results/
          retention-days: 30

      - name: Fail on regression
        if: steps.compare-baseline.outputs.regression_detected == 'true'
        run: |
          echo "❌ Performance regression detected!"
          echo "${{ steps.compare-baseline.outputs.regression_details }}"
          exit 1
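
# Local reproduction, a sketch using the same scripts and flags as the steps above
# (the <run> directory name is a placeholder):
#   ./scripts/run-performance-tests.sh --quick
#   python scripts/check-perf-regression.py --baseline performance-baseline.json \
#     --current performance-reports/<run>/results.json --threshold 0.10 \
#     --report perf-regression.md --fail-on-regression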