ci: Add performance impact step to CI (#9916)

Author: Manuel
Date: 2025-11-08 21:21:43 +01:00
Committed by: GitHub
Parent: 76826447f8
Commit: 92788a19d3

4 changed files with 706 additions and 2 deletions

.github/workflows/ci-performance.yml (new file, 291 lines)

@@ -0,0 +1,291 @@
name: ci-performance
on:
pull_request:
branches:
- alpha
- beta
- release
- 'release-[0-9]+.x.x'
- next-major
paths-ignore:
- '**.md'
- 'docs/**'
env:
NODE_VERSION: 24.11.0
MONGODB_VERSION: 8.0.4
permissions:
contents: read
pull-requests: write
issues: write
jobs:
performance-check:
name: Benchmarks
runs-on: ubuntu-latest
timeout-minutes: 30
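# The job benchmarks the base branch first to produce baseline.json, then checks out
# the PR head and re-runs the suite to produce pr.json, and finally compares the two.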
steps:
- name: Checkout base branch
uses: actions/checkout@v4
with:
ref: ${{ github.base_ref }}
fetch-depth: 1
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies (base)
run: npm ci
- name: Build Parse Server (base)
run: npm run build
- name: Run baseline benchmarks
id: baseline
run: |
echo "Checking if benchmark script exists..."
if [ ! -f "benchmark/performance.js" ]; then
echo "⚠️ Benchmark script not found in base branch - this is expected for new features"
echo "Skipping baseline benchmark"
echo '[]' > baseline.json
echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt
exit 0
fi
echo "Running baseline benchmarks..."
EXIT_CODE=0
npm run benchmark > baseline-output.txt 2>&1 || EXIT_CODE=$?
echo "Benchmark command completed with exit code: $EXIT_CODE"
echo "Output file size: $(wc -c < baseline-output.txt) bytes"
echo "--- Begin baseline-output.txt ---"
cat baseline-output.txt
echo "--- End baseline-output.txt ---"
# Extract JSON from output (everything between first [ and last ])
sed -n '/^\[/,/^\]/p' baseline-output.txt > baseline.json || echo '[]' > baseline.json
echo "Extracted JSON size: $(wc -c < baseline.json) bytes"
echo "Baseline benchmark results:"
cat baseline.json
continue-on-error: true
- name: Upload baseline results
uses: actions/upload-artifact@v4
with:
name: baseline-benchmark
path: |
baseline.json
baseline-output.txt
retention-days: 7
- name: Checkout PR branch
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 1
clean: true
- name: Setup Node.js (PR)
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODE_VERSION }}
cache: 'npm'
- name: Install dependencies (PR)
run: npm ci
- name: Build Parse Server (PR)
run: npm run build
- name: Run PR benchmarks
id: pr-bench
run: |
echo "Running PR benchmarks..."
EXIT_CODE=0
npm run benchmark > pr-output.txt 2>&1 || EXIT_CODE=$?
echo "Benchmark command completed with exit code: $EXIT_CODE"
echo "Output file size: $(wc -c < pr-output.txt) bytes"
echo "--- Begin pr-output.txt ---"
cat pr-output.txt
echo "--- End pr-output.txt ---"
# Extract JSON from output (everything between first [ and last ])
sed -n '/^\[/,/^\]/p' pr-output.txt > pr.json || echo '[]' > pr.json
echo "Extracted JSON size: $(wc -c < pr.json) bytes"
echo "PR benchmark results:"
cat pr.json
continue-on-error: true
- name: Upload PR results
uses: actions/upload-artifact@v4
with:
name: pr-benchmark
path: |
pr.json
pr-output.txt
retention-days: 7
- name: Verify benchmark files exist
run: |
echo "Checking for benchmark result files..."
if [ ! -f baseline.json ] || [ ! -s baseline.json ]; then
echo "⚠️ baseline.json is missing or empty, creating empty array"
echo '[]' > baseline.json
fi
if [ ! -f pr.json ] || [ ! -s pr.json ]; then
echo "⚠️ pr.json is missing or empty, creating empty array"
echo '[]' > pr.json
fi
echo "baseline.json size: $(wc -c < baseline.json) bytes"
echo "pr.json size: $(wc -c < pr.json) bytes"
- name: Store benchmark result (PR)
uses: benchmark-action/github-action-benchmark@v1
if: github.event_name == 'pull_request' && hashFiles('pr.json') != ''
continue-on-error: true
with:
name: Parse Server Performance
tool: 'customSmallerIsBetter'
output-file-path: pr.json
github-token: ${{ secrets.GITHUB_TOKEN }}
auto-push: false
save-data-file: false
alert-threshold: '110%'
comment-on-alert: true
fail-on-alert: false
alert-comment-cc-users: '@parse-community/maintainers'
summary-always: true
- name: Compare benchmark results
id: compare
run: |
node -e "
const fs = require('fs');
let baseline, pr;
try {
baseline = JSON.parse(fs.readFileSync('baseline.json', 'utf8'));
pr = JSON.parse(fs.readFileSync('pr.json', 'utf8'));
} catch (e) {
console.log('⚠️ Could not parse benchmark results');
process.exit(0);
}
// Handle case where baseline doesn't exist (new feature)
if (!Array.isArray(baseline) || baseline.length === 0) {
if (!Array.isArray(pr) || pr.length === 0) {
console.log('⚠️ Benchmark results are empty or invalid');
process.exit(0);
}
console.log('# Performance Benchmark Results\n');
console.log('> Baseline not available - this appears to be a new feature\n');
console.log('| Benchmark | Value | Details |');
console.log('|-----------|-------|---------|');
pr.forEach(result => {
console.log(\`| \${result.name} | \${result.value.toFixed(2)} ms | \${result.extra} |\`);
});
console.log('');
console.log('✅ **New benchmarks established for this feature.**');
process.exit(0);
}
if (!Array.isArray(pr) || pr.length === 0) {
console.log('⚠️ PR benchmark results are empty or invalid');
process.exit(0);
}
console.log('# Performance Comparison\n');
console.log('| Benchmark | Baseline | PR | Change | Status |');
console.log('|-----------|----------|----|---------| ------ |');
let hasRegression = false;
let hasImprovement = false;
baseline.forEach(baseResult => {
const prResult = pr.find(p => p.name === baseResult.name);
if (!prResult) {
console.log(\`| \${baseResult.name} | \${baseResult.value.toFixed(2)} ms | N/A | - | ⚠️ Missing |\`);
return;
}
const baseValue = parseFloat(baseResult.value);
const prValue = parseFloat(prResult.value);
const change = ((prValue - baseValue) / baseValue * 100);
const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
let status = '✅';
if (change > 20) {
status = '❌ Much Slower';
hasRegression = true;
} else if (change > 10) {
status = '⚠️ Slower';
hasRegression = true;
} else if (change < -10) {
status = '🚀 Faster';
hasImprovement = true;
}
console.log(\`| \${baseResult.name} | \${baseValue.toFixed(2)} ms | \${prValue.toFixed(2)} ms | \${changeStr} | \${status} |\`);
});
console.log('');
if (hasRegression) {
console.log('⚠️ **Performance regressions detected.** Please review the changes.');
} else if (hasImprovement) {
console.log('🚀 **Performance improvements detected!** Great work!');
} else {
console.log('✅ **No significant performance changes.**');
}
" | tee comparison.md
- name: Upload comparison
uses: actions/upload-artifact@v4
with:
name: benchmark-comparison
path: comparison.md
retention-days: 30
- name: Prepare comment body
if: github.event_name == 'pull_request'
run: |
echo "## Performance Impact Report" > comment.md
echo "" >> comment.md
if [ -f comparison.md ]; then
cat comparison.md >> comment.md
else
echo "⚠️ Could not generate performance comparison." >> comment.md
fi
echo "" >> comment.md
echo "<details>" >> comment.md
echo "<summary>📊 View detailed results</summary>" >> comment.md
echo "" >> comment.md
echo "### Baseline Results" >> comment.md
echo "\`\`\`json" >> comment.md
cat baseline.json >> comment.md
echo "\`\`\`" >> comment.md
echo "" >> comment.md
echo "### PR Results" >> comment.md
echo "\`\`\`json" >> comment.md
cat pr.json >> comment.md
echo "\`\`\`" >> comment.md
echo "" >> comment.md
echo "</details>" >> comment.md
echo "" >> comment.md
echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md
- name: Comment PR with results
if: github.event_name == 'pull_request'
uses: thollander/actions-comment-pull-request@v2
continue-on-error: true
with:
filePath: comment.md
comment_tag: performance-benchmark
mode: recreate
- name: Generate job summary
if: always()
run: |
if [ -f comparison.md ]; then
cat comparison.md >> $GITHUB_STEP_SUMMARY
else
echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
fi

CONTRIBUTING.md

@@ -21,9 +21,13 @@
- [Good to Know](#good-to-know)
- [Troubleshooting](#troubleshooting)
- [Please Do's](#please-dos)
- [TypeScript Tests](#typescript-tests)
- [Test against Postgres](#test-against-postgres)
- [Postgres with Docker](#postgres-with-docker)
- [Performance Testing](#performance-testing)
- [Adding Tests](#adding-tests)
- [Adding Benchmarks](#adding-benchmarks)
- [Benchmark Guidelines](#benchmark-guidelines)
- [Breaking Changes](#breaking-changes)
- [Deprecation Policy](#deprecation-policy)
- [Feature Considerations](#feature-considerations)
@@ -298,6 +302,58 @@ RUN chmod +x /docker-entrypoint-initdb.d/setup-dbs.sh
Note that the script above will ONLY be executed during initialization of a container that has no data in the database; see the official [Postgres image](https://hub.docker.com/_/postgres) for details. If you want the script to run again, make sure the container's /var/lib/postgresql/data directory is empty.
### Performance Testing
Parse Server includes an automated performance benchmarking system that runs on every pull request to detect performance regressions and track improvements over time.
#### Adding Tests
You should consider adding performance benchmarks if your contribution:
- **Introduces a performance-critical feature**: Features that will be frequently used in production environments, such as new query operations, authentication methods, or data processing functions.
- **Modifies existing critical paths**: Changes to core functionality like object CRUD operations, query execution, user authentication, file operations, or Cloud Code execution.
- **Has potential performance impact**: Any change that affects database operations, network requests, data parsing, caching mechanisms, or algorithmic complexity.
- **Optimizes performance**: If your PR specifically aims to improve performance, adding benchmarks helps verify the improvement and prevents future regressions.
#### Adding Benchmarks
Performance benchmarks are located in [`benchmark/performance.js`](benchmark/performance.js). To add a new benchmark:
1. **Identify the operation to benchmark**: Determine the specific operation you want to measure (e.g., a new query type, a new API endpoint).
2. **Create a benchmark function**: Follow the existing patterns in `benchmark/performance.js`:
```javascript
async function benchmarkNewFeature() {
return measureOperation('Feature Name', async () => {
// Your operation to benchmark
const result = await someOperation();
}, ITERATIONS);
}
```
3. **Add to benchmark suite**: Register your benchmark in the `runBenchmarks()` function:
```javascript
console.error('Running New Feature benchmark...');
await cleanupDatabase();
results.push(await benchmarkNewFeature());
```
4. **Test locally**: Run the benchmarks locally to verify they work:
```bash
npm run benchmark:quick # Quick test with 10 iterations
npm run benchmark # Full test with 100 iterations
```
For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
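Each benchmark resolves to a result object in the `customSmallerIsBetter` format that the CI stores and compares by `name` (see `measureOperation` in `benchmark/performance.js`). A representative entry, with purely illustrative numbers, looks like this:
```javascript
// Illustrative entry; runBenchmarks() prints an array of these objects as JSON,
// which the CI workflow extracts into baseline.json / pr.json.
const exampleResult = {
  name: 'Object Create', // must stay stable across PRs so baseline and PR results can be matched
  value: 12.34,          // mean duration in ms (smaller is better)
  unit: 'ms',
  range: '8.91 - 25.67', // min - max observed duration
  extra: 'p50: 11.80ms, p95: 19.45ms, p99: 24.10ms',
};
```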
#### Benchmark Guidelines
- **Keep benchmarks focused**: Each benchmark should test a single, well-defined operation.
- **Use realistic data**: Test with data that reflects real-world usage patterns.
- **Clean up between runs**: Use `cleanupDatabase()` to ensure consistent test conditions.
- **Consider iteration count**: Use fewer iterations for expensive operations by passing a lower count to `measureOperation`; the default comes from the `BENCHMARK_ITERATIONS` environment variable (exposed as `ITERATIONS` in the script). See the sketch below.
- **Document what you're testing**: Add clear comments explaining what the benchmark measures and why it's important.
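As a minimal sketch that follows these guidelines (it reuses `measureOperation`, `ITERATIONS`, and the `Parse` SDK set up by `initializeParseServer` in `benchmark/performance.js`; the count-query benchmark itself is hypothetical):
```javascript
// Hypothetical benchmark: measures a single, well-defined operation (a count query)
// against realistic seed data, with a reduced iteration count because counts are
// comparatively expensive. Register it in runBenchmarks() after a cleanupDatabase() call.
async function benchmarkCountQuery() {
  // Setup outside the measured loop: 100 objects spread across 10 categories
  const TestObject = Parse.Object.extend('BenchmarkTest');
  const objects = [];
  for (let i = 0; i < 100; i++) {
    const obj = new TestObject();
    obj.set('category', i % 10);
    objects.push(obj);
  }
  await Parse.Object.saveAll(objects);

  return measureOperation('Count Query', async () => {
    const query = new Parse.Query('BenchmarkTest');
    query.equalTo('category', 5);
    await query.count();
  }, Math.floor(ITERATIONS / 10)); // fewer iterations for an expensive operation
}
```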
## Breaking Changes
Breaking changes should be avoided whenever possible. For a breaking change to be accepted, the benefits of the change have to clearly outweigh the costs of developers having to adapt their deployments. If a breaking change is only cosmetic it will likely be rejected and preferred to become obsolete organically during the course of further development, unless it is required as part of a larger change. Breaking changes should follow the [Deprecation Policy](#deprecation-policy).

benchmark/performance.js (new file, 354 lines)

@@ -0,0 +1,354 @@
/**
* Performance Benchmark Suite for Parse Server
*
* This suite measures the performance of critical Parse Server operations
* using the Node.js Performance API. Results are output in a format
* compatible with github-action-benchmark.
*
* Run with: npm run benchmark
*/
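// Progress messages are written to stderr (console.error) and the final JSON results
// array to stdout (console.log), so the CI workflow can extract the results with sed.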
const Parse = require('parse/node');
const { performance } = require('perf_hooks');
const { MongoClient } = require('mongodb');
// Configuration
const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_benchmark_test';
const SERVER_URL = 'http://localhost:1337/parse';
const APP_ID = 'benchmark-app-id';
const MASTER_KEY = 'benchmark-master-key';
const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100', 10);
// Parse Server instance
let parseServer;
let mongoClient;
/**
* Initialize Parse Server for benchmarking
*/
async function initializeParseServer() {
const express = require('express');
const { default: ParseServer } = require('../lib/index.js');
const app = express();
parseServer = new ParseServer({
databaseURI: MONGODB_URI,
appId: APP_ID,
masterKey: MASTER_KEY,
serverURL: SERVER_URL,
silent: true,
allowClientClassCreation: true,
});
app.use('/parse', parseServer.app);
return new Promise((resolve, reject) => {
const server = app.listen(1337, (err) => {
if (err) {
reject(new Error(`Failed to start server: ${err.message}`));
return;
}
Parse.initialize(APP_ID);
Parse.masterKey = MASTER_KEY;
Parse.serverURL = SERVER_URL;
resolve(server);
});
server.on('error', (err) => {
reject(new Error(`Server error: ${err.message}`));
});
});
}
/**
* Clean up database between benchmarks
*/
async function cleanupDatabase() {
try {
if (!mongoClient) {
mongoClient = await MongoClient.connect(MONGODB_URI);
}
const db = mongoClient.db();
const collections = await db.listCollections().toArray();
for (const collection of collections) {
if (!collection.name.startsWith('system.')) {
await db.collection(collection.name).deleteMany({});
}
}
} catch (error) {
throw new Error(`Failed to cleanup database: ${error.message}`);
}
}
/**
* Measure average time for an async operation over multiple iterations
*/
async function measureOperation(name, operation, iterations = ITERATIONS) {
const times = [];
for (let i = 0; i < iterations; i++) {
const start = performance.now();
await operation();
const end = performance.now();
times.push(end - start);
}
// Calculate statistics
times.sort((a, b) => a - b);
const sum = times.reduce((acc, val) => acc + val, 0);
const mean = sum / times.length;
const p50 = times[Math.floor(times.length * 0.5)];
const p95 = times[Math.floor(times.length * 0.95)];
const p99 = times[Math.floor(times.length * 0.99)];
const min = times[0];
const max = times[times.length - 1];
return {
name,
value: mean,
unit: 'ms',
range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
extra: `p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms`,
};
}
/**
* Benchmark: Object Create
*/
async function benchmarkObjectCreate() {
let counter = 0;
return measureOperation('Object Create', async () => {
const TestObject = Parse.Object.extend('BenchmarkTest');
const obj = new TestObject();
obj.set('testField', `test-value-${counter++}`);
obj.set('number', counter);
obj.set('boolean', true);
await obj.save();
});
}
/**
* Benchmark: Object Read (by ID)
*/
async function benchmarkObjectRead() {
// Setup: Create test objects
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
for (let i = 0; i < ITERATIONS; i++) {
const obj = new TestObject();
obj.set('testField', `read-test-${i}`);
objects.push(obj);
}
await Parse.Object.saveAll(objects);
let counter = 0;
return measureOperation('Object Read', async () => {
const query = new Parse.Query('BenchmarkTest');
await query.get(objects[counter++ % objects.length].id);
});
}
/**
* Benchmark: Object Update
*/
async function benchmarkObjectUpdate() {
// Setup: Create test objects
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
for (let i = 0; i < ITERATIONS; i++) {
const obj = new TestObject();
obj.set('testField', `update-test-${i}`);
obj.set('counter', 0);
objects.push(obj);
}
await Parse.Object.saveAll(objects);
let counter = 0;
return measureOperation('Object Update', async () => {
const obj = objects[counter++ % objects.length];
obj.increment('counter');
obj.set('lastUpdated', new Date());
await obj.save();
});
}
/**
* Benchmark: Simple Query
*/
async function benchmarkSimpleQuery() {
// Setup: Create test data
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
for (let i = 0; i < 100; i++) {
const obj = new TestObject();
obj.set('category', i % 10);
obj.set('value', i);
objects.push(obj);
}
await Parse.Object.saveAll(objects);
let counter = 0;
return measureOperation('Simple Query', async () => {
const query = new Parse.Query('BenchmarkTest');
query.equalTo('category', counter++ % 10);
await query.find();
});
}
/**
* Benchmark: Batch Save (saveAll)
*/
async function benchmarkBatchSave() {
const BATCH_SIZE = 10;
return measureOperation('Batch Save (10 objects)', async () => {
const TestObject = Parse.Object.extend('BenchmarkTest');
const objects = [];
for (let i = 0; i < BATCH_SIZE; i++) {
const obj = new TestObject();
obj.set('batchField', `batch-${i}`);
obj.set('timestamp', new Date());
objects.push(obj);
}
await Parse.Object.saveAll(objects);
}, Math.floor(ITERATIONS / BATCH_SIZE)); // Fewer iterations for batch operations
}
/**
* Benchmark: User Signup
*/
async function benchmarkUserSignup() {
let counter = 0;
return measureOperation('User Signup', async () => {
counter++;
const user = new Parse.User();
user.set('username', `benchmark_user_${Date.now()}_${counter}`);
user.set('password', 'benchmark_password');
user.set('email', `benchmark${counter}@example.com`);
await user.signUp();
}, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
}
/**
* Benchmark: User Login
*/
async function benchmarkUserLogin() {
// Setup: Create test users
const users = [];
for (let i = 0; i < 10; i++) {
const user = new Parse.User();
user.set('username', `benchmark_login_user_${i}`);
user.set('password', 'benchmark_password');
user.set('email', `login${i}@example.com`);
await user.signUp();
users.push({ username: user.get('username'), password: 'benchmark_password' });
await Parse.User.logOut();
}
let counter = 0;
return measureOperation('User Login', async () => {
const userCreds = users[counter++ % users.length];
await Parse.User.logIn(userCreds.username, userCreds.password);
await Parse.User.logOut();
}, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
}
/**
* Run all benchmarks
*/
async function runBenchmarks() {
console.error('Starting Parse Server Performance Benchmarks...');
console.error(`Iterations per benchmark: ${ITERATIONS}`);
console.error('');
let server;
try {
// Initialize Parse Server
console.error('Initializing Parse Server...');
server = await initializeParseServer();
// Wait for server to be ready
await new Promise(resolve => setTimeout(resolve, 2000));
const results = [];
// Run each benchmark with database cleanup
console.error('Running Object Create benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectCreate());
console.error('Running Object Read benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectRead());
console.error('Running Object Update benchmark...');
await cleanupDatabase();
results.push(await benchmarkObjectUpdate());
console.error('Running Simple Query benchmark...');
await cleanupDatabase();
results.push(await benchmarkSimpleQuery());
console.error('Running Batch Save benchmark...');
await cleanupDatabase();
results.push(await benchmarkBatchSave());
console.error('Running User Signup benchmark...');
await cleanupDatabase();
results.push(await benchmarkUserSignup());
console.error('Running User Login benchmark...');
await cleanupDatabase();
results.push(await benchmarkUserLogin());
// Output results in github-action-benchmark format
console.log(JSON.stringify(results, null, 2));
console.error('');
console.error('Benchmarks completed successfully!');
console.error('');
console.error('Summary:');
results.forEach(result => {
console.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
});
} catch (error) {
console.error('Error running benchmarks:', error);
process.exit(1);
} finally {
// Cleanup
if (mongoClient) {
await mongoClient.close();
}
if (server) {
server.close();
}
// Give some time for cleanup
setTimeout(() => process.exit(0), 1000);
}
}
// Run benchmarks if executed directly
if (require.main === module) {
runBenchmarks();
}
module.exports = { runBenchmarks };

package.json

@@ -138,7 +138,10 @@
"prettier": "prettier --write {src,spec}/{**/*,*}.js",
"prepare": "npm run build",
"postinstall": "node -p 'require(\"./postinstall.js\")()'",
"madge:circular": "node_modules/.bin/madge ./src --circular"
"madge:circular": "node_modules/.bin/madge ./src --circular",
"benchmark": "cross-env MONGODB_VERSION=8.0.4 MONGODB_TOPOLOGY=standalone mongodb-runner exec -t standalone --version 8.0.4 -- --port 27017 -- npm run benchmark:only",
"benchmark:only": "node benchmark/performance.js",
"benchmark:quick": "cross-env BENCHMARK_ITERATIONS=10 npm run benchmark:only"
},
"types": "types/index.d.ts",
"engines": {