Performance Optimization
Optimera prestanda för web, databas och applikationer
Vad du hittar här
Omfattande guide för prestandaoptimering på alla nivåer - från frontend till databas. Lär dig mäta, analysera och förbättra prestanda med moderna verktyg och tekniker.
Innehåll
Performance Optimization Examples
Web Performance Metrics
Mät och analysera viktiga prestandamått för webbapplikationer.
// Performance Observer API — log a summary object for every observed entry.
const observer = new PerformanceObserver((list) => {
  list.getEntries().forEach((entry) => {
    const { name, entryType, duration, startTime } = entry;
    console.log({ name, type: entryType, duration, startTime });
  });
});

// Subscribe to several categories of performance entries at once.
observer.observe({
  entryTypes: ['navigation', 'resource', 'paint', 'layout-shift', 'largest-contentful-paint']
});
// Core Web Vitals — register one observer per metric and log updates.
function getCoreWebVitals() {
  // Largest Contentful Paint (LCP): report the most recent candidate entry.
  new PerformanceObserver((entryList) => {
    const entries = entryList.getEntries();
    const latest = entries[entries.length - 1];
    console.log('LCP:', latest.renderTime || latest.loadTime);
  }).observe({ entryTypes: ['largest-contentful-paint'] });

  // First Input Delay (FID): delay between the first interaction and
  // the moment its event handler starts running.
  new PerformanceObserver((entryList) => {
    const [firstInput] = entryList.getEntries();
    console.log('FID:', firstInput.processingStart - firstInput.startTime);
  }).observe({ type: 'first-input', buffered: true });

  // Cumulative Layout Shift (CLS): accumulate shifts that were not
  // caused by recent user input.
  let clsValue = 0;
  new PerformanceObserver((entryList) => {
    entryList.getEntries()
      .filter((entry) => !entry.hadRecentInput)
      .forEach((entry) => {
        clsValue += entry.value;
      });
    console.log('CLS:', clsValue);
  }).observe({ type: 'layout-shift', buffered: true });
}
// Custom performance marks: bracket a region of code with two marks,
// then measure the span between them.
performance.mark('myFunction-start');
// ... function code ...
performance.mark('myFunction-end');
performance.measure('myFunction', 'myFunction-start', 'myFunction-end');

const [measure] = performance.getEntriesByName('myFunction');
console.log(`Function took ${measure.duration}ms`);
💡 Förklaring: Performance Observer API ger detaljerad insikt i sidans prestanda i realtid
Database Query Optimization
Optimera databasprestanda med indexering och query-analys.
-- PostgreSQL Query Optimization
-- Analyze query performance: EXPLAIN ANALYZE executes the query and
-- reports the actual plan chosen, with per-node row counts and timings.
EXPLAIN ANALYZE
SELECT u.*, p.title, p.created_at
FROM users u
LEFT JOIN posts p ON p.user_id = u.id
WHERE u.created_at > '2024-01-01'
ORDER BY u.created_at DESC
LIMIT 100;
-- Create indexes for frequent queries.
CREATE INDEX idx_users_created_at ON users(created_at DESC);
CREATE INDEX idx_posts_user_id ON posts(user_id);
CREATE INDEX idx_posts_created_at ON posts(created_at DESC);
-- Composite index covering both the WHERE filter and the ORDER BY.
CREATE INDEX idx_users_status_created ON users(status, created_at DESC);
-- Partial index for filtered queries: smaller and cheaper to maintain
-- when most lookups only touch rows with status = 'active'.
CREATE INDEX idx_active_users ON users(created_at DESC)
WHERE status = 'active';
-- Heuristic: list high-cardinality columns (good index candidates) that
-- do not yet appear in any index definition on their table.
-- NOTE(review): matching indexdef with LIKE on the column name is a
-- rough textual check and can yield false positives (substring matches)
-- and false negatives (expression indexes).
SELECT
    schemaname,
    tablename,
    attname,
    n_distinct,
    most_common_vals
FROM pg_stats
WHERE schemaname = 'public'
  AND n_distinct > 100
  AND NOT EXISTS (
      SELECT 1
      FROM pg_indexes
      -- Qualify by schema as well as table name; the original matched
      -- same-named tables in other schemas and hid missing indexes.
      WHERE pg_indexes.schemaname = pg_stats.schemaname
        AND pg_indexes.tablename = pg_stats.tablename
        AND pg_indexes.indexdef LIKE '%' || attname || '%'
  );
-- Find slow queries via the pg_stat_statements extension
-- (requires CREATE EXTENSION pg_stat_statements and the matching
-- shared_preload_libraries setting).
SELECT
    query,
    calls,
    mean_exec_time,
    total_exec_time,
    min_exec_time,
    max_exec_time
FROM pg_stat_statements
WHERE mean_exec_time > 100  -- queries averaging over 100 ms
ORDER BY mean_exec_time DESC
LIMIT 20;
; Connection pooling with a PgBouncer config
[databases]
mydb = host=localhost port=5432 dbname=mydb
[pgbouncer]
; transaction pooling: a server connection is held only for the duration
; of a transaction, so many clients can share few server connections
pool_mode = transaction
max_client_conn = 1000
default_pool_size = 25
reserve_pool_size = 5
reserve_pool_timeout = 3
server_idle_timeout = 600
💡 Förklaring: Rätt indexering och connection pooling kan förbättra databasprestanda dramatiskt
Node.js Performance Optimization
Optimera Node.js applikationer för maximal prestanda.
// Cluster mode: use every CPU core by forking one worker per core.
const cluster = require('cluster');
const os = require('os');

// cluster.isMaster was deprecated and renamed to isPrimary in Node 16;
// support both so the snippet runs on old and new Node versions.
const isPrimary = cluster.isPrimary ?? cluster.isMaster;

if (isPrimary) {
  const numCPUs = os.cpus().length;
  console.log(`Master ${process.pid} is running`);

  // Fork one worker per CPU core.
  for (let i = 0; i < numCPUs; i++) {
    cluster.fork();
  }

  cluster.on('exit', (worker, code, signal) => {
    console.log(`Worker ${worker.process.pid} died`);
    // Only replace workers that died unexpectedly; restarting workers
    // that were disconnected on purpose causes restart loops on shutdown.
    if (!worker.exitedAfterDisconnect) {
      cluster.fork();
    }
  });
} else {
  // Worker process: each worker runs its own server instance.
  require('./server.js');
}
// Memory optimization helpers.
const v8 = require('v8');

// NOTE(review): --max-old-space-size is a startup flag; setting it via
// setFlagsFromString() after the heap exists is not guaranteed to take
// effect. Prefer `node --max-old-space-size=4096` or NODE_OPTIONS.
v8.setFlagsFromString('--max-old-space-size=4096');

// Log process memory usage every 30 s, rounded to whole megabytes.
const toMB = (bytes) => `${Math.round(bytes / 1024 / 1024)}MB`;
const memoryMonitor = setInterval(() => {
  const usage = process.memoryUsage();
  console.log({
    rss: toMB(usage.rss),
    heapTotal: toMB(usage.heapTotal),
    heapUsed: toMB(usage.heapUsed),
    external: toMB(usage.external)
  });
}, 30000);
// Monitoring must not keep an otherwise-finished process alive.
memoryMonitor.unref();
// Async performance patterns: bound concurrency with p-limit.
const pLimit = require('p-limit');
const limit = pLimit(10); // at most 10 operations in flight at once

// Process every item in parallel, but never more than the limit allows.
async function processItems(items) {
  const tasks = items.map((item) => limit(() => processItem(item)));
  return Promise.all(tasks);
}
// Cache frequently accessed data in-process.
const NodeCache = require('node-cache');
const cache = new NodeCache({
  stdTTL: 600, // default TTL: 10 minutes
  checkperiod: 120 // sweep expired keys every 2 minutes
});

/**
 * Return the cached value for `key`, or fetch it, cache it and return it.
 * @param {string} key - cache key
 * @param {() => Promise<*>} fetchFunction - invoked on a cache miss
 * @returns {Promise<*>} the cached or freshly fetched value
 */
async function getCachedData(key, fetchFunction) {
  const cached = cache.get(key);
  // node-cache returns undefined on a miss; compare explicitly so that
  // legitimately falsy values (0, '', false, null) are served from the
  // cache instead of being re-fetched on every call.
  if (cached !== undefined) return cached;
  const data = await fetchFunction();
  cache.set(key, data);
  return data;
}
// Stream processing for large data sets.
const { pipeline } = require('stream/promises');
const { Transform } = require('stream');

// Object-mode transform that processes each chunk asynchronously and
// forwards errors through the stream instead of throwing.
const processStream = new Transform({
  objectMode: true,
  async transform(chunk, encoding, callback) {
    try {
      const processed = await processChunk(chunk);
      callback(null, processed);
    } catch (error) {
      callback(error);
    }
  }
});

// Top-level `await` is a syntax error in a CommonJS (require-based)
// module, so run the pipeline inside an async wrapper instead.
(async () => {
  await pipeline(
    readStream,
    processStream,
    writeStream
  );
})().catch((err) => {
  console.error('Pipeline failed:', err);
  process.exitCode = 1;
});
💡 Förklaring: Clustering, caching och streaming är nycklar till skalbar Node.js-prestanda
Frontend Performance Optimization
Optimera frontend-prestanda med moderna tekniker.
// Webpack optimization config
module.exports = {
  optimization: {
    // Mark unused exports so the minifier can drop them (tree shaking).
    usedExports: true,
    minimize: true,
    // NOTE(review): sideEffects: false declares ALL modules side-effect
    // free; confirm nothing relies on import side effects (CSS imports,
    // polyfills) before enabling this globally.
    sideEffects: false,
    splitChunks: {
      chunks: 'all',
      cacheGroups: {
        // Everything under node_modules goes into one shared "vendors" chunk.
        vendor: {
          test: /[\\/]node_modules[\\/]/,
          name: 'vendors',
          priority: 10,
          reuseExistingChunk: true
        },
        // Application modules imported by at least two chunks.
        common: {
          minChunks: 2,
          priority: 5,
          reuseExistingChunk: true
        }
      }
    }
  }
};
// Lazy loading with React: the bundle chunk for HeavyComponent is only
// fetched the first time the component renders.
import { lazy, Suspense } from 'react';
const HeavyComponent = lazy(() =>
  // The magic comment gives the emitted chunk a stable, readable name.
  import(/* webpackChunkName: "heavy" */ './HeavyComponent')
);
// Suspense renders the fallback until the lazy chunk has loaded.
function App() {
  return (
    <Suspense fallback={<div>Loading...</div>}>
      <HeavyComponent />
    </Suspense>
  );
}
// Image optimization: defer rendering the real <img> until it scrolls
// near the viewport, using IntersectionObserver.
// NOTE(review): relies on useState/useRef/useEffect being imported from
// 'react' — the imports are not shown in this snippet; confirm.
const ImageOptimizer = ({ src, alt, ...props }) => {
  const [isIntersecting, setIsIntersecting] = useState(false);
  const imgRef = useRef();
  useEffect(() => {
    const observer = new IntersectionObserver(
      ([entry]) => {
        if (entry.isIntersecting) {
          setIsIntersecting(true);
          // One-shot: stop observing once the image has become visible.
          observer.disconnect();
        }
      },
      { threshold: 0.1 } // fire when 10% of the placeholder is visible
    );
    if (imgRef.current) {
      observer.observe(imgRef.current);
    }
    // Cleanup on unmount; disconnect is a no-op if already called above.
    return () => observer.disconnect();
  }, []);
  return (
    <div ref={imgRef}>
      {isIntersecting ? (
        <img
          src={src}
          alt={alt}
          loading="lazy"
          decoding="async"
          {...props}
        />
      ) : (
        // Lightweight grey placeholder until the image is near the viewport.
        <div style={{ backgroundColor: '#f0f0f0', ...props.style }} />
      )}
    </div>
  );
};
// Service Worker for caching
// sw.js — cache-first strategy with runtime caching of fetched resources.
const CACHE_NAME = 'app-v1';
// App shell to pre-cache at install time.
const urlsToCache = [
  '/',
  '/styles/main.css',
  '/scripts/main.js'
];
// Pre-cache the shell; install fails if any URL fails to cache (addAll
// is all-or-nothing).
self.addEventListener('install', event => {
  event.waitUntil(
    caches.open(CACHE_NAME)
      .then(cache => cache.addAll(urlsToCache))
  );
});
self.addEventListener('fetch', event => {
  event.respondWith(
    caches.match(event.request)
      .then(response => {
        // Cache hit - return response
        if (response) {
          return response;
        }
        return fetch(event.request).then(response => {
          // Only cache complete same-origin responses; type 'basic'
          // excludes opaque cross-origin responses.
          if (!response || response.status !== 200 || response.type !== 'basic') {
            return response;
          }
          // Clone before caching: a response body can only be consumed once.
          const responseToCache = response.clone();
          caches.open(CACHE_NAME)
            .then(cache => {
              cache.put(event.request, responseToCache);
            });
          return response;
        });
      })
  );
});
💡 Förklaring: Code splitting, lazy loading och service workers minskar laddningstider avsevärt
Monitoring & Analysis Tools
Lighthouse CI
Automatisera prestandatester i CI/CD pipeline.
# .lighthouserc.js
// Lighthouse CI configuration (.lighthouserc.js).
module.exports = {
  ci: {
    collect: {
      // Pages to audit; each URL is audited numberOfRuns times and the
      // results aggregated to reduce run-to-run variance.
      url: ['http://localhost:3000/', 'http://localhost:3000/about'],
      numberOfRuns: 3,
      settings: {
        preset: 'desktop'
      }
    },
    assert: {
      preset: 'lighthouse:recommended',
      assertions: {
        // 'warn' only reports; 'error' fails the CI run.
        'categories:performance': ['warn', { minScore: 0.9 }],
        'categories:accessibility': ['error', { minScore: 0.95 }],
        'categories:seo': ['warn', { minScore: 0.9 }],
        // Metric budgets are in milliseconds; CLS is a unitless score.
        'first-contentful-paint': ['warn', { maxNumericValue: 2000 }],
        'largest-contentful-paint': ['warn', { maxNumericValue: 2500 }],
        'interactive': ['warn', { maxNumericValue: 3500 }],
        'cumulative-layout-shift': ['warn', { maxNumericValue: 0.1 }]
      }
    },
    upload: {
      target: 'temporary-public-storage'
    }
  }
};
# GitHub Action: run Lighthouse CI on every push and pull request.
# (Indentation restored — the flattened form is not valid YAML.)
name: Lighthouse CI
on: [push, pull_request]
jobs:
  lighthouse:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-node@v2
      - run: npm install
      - run: npm run build
      - run: npm install -g @lhci/cli@0.9.x
      - run: lhci autorun
APM with New Relic
Application Performance Monitoring setup.
// newrelic.js — New Relic agent configuration.
exports.config = {
  app_name: ['My Application'],
  // Read from the environment so the key stays out of source control.
  license_key: process.env.NEW_RELIC_LICENSE_KEY,
  logging: {
    level: 'info'
  },
  distributed_tracing: {
    enabled: true
  },
  transaction_tracer: {
    enabled: true,
    // 'apdex_f': trace transactions slower than 4x the apdex threshold.
    transaction_threshold: 'apdex_f',
    // Obfuscate SQL literals before sending query traces.
    record_sql: 'obfuscated',
    // Collect an explain plan for queries slower than 500 ms.
    explain_threshold: 500
  },
  error_collector: {
    enabled: true,
    // 404s are expected noise; do not report them as errors.
    ignore_status_codes: [404]
  }
};
// Custom instrumentation
const newrelic = require('newrelic');
// Transaction tracking: wrap the user fetch in a named segment so it
// appears as its own span inside the web transaction trace.
app.get('/api/users/:id', (req, res) => {
  newrelic.startSegment('fetchUser', true, async () => {
    const user = await db.getUser(req.params.id);
    // Custom attributes — queryable on the transaction in New Relic.
    newrelic.addCustomAttribute('userId', req.params.id);
    newrelic.addCustomAttribute('userType', user.type);
    res.json(user);
    // NOTE(review): no error handling here — if db.getUser rejects, the
    // request never receives a response. Confirm this is intentional.
  });
});
// Background job monitoring: jobs run outside any web transaction, so
// start an explicit background transaction around the work.
function processJob(job) {
  return newrelic.startBackgroundTransaction('processJob', () => {
    return processJobLogic(job);
  });
}
Redis Caching Strategy
Implementera effektiv caching med Redis.
const redis = require('redis');
const { promisify } = require('util');

// Reconnect policy: fail fast on refused connections, give up after an
// hour of total retrying or 10 attempts, otherwise back off linearly
// up to 3 seconds between attempts.
function redisRetryStrategy(options) {
  if (options.error && options.error.code === 'ECONNREFUSED') {
    return new Error('Redis connection refused');
  }
  if (options.total_retry_time > 1000 * 60 * 60) {
    return new Error('Redis retry time exhausted');
  }
  if (options.attempt > 10) {
    return undefined;
  }
  return Math.min(options.attempt * 100, 3000);
}

const client = redis.createClient({
  host: process.env.REDIS_HOST,
  port: process.env.REDIS_PORT,
  password: process.env.REDIS_PASSWORD,
  retry_strategy: redisRetryStrategy
});

// Promise-based wrappers around the callback API (node_redis v3 style).
const getAsync = promisify(client.get).bind(client);
const setAsync = promisify(client.set).bind(client);
const delAsync = promisify(client.del).bind(client);
const existsAsync = promisify(client.exists).bind(client);
// Express cache middleware: serve a cached JSON body when present,
// otherwise let the handler run and transparently cache what it sends.
// Generalized: ANY `:param` placeholder in keyPattern is filled from
// req.params (the original only substituted the literal ':id').
// Placeholders with no matching route param are left untouched.
const cache = (keyPattern, ttl = 300) => {
  return async (req, res, next) => {
    const key = keyPattern.replace(/:(\w+)/g, (match, name) =>
      req.params[name] !== undefined ? req.params[name] : match
    );
    try {
      const cached = await getAsync(key);
      if (cached) {
        return res.json(JSON.parse(cached));
      }
    } catch (error) {
      // A cache-read failure must never break the request path.
      console.error('Redis get error:', error);
    }
    // Store original send
    const originalSend = res.json;
    // Override res.json so the handler's response gets cached with a TTL.
    res.json = async function(data) {
      try {
        await setAsync(key, JSON.stringify(data), 'EX', ttl);
      } catch (error) {
        console.error('Redis set error:', error);
      }
      originalSend.call(this, data);
    };
    next();
  };
};
// Usage: cache GET /api/users/:id responses for 10 minutes.
app.get('/api/users/:id', cache('user::id', 600), getUserHandler);

// Cache invalidation: delete every key derived from the given user id.
async function invalidateUserCache(userId) {
  const suffixes = ['', ':posts', ':profile'];
  const deletions = suffixes.map((suffix) => delAsync(`user:${userId}${suffix}`));
  await Promise.all(deletions);
}
// Batch operations. node_redis v3 (the callback client promisified
// above) exposes batch()/multi() — pipeline() is an ioredis API and
// throws here. Top-level `await` is also invalid in a CommonJS module,
// so expose the batch write as an async function instead.
async function cacheUsersInBatch(users) {
  const batch = client.batch();
  users.forEach(user => {
    batch.set(`user:${user.id}`, JSON.stringify(user), 'EX', 3600);
  });
  // batch.exec takes a callback; promisify it so callers can await.
  await promisify(batch.exec).bind(batch)();
}
Optimization Techniques
🔧 Bundle Size Analysis
Analysera och minska bundle-storlek
💡 Lösning: Använd webpack-bundle-analyzer för att visualisera dependencies
🔧 Tree Shaking
Ta bort oanvänd kod automatiskt
💡 Lösning: Aktivera production mode och använd ES6 modules
🔧 HTTP/2 Push
Förladdning av kritiska resurser
💡 Lösning: Konfigurera server push för CSS och kritisk JS
🔧 Brotli Compression
Bättre komprimering än gzip
💡 Lösning: Aktivera Brotli på server-nivå för 15-20% mindre filer
Key Performance Metrics
📈 Response Time
Tid från request till response
🎯 Mål: < 200ms för API-anrop
📈 Time to First Byte (TTFB)
Tid till första byte från servern
🎯 Mål: < 600ms
📈 First Contentful Paint (FCP)
När användaren ser första innehållet
🎯 Mål: < 1.8s
📈 Time to Interactive (TTI)
När sidan blir interaktiv
🎯 Mål: < 3.8s
🏆 Performance Best Practices
Mät innan du optimerar
Använd profiling tools för att identifiera verkliga flaskhalsar, gissa inte.
Optimera kritiska vägar först
Fokusera på de delar som påverkar användarupplevelsen mest.
Cacha smart på alla nivåer
Implementera caching från CDN till databas för maximal effekt.
Automatisera prestandatester
Integrera Lighthouse och liknande verktyg i CI/CD för att fånga regressioner.