.NET & C# Development · Lesson 62 of 92
Redis Cache-Aside Pattern — Scale Beyond One Server
Why Caching Matters in OrderFlow
The products catalogue is read on almost every order creation. Product data rarely changes. Without caching:
Client → API → SQL Server → 12ms per request
At 1,000 req/s = 12 seconds of SQL Server CPU per second.

With Redis:
Client → API → Redis hit → 0.3ms per request
SQL Server only called when cache misses (1-2% of requests).

Caching isn't just performance — it's reducing database load and improving resilience.
Setup
dotnet add OrderFlow.Infrastructure package Microsoft.Extensions.Caching.StackExchangeRedis
dotnet add OrderFlow.Infrastructure package StackExchange.Redis

// In DependencyInjection.cs
// Registers IDistributedCache backed by Redis (used by CacheService below).
services.AddStackExchangeRedisCache(opt =>
{
opt.Configuration = configuration.GetConnectionString("Redis");
opt.InstanceName = "orderflow:"; // key prefix to avoid collisions
});
// For direct Redis operations (pub/sub, transactions)
// NOTE(review): Connect() runs synchronously at startup and will throw if the
// "Redis" connection string is missing — confirm config is present in all environments.
services.AddSingleton<IConnectionMultiplexer>(
ConnectionMultiplexer.Connect(configuration.GetConnectionString("Redis")!));A Typed Cache Service
IDistributedCache works with raw bytes — wrap it in a typed service:
// OrderFlow.Infrastructure/Caching/CacheService.cs
/// <summary>
/// Typed JSON wrapper over <see cref="IDistributedCache"/> so handlers never
/// deal with raw byte arrays. Values are serialized as camelCase JSON.
/// </summary>
public class CacheService(IDistributedCache cache) : ICacheService
{
    // Built once: JsonSerializerOptions is expensive to construct per call.
    private static readonly JsonSerializerOptions _jsonOptions = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
    };

    /// <summary>Returns the cached value for <paramref name="key"/>, or default(T) on a miss.</summary>
    public async Task<T?> GetAsync<T>(string key, CancellationToken ct = default)
    {
        var bytes = await cache.GetAsync(key, ct);
        if (bytes is null) return default;
        return JsonSerializer.Deserialize<T>(bytes, _jsonOptions);
    }

    /// <summary>Serializes <paramref name="value"/> and stores it under <paramref name="key"/>.</summary>
    /// <param name="absoluteExpiry">Hard TTL relative to now; the entry is evicted after this regardless of use.</param>
    /// <param name="slidingExpiry">Evicted if not accessed within this window.</param>
    public async Task SetAsync<T>(
        string key,
        T value,
        TimeSpan? absoluteExpiry = null,
        TimeSpan? slidingExpiry = null,
        CancellationToken ct = default)
    {
        var options = new DistributedCacheEntryOptions();
        if (absoluteExpiry is not null) options.AbsoluteExpirationRelativeToNow = absoluteExpiry;
        if (slidingExpiry is not null) options.SlidingExpiration = slidingExpiry;
        var bytes = JsonSerializer.SerializeToUtf8Bytes(value, _jsonOptions);
        await cache.SetAsync(key, bytes, options, ct);
    }

    /// <summary>Removes the entry for <paramref name="key"/> (no-op if absent).</summary>
    public Task RemoveAsync(string key, CancellationToken ct = default) =>
        cache.RemoveAsync(key, ct);

    /// <summary>
    /// Cache-aside helper: returns the cached value, or runs <paramref name="factory"/>,
    /// caches its result for <paramref name="expiry"/>, and returns it.
    /// </summary>
    public async Task<T> GetOrSetAsync<T>(
        string key,
        Func<CancellationToken, Task<T>> factory,
        TimeSpan expiry,
        CancellationToken ct = default)
    {
        // BUG FIX: inspect the raw bytes instead of GetAsync<T>. For a non-nullable
        // value type T, a miss comes back as default(T) (e.g. 0) and
        // `cached is not null` is ALWAYS true for value types — the factory would
        // never run and callers would silently receive default(T).
        var bytes = await cache.GetAsync(key, ct);
        if (bytes is not null)
        {
            var cached = JsonSerializer.Deserialize<T>(bytes, _jsonOptions);
            if (cached is not null) return cached;
        }

        var value = await factory(ct);
        await SetAsync(key, value, absoluteExpiry: expiry, ct: ct);
        return value;
    }
}
/// <summary>Typed cache API used by application handlers; implemented by CacheService.</summary>
public interface ICacheService
{
// Returns default(T) on a cache miss.
Task<T?> GetAsync<T>(string key, CancellationToken ct = default);
// Stores value with optional absolute and/or sliding expiration (no expiry if both null).
Task SetAsync<T>(string key, T value, TimeSpan? absoluteExpiry = null, TimeSpan? slidingExpiry = null, CancellationToken ct = default);
// Removes the entry if present; no-op otherwise.
Task RemoveAsync(string key, CancellationToken ct = default);
// Cache-aside helper: return cached value, or build via factory and cache it for expiry.
Task<T> GetOrSetAsync<T>(string key, Func<CancellationToken, Task<T>> factory, TimeSpan expiry, CancellationToken ct = default);
}Cache Keys — Be Consistent
// OrderFlow.Infrastructure/Caching/CacheKeys.cs
/// <summary>
/// Single source of truth for Redis key formats. Centralising the builders keeps
/// reads and invalidation in sync — a typo'd key silently never gets invalidated.
/// </summary>
public static class CacheKeys
{
    public static string Product(Guid id) => $"products:{id}";

    // ToLowerInvariant, not ToLower: culture-sensitive casing (e.g. Turkish dotless-i)
    // would produce different keys on servers with different current cultures.
    public static string ProductBySku(string sku) => $"products:sku:{sku.ToLowerInvariant()}";

    public static string ProductsList() => "products:list";
    public static string Customer(Guid id) => $"customers:{id}";
    public static string OrderSummaries(Guid customerId) => $"orders:customer:{customerId}";
}
Consistent key naming prevents typos and makes cache invalidation predictable.
Cache-Aside Pattern in Query Handlers
// OrderFlow.Application/Products/Queries/GetProductByIdQueryHandler.cs
public class GetProductByIdQueryHandler(
    IProductRepository repo,
    ICacheService cache) : IRequestHandler<GetProductByIdQuery, ProductDto?>
{
    /// <summary>
    /// Cache-aside read: consult Redis first; on a miss, load from SQL Server
    /// and populate the cache before returning.
    /// </summary>
    public async Task<ProductDto?> Handle(GetProductByIdQuery query, CancellationToken ct)
    {
        var key = CacheKeys.Product(query.ProductId);

        // Fast path: serve straight from Redis when the entry exists.
        if (await cache.GetAsync<ProductDto>(key, ct) is { } hit)
            return hit;

        // Miss — fall through to the database.
        var product = await repo.GetByIdAsync(query.ProductId, ct);
        if (product is null) return null;

        // Populate the cache with a 30 min TTL (products change infrequently).
        var dto = product.ToDto();
        await cache.SetAsync(key, dto, absoluteExpiry: TimeSpan.FromMinutes(30), ct: ct);
        return dto;
    }
}
// GetOrSetAsync makes this even more concise
public async Task<ProductDto?> Handle(GetProductByIdQuery query, CancellationToken ct)
{
    // BUG FIX: do NOT pre-fetch the product from the repository "to check for null"
    // before calling GetOrSetAsync — that hits the database on every request and
    // defeats the cache entirely. The factory below only runs on a cache miss,
    // and the null-conditional replaces the unsafe null-forgiving `!`.
    return await cache.GetOrSetAsync(
        CacheKeys.Product(query.ProductId),
        async token => (await repo.GetByIdAsync(query.ProductId, token))?.ToDto(),
        TimeSpan.FromMinutes(30),
        ct);
}
Cache the Products List
// GetAllProductsQueryHandler.cs
/// <summary>
/// Returns a page of products. Only the default first page (no search filter)
/// is cached — other pages and search results always hit the database.
/// </summary>
public async Task<PagedResponse<ProductDto>> Handle(
    GetAllProductsQuery query, CancellationToken ct)
{
    var isDefaultPage = query.Page == 1 && query.Search is null;

    // Search results and deeper pages are too dynamic to cache.
    if (!isDefaultPage)
        return await repo.GetPagedAsync(query.Page, query.PageSize, query.Search, ct);

    var cacheKey = CacheKeys.ProductsList();
    var cached = await cache.GetAsync<PagedResponse<ProductDto>>(cacheKey, ct);
    if (cached is not null) return cached;

    // Miss: load page 1 and cache it with a short 5 min TTL.
    var page = await repo.GetPagedAsync(1, query.PageSize, null, ct);
    await cache.SetAsync(cacheKey, page, absoluteExpiry: TimeSpan.FromMinutes(5), ct: ct);
    return page;
}
Cache Invalidation via Domain Events
When a product is updated, its cached entries must be removed immediately:
// OrderFlow.Domain/Events/ProductUpdatedEvent.cs
public record ProductUpdatedEvent(Guid ProductId, string Sku) : IDomainEvent;

// Domain entity raises the event
/// <summary>
/// Sets a new price and raises ProductUpdatedEvent so downstream handlers
/// (e.g. cache invalidation) react without the command handler remembering to.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">If <paramref name="newPrice"/> is negative.</exception>
public void UpdatePrice(decimal newPrice)
{
    // ArgumentOutOfRangeException is the most specific fit for a bad numeric value;
    // it derives from ArgumentException, so existing catch blocks still match.
    if (newPrice < 0)
        throw new ArgumentOutOfRangeException(nameof(newPrice), newPrice, "Price cannot be negative.");

    Price = newPrice;
    RaiseDomainEvent(new ProductUpdatedEvent(Id, Sku));
}
// OrderFlow.Application/Products/Events/InvalidateProductCacheHandler.cs
public class InvalidateProductCacheHandler(ICacheService cache)
    : INotificationHandler<ProductUpdatedEvent>
{
    /// <summary>
    /// Evicts every cache entry that could now hold stale data for the updated
    /// product: the by-id entry, the by-SKU entry, and the first-page list.
    /// </summary>
    public async Task Handle(ProductUpdatedEvent notification, CancellationToken ct)
    {
        Task[] evictions =
        [
            cache.RemoveAsync(CacheKeys.Product(notification.ProductId), ct),
            cache.RemoveAsync(CacheKeys.ProductBySku(notification.Sku), ct),
            cache.RemoveAsync(CacheKeys.ProductsList(), ct),
        ];
        await Task.WhenAll(evictions);
    }
}
This is the cleanest pattern: domain events drive cache invalidation — no manual "remember to clear cache" in every command handler.
Caching Stock Levels — A Tricky Case
Stock levels change on every order. Don't cache them with a long TTL:
// ❌ Wrong — stale stock data could allow overselling
await cache.SetAsync(CacheKeys.Product(id), dto, absoluteExpiry: TimeSpan.FromMinutes(30));
// ✅ Better — very short TTL for inventory data; still helps burst traffic
await cache.SetAsync(
CacheKeys.Product(id),
dto,
absoluteExpiry: TimeSpan.FromSeconds(10)); // 10s: reduces DB load on burst, minimal staleness risk
// ✅ Best — serve product details from cache but fetch stock live
// NOTE(review): CacheKeys.ProductDetails is not defined in the CacheKeys class shown
// earlier — add it there (e.g. $"products:details:{id}") so all keys stay centralised;
// also consider passing ct to GetAsync here for consistent cancellation.
var cached = await cache.GetAsync<ProductDetailsDto>(CacheKeys.ProductDetails(id));
// ProductDetailsDto has everything EXCEPT StockLevel
var stock = await repo.GetCurrentStockAsync(id, ct); // always live
return cached with { StockLevel = stock };

Distributed Locking — Prevent Cache Stampede
When a cached item expires and 100 requests arrive simultaneously, all 100 hit the database at once — the "thundering herd" problem:
// Use Redis distributed lock to ensure only one request regenerates the cache
/// <summary>
/// Cache-aside with a Redis distributed lock so that on a cold key only ONE
/// instance runs the (expensive) factory while others briefly wait — prevents
/// the thundering-herd / cache-stampede problem.
/// </summary>
public async Task<T> GetOrSetWithLockAsync<T>(
    string key,
    Func<CancellationToken, Task<T>> factory,
    TimeSpan expiry,
    CancellationToken ct = default)
{
    var cached = await GetAsync<T>(key, ct);
    if (cached is not null) return cached;

    var lockKey = $"{key}:lock";
    // BUG FIX: release must be token-guarded. If the factory outlives the 30s
    // lock TTL, the lock expires and another instance may acquire it; a bare
    // KeyDelete in `finally` would then release THAT instance's lock.
    var lockToken = Guid.NewGuid().ToString("N");
    var db = _redis.GetDatabase();

    // LockTakeAsync = atomic SET key token NX PX — auto-expiry prevents deadlock
    // if this process dies while holding the lock.
    var acquired = await db.LockTakeAsync(lockKey, lockToken, TimeSpan.FromSeconds(30));
    if (!acquired)
    {
        // Another instance is regenerating — wait briefly, retry the cache,
        // and only fall back to the factory if it is still empty.
        await Task.Delay(100, ct);
        return await GetAsync<T>(key, ct) ?? await factory(ct);
    }

    try
    {
        var value = await factory(ct);
        await SetAsync(key, value, absoluteExpiry: expiry, ct: ct);
        return value;
    }
    finally
    {
        // Compare-and-delete: only removes the lock if it still holds our token.
        await db.LockReleaseAsync(lockKey, lockToken);
    }
}
Response Caching for Public Endpoints
For unauthenticated endpoints like a public product catalogue:
// Add middleware
// NOTE(review): UseResponseCaching (honours HTTP cache headers) and UseOutputCache
// (server-side output cache) are separate features — the CacheOutput call below only
// requires UseOutputCache. Confirm both middlewares are intentional.
app.UseResponseCaching();
app.UseOutputCache();
// On the endpoint
group.MapGet("/public", GetPublicProducts)
.AllowAnonymous()
.CacheOutput(p => p.Expire(TimeSpan.FromMinutes(5)).Tag("products"));
// Invalidate by tag when products change
await outputCache.EvictByTagAsync("products", ct);

Monitoring Cache Performance
// Log cache hit/miss rate
/// <summary>
/// Same contract as the plain GetAsync, but records a hit/miss metric and the
/// Redis round-trip latency for every lookup.
/// </summary>
public async Task<T?> GetAsync<T>(string key, CancellationToken ct = default)
{
    var timer = Stopwatch.StartNew();
    var payload = await cache.GetAsync(key, ct);
    timer.Stop();

    // First key segment (e.g. "products") groups the metric by entity type.
    var category = key.Split(':')[0];
    _metrics.RecordCacheOperation(category, payload is not null, timer.Elapsed);

    return payload is null
        ? default
        : JsonSerializer.Deserialize<T>(payload, _jsonOptions);
}
Redis also has built-in stats:
redis-cli INFO stats | grep keyspace_hits
redis-cli INFO stats | grep keyspace_misses
# Target: >95% hit rate on hot paths

Choosing TTL Values for OrderFlow
| Data | TTL | Rationale |
|------|-----|-----------|
| Product details | 30 min | Changes infrequently; invalidate on update event |
| Product list (page 1) | 5 min | Marketing changes prices; short TTL = acceptable lag |
| Customer profile | 60 min | Rarely changes |
| Order summaries | 2 min | Updated on each order action |
| Stock levels | 10 sec | High change rate; only useful for burst absorption |
| Auth/JWT | Don't cache | Always validate tokens; stateless |
Key Takeaways
- Cache-aside (read-through, write-invalidate) is the most flexible pattern for .NET APIs
- Typed `ICacheService` wraps `IDistributedCache` — no raw bytes in your handlers
- Consistent key naming (`products:{id}`) is not optional — it's what makes invalidation possible
- Domain events drive cache invalidation — no manual "remember to clear the cache" scattered through handlers
- Different TTLs for different data — long for static data, seconds for volatile data like stock levels
- Distributed locking prevents cache stampede under high load
- Target >95% cache hit rate on hot read paths — measure with Redis `INFO stats`
What is the Cache-Aside (lazy loading) pattern?