The GreenhouseCacheService class implements all Redis operations with proper error handling and logging.
Show Complete implementation (221 lines)
package com.apptolast.invernaderos.features.greenhouse

import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import java.time.Instant
import java.util.concurrent.TimeUnit
import org.slf4j.LoggerFactory
import org.springframework.data.redis.core.RedisTemplate
import org.springframework.stereotype.Service

/**
 * Service for managing GREENHOUSE message cache in Redis.
 *
 * Uses Redis Sorted Set to store messages ordered by timestamp for efficient
 * time-based queries.
 *
 * MULTI-TENANT ISOLATION:
 * - Each tenant has its own cache key: "greenhouse:messages:{tenantId}"
 * - Prevents cross-tenant data access
 * - Required after PostgreSQL multi-tenant migration (V3-V10)
 *
 * Redis Structure:
 * - Key: "greenhouse:messages:{tenantId}"
 * - Type: Sorted Set (ZSET)
 * - Score: timestamp in epoch millis
 * - Value: JSON serialized RealDataDto
 */
@Service
class GreenhouseCacheService(private val redisTemplate: RedisTemplate<String, String>) {

    private val logger = LoggerFactory.getLogger(GreenhouseCacheService::class.java)

    // findAndRegisterModules() picks up jackson-datatype-jsr310 (bundled with Spring Boot)
    // so java.time.Instant fields in RealDataDto can round-trip. A bare jacksonObjectMapper()
    // throws InvalidDefinitionException when deserializing java.time types.
    // NOTE(review): assumes RealDataDto carries an Instant and no custom (de)serializer — confirm.
    private val objectMapper = jacksonObjectMapper().findAndRegisterModules()

    /**
     * Caches a message in Redis with tenant isolation.
     *
     * @param message The message to cache (must include tenantId)
     */
    fun cacheMessage(message: RealDataDto) {
        try {
            val tenantId = message.tenantId
            val messagesKey = getMessagesKey(tenantId)
            val score = message.timestamp.toEpochMilli().toDouble()
            val jsonValue = message.toJson()

            // Add message to tenant's sorted set
            redisTemplate.opsForZSet().add(messagesKey, jsonValue, score)

            // Keep only last MAX_CACHED_MESSAGES per tenant
            val currentSize = redisTemplate.opsForZSet().size(messagesKey) ?: 0L
            if (currentSize > MAX_CACHED_MESSAGES) {
                // Remove oldest messages (lowest scores sit at rank 0 of the ZSET)
                val toRemove = currentSize - MAX_CACHED_MESSAGES
                redisTemplate.opsForZSet().removeRange(messagesKey, 0, toRemove - 1)
            }

            // Set 24-hour TTL (renewed with each write)
            redisTemplate.expire(messagesKey, TTL_HOURS, TimeUnit.HOURS)

            logger.debug(
                "Message cached in Redis for tenant=$tenantId: timestamp=${message.timestamp}"
            )
        } catch (e: Exception) {
            logger.error("Error caching message in Redis for tenant=${message.tenantId}", e)
        }
    }

    /**
     * Gets the last N messages from cache for a specific tenant.
     *
     * @param tenantId Tenant ID (null = DEFAULT tenant for backward compatibility)
     * @param limit Number of messages to retrieve (default 100)
     * @return List of messages ordered by timestamp DESC (most recent first)
     */
    fun getRecentMessages(tenantId: String? = null, limit: Int = 100): List<RealDataDto> {
        return try {
            val messagesKey = getMessagesKey(tenantId)

            // reverseRange(0, limit - 1) returns the 'limit' highest-scored
            // (i.e. most recent) entries of the tenant's sorted set
            val messages =
                redisTemplate.opsForZSet().reverseRange(messagesKey, 0, limit.toLong() - 1)

            messages?.mapNotNull(::deserialize) ?: emptyList()
        } catch (e: Exception) {
            logger.error("Error getting recent messages from Redis for tenant=$tenantId", e)
            emptyList()
        }
    }

    /**
     * Gets messages in a specific time range for a tenant.
     *
     * @param tenantId Tenant ID (null = DEFAULT tenant for backward compatibility)
     * @param startTime Start timestamp
     * @param endTime End timestamp
     * @return List of messages in the specified range
     */
    fun getMessagesByTimeRange(
        tenantId: String? = null,
        startTime: Instant,
        endTime: Instant
    ): List<RealDataDto> {
        return try {
            val messagesKey = getMessagesKey(tenantId)
            val minScore = startTime.toEpochMilli().toDouble()
            val maxScore = endTime.toEpochMilli().toDouble()

            val messages =
                redisTemplate.opsForZSet().reverseRangeByScore(messagesKey, minScore, maxScore)

            messages?.mapNotNull(::deserialize) ?: emptyList()
        } catch (e: Exception) {
            logger.error(
                "Error getting messages by time range from Redis for tenant=$tenantId",
                e
            )
            emptyList()
        }
    }

    /**
     * Gets the most recent message from cache for a tenant.
     *
     * @param tenantId Tenant ID (null = DEFAULT tenant for backward compatibility)
     * @return The most recent message or null if no messages
     */
    fun getLatestMessage(tenantId: String? = null): RealDataDto? {
        return try {
            val messagesKey = getMessagesKey(tenantId)
            // Rank 0 in reverse order = highest score = newest entry
            val messages = redisTemplate.opsForZSet().reverseRange(messagesKey, 0, 0)
            messages?.firstOrNull()?.let(::deserialize)
        } catch (e: Exception) {
            logger.error("Error getting latest message from Redis for tenant=$tenantId", e)
            null
        }
    }

    /**
     * Counts the total number of messages in cache for a tenant.
     *
     * @param tenantId Tenant ID (null = DEFAULT tenant for backward compatibility)
     * @return Number of cached messages
     */
    fun countMessages(tenantId: String? = null): Long {
        return try {
            val messagesKey = getMessagesKey(tenantId)
            redisTemplate.opsForZSet().size(messagesKey) ?: 0L
        } catch (e: Exception) {
            logger.error("Error counting messages in Redis for tenant=$tenantId", e)
            0L
        }
    }

    /**
     * Clears all messages from cache for a specific tenant.
     *
     * @param tenantId Tenant ID (null = DEFAULT tenant).
     * To clear ALL tenants' caches, use clearAllTenantsCache() instead.
     */
    fun clearCache(tenantId: String? = null) {
        try {
            val messagesKey = getMessagesKey(tenantId)
            redisTemplate.delete(messagesKey)
            logger.info("GREENHOUSE message cache cleared for tenant=$tenantId")
        } catch (e: Exception) {
            logger.error("Error clearing Redis cache for tenant=$tenantId", e)
        }
    }

    /** Gets cache statistics for a tenant */
    fun getCacheStats(tenantId: String? = null): Map<String, Any> {
        val count = countMessages(tenantId)
        // Use the shared constant so the reported tenant matches the cache key actually used
        return mapOf("count" to count, "tenantId" to (tenantId ?: DEFAULT_TENANT_ID))
    }

    /** Deserializes one cached JSON entry; returns null (and logs) on malformed data. */
    private fun deserialize(json: String): RealDataDto? =
        try {
            objectMapper.readValue(json, RealDataDto::class.java)
        } catch (e: Exception) {
            logger.error("Error deserializing message from Redis: $json", e)
            null
        }

    companion object {
        private const val MESSAGES_KEY_PREFIX = "greenhouse:messages"
        private const val MAX_CACHED_MESSAGES = 1000L
        private const val TTL_HOURS = 24L
        private const val DEFAULT_TENANT_ID = "DEFAULT"

        /**
         * Generates the cache key specific for a tenant.
         * Format: "greenhouse:messages:{tenantId}"
         *
         * @param tenantId Tenant ID (uses DEFAULT for legacy data)
         * @return Tenant-isolated cache key
         */
        private fun getMessagesKey(tenantId: String?): String {
            val safeTenantId = tenantId?.takeIf { it.isNotBlank() } ?: DEFAULT_TENANT_ID
            return "$MESSAGES_KEY_PREFIX:$safeTenantId"
        }
    }
}
# Memory Management
maxmemory 900mb
maxmemory-policy volatile-lru   # Evict keys with TTL using LRU

# Persistence
save 300 10       # Save every 5 min if 10+ changes
save 60 10000     # Save every 1 min if 10000+ changes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data

# Performance
timeout 300       # Close idle clients after 5 minutes
tcp-keepalive 60
maxclients 10000

# Security
requirepass ${REDIS_PASSWORD}
protected-mode yes
rename-command FLUSHDB ""    # Disabled for safety
rename-command FLUSHALL ""   # Disabled for safety
rename-command CONFIG ""     # Disabled for safety

# Logging
loglevel notice
logfile ""
volatile-lru — Evicts least recently used keys with TTL
Persistence
RDB Snapshots — Every 5 min (10+ changes) or 1 min (10k+ changes)
Compression
RDB Compression: ON — Reduces disk usage by ~70%
Connection Pool
100 max connections — Supports 100 concurrent API requests
Memory Limit: Redis is configured with maxmemory 900mb. If exceeded, keys with TTL are evicted using the LRU algorithm.
Current Usage: ~500KB per tenant (1000 messages × 500 bytes)
Symptoms: New sensor data not appearing in cache
Possible Causes:
Redis connection timeout
Wrong tenant ID
Redis memory full (eviction)
Solutions:
# Check Redis connectivity
redis-cli -a "${REDIS_PASSWORD}" PING

# Check memory usage
redis-cli -a "${REDIS_PASSWORD}" INFO memory

# Check cache key exists
redis-cli -a "${REDIS_PASSWORD}" EXISTS greenhouse:messages:SARA

# View application logs
kubectl logs -f deployment/invernaderos-api-prod | grep "Cache"
High memory usage
Symptoms: Redis using >900MB memory
Possible Causes:
Too many tenants with 1000 messages each
Large JSON payloads
Memory fragmentation
Solutions:
# Check number of keys
redis-cli -a "${REDIS_PASSWORD}" DBSIZE

# Get memory usage per key type
redis-cli -a "${REDIS_PASSWORD}" --bigkeys

# Check fragmentation ratio
redis-cli -a "${REDIS_PASSWORD}" INFO memory | grep fragmentation

# If fragmentation > 1.5, restart Redis
kubectl rollout restart statefulset/redis -n apptolast-invernadero-api
Slow cache queries
Symptoms: API response time >500ms for cache queries
Possible Causes:
Large result sets (requesting 1000+ messages)
Redis CPU bottleneck
Network latency
Solutions:
# Check Redis CPU usage
kubectl top pod -n apptolast-invernadero-api -l app=redis

# Monitor slow queries (>10ms)
redis-cli -a "${REDIS_PASSWORD}" SLOWLOG GET 10

# Reduce query limit
# Instead of: getRecentMessages(tenantId, 1000)
# Use:        getRecentMessages(tenantId, 100)