Fix some blocked/allow laggards after migrating. Add DuckDB for outstanding analytics performance. Start adding an import for all bot networks

Dan Milne
2025-11-18 16:40:05 +11:00
parent ef56779584
commit 3f274c842c
37 changed files with 3522 additions and 151 deletions

app/controllers/analytics_controller.rb

@@ -23,9 +23,10 @@ class AnalyticsController < ApplicationController
     # Cache key includes period and start_time (hour-aligned for consistency)
     cache_key_base = "analytics/#{@time_period}/#{@start_time.to_i}"
-    # Core statistics - cached
+    # Core statistics - cached (uses DuckDB if available)
     @total_events = Rails.cache.fetch("#{cache_key_base}/total_events", expires_in: cache_ttl) do
-      Event.where("timestamp >= ?", @start_time).count
+      with_duckdb_fallback { EventDdb.count_since(@start_time) } ||
+        Event.where("timestamp >= ?", @start_time).count
     end
     @total_rules = Rails.cache.fetch("analytics/total_rules", expires_in: 5.minutes) do
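Editorial note: EventDdb is introduced by this commit and its implementation is not part of this file. As a rough sketch of what a class method like count_since might look like against the ruby duckdb gem — the connection accessor on AnalyticsDuckdbService is an assumption; only that service's newest_event_timestamp appears later in this diff:

    # Minimal sketch, not the committed implementation. Assumes the DuckDB
    # service exposes a DuckDB::Connection and an `events` table mirroring
    # the PostgreSQL schema.
    class EventDdb
      def self.count_since(start_time)
        connection.query("SELECT COUNT(*) FROM events WHERE timestamp >= ?", start_time)
                  .first&.first
      end

      def self.connection
        AnalyticsDuckdbService.instance.connection # assumed accessor
      end
    end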
@@ -40,31 +41,33 @@ class AnalyticsController < ApplicationController
       NetworkRange.count
     end
-    # Event breakdown by action - cached
+    # Event breakdown by action - cached (uses DuckDB if available)
     @event_breakdown = Rails.cache.fetch("#{cache_key_base}/event_breakdown", expires_in: cache_ttl) do
-      Event.where("timestamp >= ?", @start_time)
-           .group(:waf_action)
-           .count
-      # Keys are already strings ("allow", "deny", etc.) from the enum
+      with_duckdb_fallback { EventDdb.breakdown_by_action(@start_time) } ||
+        Event.where("timestamp >= ?", @start_time)
+             .group(:waf_action)
+             .count
     end
-    # Top countries by event count - cached (now uses denormalized country column)
+    # Top countries by event count - cached (uses DuckDB if available)
     @top_countries = Rails.cache.fetch("#{cache_key_base}/top_countries", expires_in: cache_ttl) do
-      Event.where("timestamp >= ? AND country IS NOT NULL", @start_time)
-           .group(:country)
-           .count
-           .sort_by { |_, count| -count }
-           .first(10)
+      with_duckdb_fallback { EventDdb.top_countries(@start_time, 10) } ||
+        Event.where("timestamp >= ? AND country IS NOT NULL", @start_time)
+             .group(:country)
+             .count
+             .sort_by { |_, count| -count }
+             .first(10)
     end
-    # Top blocked IPs - cached
+    # Top blocked IPs - cached (uses DuckDB if available)
     @top_blocked_ips = Rails.cache.fetch("#{cache_key_base}/top_blocked_ips", expires_in: cache_ttl) do
-      Event.where("timestamp >= ?", @start_time)
-           .where(waf_action: 1) # deny action in enum
-           .group(:ip_address)
-           .count
-           .sort_by { |_, count| -count }
-           .first(10)
+      with_duckdb_fallback { EventDdb.top_blocked_ips(@start_time, 10) } ||
+        Event.where("timestamp >= ?", @start_time)
+             .where(waf_action: 0) # deny action in enum
+             .group(:ip_address)
+             .count
+             .sort_by { |_, count| -count }
+             .first(10)
     end
     # Network range intelligence breakdown - cached
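Editorial note: the `||` fallback imposes a shape contract on EventDdb — each method must return the same structure as the ActiveRecord branch it replaces, or nil to fall through. breakdown_by_action must mirror `.group(:waf_action).count`, and top_countries must mirror the sorted pair list. Illustrative shapes (the real EventDdb methods are not shown in this diff):

    EventDdb.breakdown_by_action(@start_time) # => { "allow" => 1234, "deny" => 56 }
    EventDdb.top_countries(@start_time, 10)   # => [["US", 812], ["CN", 407], ...]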
@@ -92,7 +95,7 @@ class AnalyticsController < ApplicationController
       total_users: User.count,
       active_rules: Rule.enabled.count,
       disabled_rules: Rule.where(enabled: false).count,
-      recent_errors: Event.where("timestamp >= ? AND waf_action = ?", @start_time, 1).count # 1 = deny
+      recent_errors: Event.where("timestamp >= ? AND waf_action = ?", @start_time, 0).count # 0 = deny
     }
   end
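Editorial note: the 1-to-0 changes in this hunk and the ones above are the "blocked/allow laggards" from the commit message — the waf_action enum was evidently renumbered in an earlier migration so deny now maps to 0, and these hard-coded comparisons were still counting the old value. The Event model presumably now declares something like this (an assumption; the model is not in this diff, and only deny = 0 is confirmed by the changes here):

    class Event < ApplicationRecord
      # Assumed mapping after the migration: deny moved from 1 to 0.
      enum :waf_action, { deny: 0, allow: 1 }
    end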
@@ -117,38 +120,90 @@ class AnalyticsController < ApplicationController
     @time_period = params[:period]&.to_sym || :day
     @start_time = calculate_start_time(@time_period)
-    # Top networks by request volume (using denormalized network_range_id)
-    # Use a subquery approach to avoid PostgreSQL GROUP BY issues with network_ranges.*
-    event_stats = Event.where("timestamp >= ?", @start_time)
-                       .where.not(network_range_id: nil)
-                       .group(:network_range_id)
-                       .select("network_range_id, COUNT(*) as event_count, COUNT(DISTINCT ip_address) as unique_ips")
-    # Join the stats back to NetworkRange to get full network details
-    @top_networks = NetworkRange.joins("INNER JOIN (#{event_stats.to_sql}) stats ON stats.network_range_id = network_ranges.id")
-                                .select("network_ranges.*, stats.event_count, stats.unique_ips")
-                                .order("stats.event_count DESC")
-                                .limit(50)
+    # Top networks by request volume - use DuckDB if available
+    network_stats = with_duckdb_fallback { EventDdb.top_networks(@start_time, 50) }
+    if network_stats
+      # DuckDB path: array format [network_range_id, event_count, unique_ips]
+      network_ids = network_stats.map { |row| row[0] }
+      stats_by_id = network_stats.to_h { |row| [row[0], { event_count: row[1], unique_ips: row[2] }] }
+      @top_networks = NetworkRange.where(id: network_ids)
+                                  .to_a
+                                  .map do |network|
+        stats = stats_by_id[network.id]
+        network.define_singleton_method(:event_count) { stats[:event_count] }
+        network.define_singleton_method(:unique_ips) { stats[:unique_ips] }
+        # Add inherited intelligence support
+        intelligence = network.inherited_intelligence
+        if intelligence[:inherited]
+          network.define_singleton_method(:display_company) { intelligence[:company] }
+          network.define_singleton_method(:display_country) { intelligence[:country] }
+          network.define_singleton_method(:inherited_from) { intelligence[:parent_cidr] }
+          network.define_singleton_method(:has_inherited_data?) { true }
+        else
+          network.define_singleton_method(:display_company) { network.company }
+          network.define_singleton_method(:display_country) { network.country }
+          network.define_singleton_method(:inherited_from) { nil }
+          network.define_singleton_method(:has_inherited_data?) { false }
+        end
+        network
+      end
+        .sort_by { |n| -n.event_count }
+    else
+      # PostgreSQL fallback
+      event_stats = Event.where("timestamp >= ?", @start_time)
+                         .where.not(network_range_id: nil)
+                         .group(:network_range_id)
+                         .select("network_range_id, COUNT(*) as event_count, COUNT(DISTINCT ip_address) as unique_ips")
+      @top_networks = NetworkRange.joins("INNER JOIN (#{event_stats.to_sql}) stats ON stats.network_range_id = network_ranges.id")
+                                  .select("network_ranges.*, stats.event_count, stats.unique_ips")
+                                  .order("stats.event_count DESC")
+                                  .limit(50)
+      # Add inherited intelligence support for PostgreSQL fallback
+      @top_networks = @top_networks.to_a.map do |network|
+        intelligence = network.inherited_intelligence
+        if intelligence[:inherited]
+          network.define_singleton_method(:display_company) { intelligence[:company] }
+          network.define_singleton_method(:display_country) { intelligence[:country] }
+          network.define_singleton_method(:inherited_from) { intelligence[:parent_cidr] }
+          network.define_singleton_method(:has_inherited_data?) { true }
+        else
+          network.define_singleton_method(:display_company) { network.company }
+          network.define_singleton_method(:display_country) { network.country }
+          network.define_singleton_method(:inherited_from) { nil }
+          network.define_singleton_method(:has_inherited_data?) { false }
+        end
+        network
+      end
+    end
     # Network type breakdown with traffic stats
     @network_breakdown = calculate_network_type_stats(@start_time)
-    # Company breakdown for top traffic sources (using denormalized company column)
-    @top_companies = Event.where("timestamp >= ? AND company IS NOT NULL", @start_time)
+    # Company breakdown for top traffic sources - use DuckDB if available
+    @top_companies = with_duckdb_fallback { EventDdb.top_companies(@start_time, 20) } ||
+      Event.where("timestamp >= ? AND company IS NOT NULL", @start_time)
           .group(:company)
           .select("company, COUNT(*) as event_count, COUNT(DISTINCT ip_address) as unique_ips, COUNT(DISTINCT network_range_id) as network_count")
           .order("event_count DESC")
           .limit(20)
-    # ASN breakdown (using denormalized asn columns)
-    @top_asns = Event.where("timestamp >= ? AND asn IS NOT NULL", @start_time)
+    # ASN breakdown - use DuckDB if available
+    @top_asns = with_duckdb_fallback { EventDdb.top_asns(@start_time, 15) } ||
+      Event.where("timestamp >= ? AND asn IS NOT NULL", @start_time)
          .group(:asn, :asn_org)
          .select("asn, asn_org, COUNT(*) as event_count, COUNT(DISTINCT ip_address) as unique_ips, COUNT(DISTINCT network_range_id) as network_count")
          .order("event_count DESC")
          .limit(15)
-    # Geographic breakdown (using denormalized country column)
-    @top_countries = Event.where("timestamp >= ? AND country IS NOT NULL", @start_time)
+    # Geographic breakdown - use DuckDB if available
+    @top_countries = with_duckdb_fallback { EventDdb.top_countries_with_stats(@start_time, 15) } ||
+      Event.where("timestamp >= ? AND country IS NOT NULL", @start_time)
          .group(:country)
          .select("country, COUNT(*) as event_count, COUNT(DISTINCT ip_address) as unique_ips")
          .order("event_count DESC")
@@ -191,12 +246,15 @@ class AnalyticsController < ApplicationController
     # Historical hours are cached for full TTL, current hour cached briefly for freshness
     # Cache historical hours (1-23 hours ago) - these are complete and won't change
-    # No expiration - will stick around until evicted by cache store
+    # No expiration - will stick around until evicted by cache store (uses DuckDB if available)
     historical_timeline = Rails.cache.fetch("#{cache_key_base}/chart_historical") do
       historical_start = 23.hours.ago.beginning_of_hour
-      events_by_hour = Event.where("timestamp >= ? AND timestamp < ?", historical_start, Time.current.beginning_of_hour)
-                            .group("DATE_TRUNC('hour', timestamp)")
-                            .count
+      current_hour_start = Time.current.beginning_of_hour
+      events_by_hour = with_duckdb_fallback { EventDdb.hourly_timeline(historical_start, current_hour_start) } ||
+        Event.where("timestamp >= ? AND timestamp < ?", historical_start, current_hour_start)
+             .group("DATE_TRUNC('hour', timestamp)")
+             .count
       (1..23).map do |hour_ago|
         hour_time = hour_ago.hours.ago.beginning_of_hour
@@ -209,6 +267,7 @@ class AnalyticsController < ApplicationController
     end
     # Current hour (0 hours ago) - cache very briefly since it's actively accumulating
+    # ALWAYS use PostgreSQL for current hour to get real-time data (DuckDB syncs every minute)
     current_hour_data = Rails.cache.fetch("#{cache_key_base}/chart_current_hour", expires_in: 1.minute) do
       hour_time = Time.current.beginning_of_hour
       count = Event.where("timestamp >= ?", hour_time).count
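Editorial note: the timeline is two-tiered — hours 1-23 are immutable once complete, so they cache indefinitely and may come from DuckDB, while the current hour always reads PostgreSQL because the DuckDB copy only syncs once a minute. For the `(1..23).map` lookup to work with either backend, hourly_timeline must return the same keys the DATE_TRUNC grouping produces: a hash keyed by hour-truncated times. A sketch under that assumption (the EventDdb model is not in this diff):

    # Assumed return shape:
    # { 2025-11-18 05:00:00 => 1421, 2025-11-18 06:00:00 => 1388, ... }
    def self.hourly_timeline(from, to)
      connection.query(
        "SELECT date_trunc('hour', timestamp), COUNT(*) FROM events " \
        "WHERE timestamp >= ? AND timestamp < ? GROUP BY 1",
        from, to
      ).to_h
    end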
@@ -290,6 +349,12 @@ class AnalyticsController < ApplicationController
   end

   def calculate_network_type_stats(start_time)
+    # Try DuckDB first, fallback to PostgreSQL
+    duckdb_stats = with_duckdb_fallback { EventDdb.network_type_stats(start_time) }
+    return duckdb_stats if duckdb_stats
+
+    # PostgreSQL fallback
     # Get all network types with their traffic statistics using denormalized columns
     network_types = [
       { type: 'datacenter', label: 'Datacenter', column: :is_datacenter },
@@ -333,6 +398,12 @@ class AnalyticsController < ApplicationController
   end

   def calculate_suspicious_patterns(start_time)
+    # Try DuckDB first, fallback to PostgreSQL
+    duckdb_patterns = with_duckdb_fallback { EventDdb.suspicious_patterns(start_time) }
+    return duckdb_patterns if duckdb_patterns
+
+    # PostgreSQL fallback
     patterns = {}
     # High volume networks (top 1% by request count) - using denormalized network_range_id
@@ -358,9 +429,9 @@ class AnalyticsController < ApplicationController
     high_deny_networks = Event.where("timestamp >= ? AND network_range_id IS NOT NULL", start_time)
                               .group(:network_range_id)
                               .select("network_range_id,
-                                       COUNT(CASE WHEN waf_action = 1 THEN 1 END) as denied_count,
+                                       COUNT(CASE WHEN waf_action = 0 THEN 1 END) as denied_count,
                                        COUNT(*) as total_count")
-                              .having("COUNT(CASE WHEN waf_action = 1 THEN 1 END)::float / COUNT(*) > 0.5")
+                              .having("COUNT(CASE WHEN waf_action = 0 THEN 1 END)::float / COUNT(*) > 0.5")
                               .having("COUNT(*) >= 10") # minimum threshold
     patterns[:high_deny_rate] = {
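Editorial note: the ::float cast in the HAVING clause is load-bearing. PostgreSQL's `/` on two integers is integer division, so without the cast every ratio below 100% would truncate to 0 and the > 0.5 filter could never fire. The CASE with no ELSE yields NULL for non-deny rows, and COUNT skips NULLs, so the numerator counts only denies. A worked example with illustrative numbers:

    # 12 denies out of 20 events for one network:
    #   without cast: 12 / 20        => 0     (integer division - filter never fires)
    #   with cast:    12::float / 20 => 0.6   (0.6 > 0.5, network is flagged)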
@@ -392,12 +463,14 @@ class AnalyticsController < ApplicationController
         {
           id: network.id,
           cidr: network.cidr,
-          company: network.company,
+          company: network.display_company,
           asn: network.asn,
-          country: network.country,
+          country: network.display_country,
           network_type: network.network_type,
           event_count: network.event_count,
-          unique_ips: network.unique_ips
+          unique_ips: network.unique_ips,
+          has_inherited_data: network.has_inherited_data?,
+          inherited_from: network.inherited_from
         }
       },
       network_breakdown: @network_breakdown,
@@ -449,4 +522,27 @@ class AnalyticsController < ApplicationController
       }
     end
   end
+
+  # Helper method to try DuckDB first, fall back to PostgreSQL
+  def with_duckdb_fallback(&block)
+    result = yield
+    result.nil? ? nil : result # Return result or nil to trigger fallback
+  rescue StandardError => e
+    Rails.logger.warn "[Analytics] DuckDB query failed, falling back to PostgreSQL: #{e.message}"
+    nil # Return nil to trigger fallback
+  end
+
+  # Check if DuckDB has recent data (within last 2 minutes)
+  # Returns true if DuckDB is up-to-date, false if potentially stale
+  def duckdb_is_fresh?
+    newest = AnalyticsDuckdbService.instance.newest_event_timestamp
+    return false if newest.nil?
+
+    # Consider fresh if newest event is within 2 minutes
+    # (sync job runs every 1 minute, so 2 minutes allows for some lag)
+    newest >= 2.minutes.ago
+  rescue StandardError => e
+    Rails.logger.warn "[Analytics] Error checking DuckDB freshness: #{e.message}"
+    false
+  end
 end
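Editorial note on the two helpers: with_duckdb_fallback relies on the callers' `||` for the actual fallback — any nil (or raised error, logged and converted to nil) falls through to the PostgreSQL branch. That also means the EventDdb methods must never legitimately return nil or false for "zero results"; an empty hash or array is truthy in Ruby and will be used as-is. And duckdb_is_fresh? is defined here but not called in any hunk above — presumably it is (or will be) used to skip DuckDB when the one-minute sync job falls behind, along the lines of this hypothetical gating:

    # Hypothetical use of duckdb_is_fresh? - not shown in this commit's hunks.
    @total_events = Rails.cache.fetch("#{cache_key_base}/total_events", expires_in: cache_ttl) do
      (duckdb_is_fresh? && with_duckdb_fallback { EventDdb.count_since(@start_time) }) ||
        Event.where("timestamp >= ?", @start_time).count
    end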