
RedisCacheStore: avoid monkey patching redis
ConnectionPool#with is aliased as #then, which is equivalent
to Object#then since Ruby 2.5. So if we use that, we don't need
to monkey patch anything.
byroot committed Aug 1, 2022
1 parent c617b8f, commit 5aa9d16
Showing 1 changed file with 11 additions and 20 deletions.
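
As a minimal sketch of the equivalence the commit message describes (assuming the redis and connection_pool gems and a local Redis server; the variable names are illustrative, not from the patch):

    require "redis"
    require "connection_pool"

    # Bare client: Object#then yields the receiver, so the block simply
    # receives the Redis instance itself.
    client = Redis.new
    client.then { |c| c.ping }   # => "PONG"

    # Pool: ConnectionPool#then (an alias of #with) checks a connection out
    # of the pool, yields it, and checks it back in afterwards.
    pool = ConnectionPool.new(size: 5) { Redis.new }
    pool.then { |c| c.ping }     # => "PONG"

Either way, redis.then { |c| ... } hands the block a usable client, which is why the ConnectionPoolLike shim removed in the diff below is no longer needed.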
activesupport/lib/active_support/cache/redis_cache_store.rb (31 changes: 11 additions & 20 deletions)
@@ -21,15 +21,6 @@
 
 module ActiveSupport
   module Cache
-    module ConnectionPoolLike
-      def with
-        yield self
-      end
-    end
-
-    ::Redis.include(ConnectionPoolLike)
-    ::Redis::Distributed.include(ConnectionPoolLike)
-
     # Redis cache store.
     #
     # Deployment note: Take care to use a *dedicated Redis cache* rather
@@ -216,7 +207,7 @@ def delete_matched(matcher, options = nil)
           unless String === matcher
             raise ArgumentError, "Only Redis glob strings are supported: #{matcher.inspect}"
           end
-          redis.with do |c|
+          redis.then do |c|
            pattern = namespace_key(matcher, options)
            cursor = "0"
            # Fetch keys in batches using SCAN to avoid blocking the Redis server.
@@ -255,7 +246,7 @@ def increment(name, amount = 1, options = nil)
             options = merged_options(options)
             key = normalize_key(name, options)
 
-            redis.with do |c|
+            redis.then do |c|
               c.incrby(key, amount).tap do
                 write_key_expiry(c, key, options)
               end
@@ -286,7 +277,7 @@ def decrement(name, amount = 1, options = nil)
             options = merged_options(options)
             key = normalize_key(name, options)
 
-            redis.with do |c|
+            redis.then do |c|
               c.decrby(key, amount).tap do
                 write_key_expiry(c, key, options)
               end
@@ -312,14 +303,14 @@ def clear(options = nil)
           if namespace = merged_options(options)[:namespace]
             delete_matched "*", namespace: namespace
           else
-            redis.with { |c| c.flushdb }
+            redis.then { |c| c.flushdb }
           end
         end
       end
 
       # Get info from redis servers.
       def stats
-        redis.with { |c| c.info }
+        redis.then { |c| c.info }
       end
 
       def mset_capable? # :nodoc:
@@ -345,7 +336,7 @@ def read_entry(key, **options)
 
         def read_serialized_entry(key, raw: false, **options)
           failsafe :read_entry do
-            redis.with { |c| c.get(key) }
+            redis.then { |c| c.get(key) }
           end
         end
 
@@ -357,7 +348,7 @@ def read_multi_entries(names, **options)
           keys = names.map { |name| normalize_key(name, options) }
 
           values = failsafe(:read_multi_entries, returning: {}) do
-            redis.with { |c| c.mget(*keys) }
+            redis.then { |c| c.mget(*keys) }
           end
 
           names.zip(values).each_with_object({}) do |(name, value), results|
@@ -392,7 +383,7 @@ def write_serialized_entry(key, payload, raw: false, unless_exist: false, expire
           end
 
           failsafe :write_entry, returning: false do
-            redis.with { |c| c.set key, payload, **modifiers }
+            redis.then { |c| c.set key, payload, **modifiers }
           end
         end
 
@@ -405,13 +396,13 @@ def write_key_expiry(client, key, options)
         # Delete an entry from the cache.
         def delete_entry(key, options)
           failsafe :delete_entry, returning: false do
-            redis.with { |c| c.del key }
+            redis.then { |c| c.del key }
           end
         end
 
         # Deletes multiple entries in the cache. Returns the number of entries deleted.
         def delete_multi_entries(entries, **_options)
-          redis.with { |c| c.del(entries) }
+          redis.then { |c| c.del(entries) }
         end
 
         # Nonstandard store provider API to write multiple values at once.
@@ -420,7 +411,7 @@ def write_multi_entries(entries, expires_in: nil, race_condition_ttl: nil, **opt
           if mset_capable? && expires_in.nil? && race_condition_ttl.nil?
             failsafe :write_multi_entries do
               payload = serialize_entries(entries, **options)
-              redis.with do |c|
+              redis.then do |c|
                 c.mapped_mset(payload)
               end
             end
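For context, a hedged usage sketch of what the change enables; that the :redis option accepts either a client or a ConnectionPool reflects my reading of the store's documentation, and a local Redis server is assumed:

    require "active_support"
    require "redis"
    require "connection_pool"

    # Backed by a single client: internal calls such as
    # redis.then { |c| c.get(key) } yield the client itself via Object#then.
    single = ActiveSupport::Cache.lookup_store(:redis_cache_store, redis: Redis.new)

    # Backed by a pool: the same redis.then calls check a connection out of
    # the pool for the duration of each block via ConnectionPool#then.
    pooled = ActiveSupport::Cache.lookup_store(
      :redis_cache_store,
      redis: ConnectionPool.new(size: 5) { Redis.new }
    )

    single.write("greeting", "hello")
    single.read("greeting")        # => "hello"
    pooled.fetch("answer") { 42 }  # => 42

With every Redis access going through redis.then, neither configuration requires ::Redis or ::Redis::Distributed to be patched.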
