Make Redis pool size and idle timeout configurable.
brocaar committed Aug 26, 2018
1 parent 59f02c3 commit c1308cc
Showing 25 changed files with 173 additions and 139 deletions.
10 changes: 10 additions & 0 deletions cmd/loraserver/cmd/configfile.go
@@ -70,6 +70,16 @@ automigrate={{ .PostgreSQL.Automigrate }}
# https://www.iana.org/assignments/uri-schemes/prov/redis
url="{{ .Redis.URL }}"
# Max idle connections in the pool.
max_idle={{ .Redis.MaxIdle }}
# Idle timeout.
#
# Close connections after remaining idle for this duration. If the value
# is zero, then idle connections are not closed. You should set
# the timeout to a value less than the server's timeout.
idle_timeout="{{ .Redis.IdleTimeout }}"
# Network-server settings.
[network_server]
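
For orientation, the {{ .Redis.MaxIdle }} and {{ .Redis.IdleTimeout }} placeholders above are rendered from the Redis section of the server configuration. Below is a minimal sketch of the backing struct, assuming mapstructure-style tags; the real definition lives in internal/config and is not part of the hunks shown here. Only the TOML keys (url, max_idle, idle_timeout) are taken from this commit.

package config

import "time"

// Redis holds the connection-pool settings rendered into the template above.
// Field names and tags are assumptions, not code from this diff.
type Redis struct {
    URL         string        `mapstructure:"url"`
    MaxIdle     int           `mapstructure:"max_idle"`
    IdleTimeout time.Duration `mapstructure:"idle_timeout"`
}
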
6 changes: 5 additions & 1 deletion cmd/loraserver/cmd/print_ds.go
@@ -23,7 +23,11 @@ var printDSCmd = &cobra.Command{
log.Fatalf("hex encoded DevEUI must be given as an argument")
}

config.C.Redis.Pool = common.NewRedisPool(config.C.Redis.URL)
config.C.Redis.Pool = common.NewRedisPool(
config.C.Redis.URL,
config.C.Redis.MaxIdle,
config.C.Redis.IdleTimeout,
)

var devEUI lorawan.EUI64
if err := devEUI.UnmarshalText([]byte(args[0])); err != nil {
2 changes: 2 additions & 0 deletions cmd/loraserver/cmd/root.go
@@ -53,6 +53,8 @@ func init() {
viper.SetDefault("network_server.band.name", "EU_863_870")
viper.SetDefault("network_server.api.bind", "0.0.0.0:8000")
viper.SetDefault("redis.url", "redis://localhost:6379")
viper.SetDefault("redis.max_idle", 10)
viper.SetDefault("redis.idle_timeout", 5*time.Minute)
viper.SetDefault("postgresql.dsn", "postgres://localhost/loraserver_ns?sslmode=disable")
viper.SetDefault("postgresql.automigrate", true)
viper.SetDefault("network_server.gateway.backend.mqtt.server", "tcp://localhost:1883")
6 changes: 5 additions & 1 deletion cmd/loraserver/cmd/root_run.go
@@ -190,7 +190,11 @@ func enableUplinkChannels() error {

func setRedisPool() error {
log.WithField("url", config.C.Redis.URL).Info("setup redis connection pool")
config.C.Redis.Pool = common.NewRedisPool(config.C.Redis.URL)
config.C.Redis.Pool = common.NewRedisPool(
config.C.Redis.URL,
config.C.Redis.MaxIdle,
config.C.Redis.IdleTimeout,
)
return nil
}
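
common.NewRedisPool itself is outside the hunks shown in this commit view. The sketch below shows one plausible shape of the three-argument constructor, assuming the redigo client; the import path, error wrapping, and TestOnBorrow check are assumptions rather than code taken from this diff.

package common

import (
    "time"

    "github.com/gomodule/redigo/redis"
    "github.com/pkg/errors"
)

// NewRedisPool returns a redigo connection pool for the given URL, keeping at
// most maxIdle idle connections and closing connections that stay idle longer
// than idleTimeout (a zero timeout keeps idle connections open indefinitely).
func NewRedisPool(url string, maxIdle int, idleTimeout time.Duration) *redis.Pool {
    return &redis.Pool{
        MaxIdle:     maxIdle,
        IdleTimeout: idleTimeout,
        Dial: func() (redis.Conn, error) {
            c, err := redis.DialURL(url)
            if err != nil {
                return nil, errors.Wrap(err, "redis connection error")
            }
            return c, nil
        },
        TestOnBorrow: func(c redis.Conn, t time.Time) error {
            // Verify the connection is still healthy before handing it out.
            _, err := c.Do("PING")
            if err != nil {
                return errors.Wrap(err, "ping redis error")
            }
            return nil
        },
    }
}
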

10 changes: 10 additions & 0 deletions docs/content/install/config.md
@@ -123,6 +123,16 @@ automigrate=true
# https://www.iana.org/assignments/uri-schemes/prov/redis
url="redis://localhost:6379"

# Max idle connections in the pool.
max_idle=10

# Idle timeout.
#
# Close connections after remaining idle for this duration. If the value
# is zero, then idle connections are not closed. You should set
# the timeout to a value less than the server's timeout.
idle_timeout="5m0s"


# Network-server settings.
[network_server]
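
The idle_timeout shown above is a Go-style duration string, so "5m0s", "5m", and "300s" are equivalent ways to write five minutes, and "0" disables idle-connection closing. A small sketch follows, assuming the value is parsed with Go's time.ParseDuration, as viper-based configuration loaders typically do.

package main

import (
    "fmt"
    "time"
)

func main() {
    // Duration strings accepted for idle_timeout; "0" means idle
    // connections are never closed.
    for _, s := range []string{"5m0s", "5m", "300s", "0"} {
        d, err := time.ParseDuration(s)
        fmt.Printf("%-5s -> %v (err: %v)\n", s, d, err)
    }
}
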
4 changes: 4 additions & 0 deletions docs/content/overview/changelog.md
@@ -29,6 +29,10 @@ The new JSON structure re-uses the messages defined for
[Protocol Buffers](https://developers.google.com/protocol-buffers/docs/proto3#json)
based serialization.

### Improvements

* Make Redis pool size and idle timeout configurable.

## v2.0.2

### Bugfixes
118 changes: 59 additions & 59 deletions internal/adr/adr_test.go
@@ -100,108 +100,108 @@ func TestADR(t *testing.T) {
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(), // 5
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 3,
ExpectedDR: 3,
ExpectedTXPowerIndex: 1,
DR: 3,
ExpectedDR: 3,
ExpectedTXPowerIndex: 1,
},
{
Name: "one step: one step data-rate increase",
NStep: 1,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 4,
ExpectedDR: 5,
ExpectedTXPowerIndex: 1,
DR: 4,
ExpectedDR: 5,
ExpectedTXPowerIndex: 1,
},
{
Name: "one step: one step tx-power decrease",
NStep: 1,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
},
{
Name: "two steps: two steps data-rate increase",
NStep: 2,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 3,
ExpectedDR: 5,
ExpectedTXPowerIndex: 1,
DR: 3,
ExpectedDR: 5,
ExpectedTXPowerIndex: 1,
},
{
Name: "two steps: one step data-rate increase (due to max supported dr), one step tx-power decrease",
NStep: 2,
TXPowerIndex: 1,
MaxSupportedDR: 4,
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 3,
ExpectedDR: 4,
ExpectedTXPowerIndex: 2,
DR: 3,
ExpectedDR: 4,
ExpectedTXPowerIndex: 2,
},
{
Name: "two steps: one step data-rate increase, one step tx-power decrease",
NStep: 2,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 4,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
DR: 4,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
},
{
Name: "two steps: two steps tx-power decrease",
NStep: 2,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 3,
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 3,
},
{
Name: "two steps: one step tx-power decrease due to max supported tx power index",
NStep: 2,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: 2,
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
DR: 5,
ExpectedDR: 5,
ExpectedTXPowerIndex: 2,
},
{
Name: "one negative step: one step power increase",
NStep: -1,
TXPowerIndex: 1,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 0,
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 0,
},
{
Name: "one negative step, nothing to do (adr engine will never decrease data-rate)",
NStep: -1,
TXPowerIndex: 0,
MaxSupportedDR: getMaxAllowedDR(),
MaxSupportedTXPowerIndex: getMaxTXPowerOffsetIndex(), // 5
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 0,
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 0,
},
{
Name: "10 negative steps, should not adjust anything (as we already reached the min tx-power index)",
NStep: -10,
TXPowerIndex: 1,
MinSupportedTXPowerIndex: 1,
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 1,
DR: 4,
ExpectedDR: 4,
ExpectedTXPowerIndex: 1,
},
}

@@ -217,7 +217,7 @@ func TestADR(t *testing.T) {
})

Convey("Given a clean Redis database", func() {
config.C.Redis.Pool = common.NewRedisPool(conf.RedisURL)
config.C.Redis.Pool = common.NewRedisPool(conf.RedisURL, 10, 0)
test.MustFlushRedis(config.C.Redis.Pool)

Convey("Given a testtable for HandleADR", func() {
@@ -270,8 +270,8 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 2,
ADR: true,
DR: 2,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: -7},
},
@@ -285,9 +285,9 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 3,
ADR: true,
DR: 5,
TXPowerIndex: 3,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: 1, TXPowerIndex: 3},
},
@@ -320,10 +320,10 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 4,
NbTrans: 3,
ADR: true,
DR: 5,
TXPowerIndex: 4,
NbTrans: 3,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: -5, TXPowerIndex: 4},
},
@@ -354,8 +354,8 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 2,
ADR: true,
DR: 2,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: -7, TXPowerIndex: 0},
},
@@ -397,8 +397,8 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2, 3, 4, 6},
DR: 2,
ADR: true,
DR: 2,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: -7, TXPowerIndex: 0},
},
@@ -443,9 +443,9 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: 7, TXPowerIndex: 0},
{MaxSNR: -5, TXPowerIndex: 3},
@@ -460,9 +460,9 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: -20, TXPowerIndex: 0},
{MaxSNR: -20, TXPowerIndex: 3},
@@ -495,9 +495,9 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
DR: 5,
TXPowerIndex: 3,
NbTrans: 1,
UplinkHistory: []storage.UplinkHistory{
{FCnt: 0, MaxSNR: -20, TXPowerIndex: 3},
{FCnt: 1, MaxSNR: -20, TXPowerIndex: 3},
@@ -585,9 +585,9 @@ func TestADR(t *testing.T) {
DevAddr: [4]byte{1, 2, 3, 4},
DevEUI: [8]byte{1, 2, 3, 4, 5, 6, 7, 8},
EnabledUplinkChannels: []int{0, 1, 2},
DR: 5,
TXPowerIndex: 3,
ADR: true,
DR: 5,
TXPowerIndex: 3,
ADR: true,
UplinkHistory: []storage.UplinkHistory{
{MaxSNR: 1, TXPowerIndex: 3},
},
