Add support for temporal CT logs (letsencrypt#3853)
This required a bit of rework of the RA issuance flow (adding parsing of the precertificate to determine its expiration date, and moving final certificate parsing ahead of final certificate submission) and of the RA tests, but I don't think it should create any issues.

Fixes letsencrypt#3197.
Roland Bracewell Shoemaker authored and jsha committed Sep 14, 2018
1 parent d39babd commit 196f019
Showing 8 changed files with 281 additions and 48 deletions.
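
As a rough illustration of the flow the commit message describes (not code from this commit; the helper name, its placement, and the call site are assumptions), the RA side now parses the precertificate once and threads its NotAfter date through the new ctpolicy signatures shown in the diffs below:

// Illustrative sketch only. getSCTsForPrecert is a hypothetical helper;
// the GetSCTs signature matches the one introduced in ctpolicy/ctpolicy.go.
package ra // hypothetical placement, for illustration only

import (
	"context"
	"crypto/x509"

	"github.com/letsencrypt/boulder/core"
	"github.com/letsencrypt/boulder/ctpolicy"
)

func getSCTsForPrecert(ctx context.Context, ctp *ctpolicy.CTPolicy, precertDER core.CertDER) (core.SCTDERs, error) {
	// Parse the precertificate so its expiration date can drive temporal
	// shard selection inside the CT policy layer.
	parsed, err := x509.ParseCertificate(precertDER)
	if err != nil {
		return nil, err
	}
	// The expiration is threaded through GetSCTs (and later SubmitFinalCert)
	// so each temporal log set can pick the shard whose window contains it.
	return ctp.GetSCTs(ctx, precertDER, parsed.NotAfter)
}
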
16 changes: 6 additions & 10 deletions cmd/boulder-ra/main.go
@@ -4,7 +4,6 @@ import (
"flag"
"fmt"
"os"
"strconv"
"time"

caPB "github.com/letsencrypt/boulder/ca/proto"
@@ -162,18 +161,15 @@ func main() {
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to Publisher")
pubc = bgrpc.NewPublisherClientWrapper(pubPB.NewPublisherClient(conn))

if c.RA.CTLogGroups != nil {
groups := make([]cmd.CTGroup, len(c.RA.CTLogGroups))
for i, logs := range c.RA.CTLogGroups {
groups[i] = cmd.CTGroup{
Name: strconv.Itoa(i),
Logs: logs,
for _, g := range c.RA.CTLogGroups2 {
for _, l := range g.Logs {
if l.TemporalSet != nil {
err := l.Setup()
cmd.FailOnError(err, "Failed to setup a temporal log set")
}
}
ctp = ctpolicy.New(pubc, groups, nil, logger, scope)
} else if c.RA.CTLogGroups2 != nil {
ctp = ctpolicy.New(pubc, c.RA.CTLogGroups2, c.RA.InformationalCTLogs, logger, scope)
}
ctp = ctpolicy.New(pubc, c.RA.CTLogGroups2, c.RA.InformationalCTLogs, logger, scope)

saConn, err := bgrpc.ClientSetup(c.RA.SAService, tlsConfig, clientMetrics, clk)
cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
81 changes: 73 additions & 8 deletions cmd/config.go
@@ -261,14 +261,6 @@ func (d *ConfigDuration) UnmarshalYAML(unmarshal func(interface{}) error) error
return nil
}

// LogDescription contains the information needed to submit certificates
// to a CT log and verify returned receipts
type LogDescription struct {
URI string
Key string
SubmitFinalCert bool
}

// GRPCClientConfig contains the information needed to talk to the gRPC service
type GRPCClientConfig struct {
// NOTE: this field is deprecated in favor of ServerAddress, as we only ever
@@ -309,6 +301,79 @@ type CAADistributedResolverConfig struct {
Proxies []string
}

// LogShard describes a single shard of a temporally sharded
// CT log
type LogShard struct {
URI string
Key string
WindowStart time.Time
WindowEnd time.Time
}

// TemporalSet contains a set of temporal shards of a single log
type TemporalSet struct {
Name string
Shards []LogShard
}

// Setup initializes the TemporalSet by parsing the start and end dates
// and verifying WindowEnd > WindowStart
func (ts *TemporalSet) Setup() error {
if ts.Name == "" {
return errors.New("Name cannot be empty")
}
if len(ts.Shards) == 0 {
return errors.New("temporal set contains no shards")
}
for i := range ts.Shards {
if ts.Shards[i].WindowEnd.Before(ts.Shards[i].WindowStart) ||
ts.Shards[i].WindowEnd.Equal(ts.Shards[i].WindowStart) {
return errors.New("WindowStart must be before WindowEnd")
}
}
return nil
}

// pick chooses the correct shard from a TemporalSet to use for the given
// expiration time. In the case where two shards have overlapping windows
// the earlier of the two shards will be chosen.
func (ts *TemporalSet) pick(exp time.Time) (*LogShard, error) {
for _, shard := range ts.Shards {
if exp.Before(shard.WindowStart) {
continue
}
if !exp.Before(shard.WindowEnd) {
continue
}
return &shard, nil
}
return nil, fmt.Errorf("no valid shard available for temporal set %q for expiration date %q", ts.Name, exp)
}

// LogDescription contains the information needed to submit certificates
// to a CT log and verify returned receipts. If TemporalSet is non-nil then
// URI and Key should be empty.
type LogDescription struct {
URI string
Key string
SubmitFinalCert bool

*TemporalSet
}

// Info returns the URI and key of the log, either from a plain log description
// or from the earliest valid shard from a temporal log set
func (ld LogDescription) Info(exp time.Time) (string, string, error) {
if ld.TemporalSet == nil {
return ld.URI, ld.Key, nil
}
shard, err := ld.TemporalSet.pick(exp)
if err != nil {
return "", "", err
}
return shard.URI, shard.Key, nil
}

type CTGroup struct {
Name string
Logs []LogDescription
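
To make the new config shapes concrete, here is an illustrative Go literal built from the structs above. It is a fragment (it assumes imports of time and the cmd package), and every URI, key, and date is invented; the JSON encoding of these fields is not shown in this diff.

// Hypothetical example values only.
group := cmd.CTGroup{
	Name: "a",
	Logs: []cmd.LogDescription{
		// A plain log: URI and Key are set directly and Info ignores the
		// expiration date.
		{URI: "https://log.example/2018", Key: "base64-key-2018", SubmitFinalCert: true},
		// A temporally sharded log: URI and Key stay empty and Info picks
		// the shard whose window contains the certificate expiration.
		{TemporalSet: &cmd.TemporalSet{
			Name: "sharded-log",
			Shards: []cmd.LogShard{
				{
					URI:         "https://log.example/2019",
					Key:         "base64-key-2019",
					WindowStart: time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC),
					WindowEnd:   time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					URI:         "https://log.example/2020",
					Key:         "base64-key-2020",
					WindowStart: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
					WindowEnd:   time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC),
				},
			},
		}},
	},
}

With a configuration like this, Info(expiration) would return the 2019 shard's URI and key for a certificate expiring in mid-2019 and the 2020 shard's for one expiring in mid-2020, while the plain log always returns its own URI and key.
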
109 changes: 109 additions & 0 deletions cmd/config_test.go
@@ -4,7 +4,9 @@ import (
"regexp"
"strings"
"testing"
"time"

"github.com/jmhodges/clock"
"github.com/letsencrypt/boulder/test"
)

@@ -98,3 +100,110 @@ func TestTLSConfigLoad(t *testing.T) {
})
}
}

func TestTemporalSetup(t *testing.T) {
for _, tc := range []struct {
ts TemporalSet
err string
}{
{
ts: TemporalSet{},
err: "Name cannot be empty",
},
{
ts: TemporalSet{
Name: "temporal set",
},
err: "temporal set contains no shards",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{},
WindowEnd: time.Time{},
},
},
},
err: "WindowStart must be before WindowEnd",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{}.Add(time.Hour),
WindowEnd: time.Time{},
},
},
},
err: "WindowStart must be before WindowEnd",
},
{
ts: TemporalSet{
Name: "temporal set",
Shards: []LogShard{
{
WindowStart: time.Time{},
WindowEnd: time.Time{}.Add(time.Hour),
},
},
},
err: "",
},
} {
err := tc.ts.Setup()
if err != nil && tc.err != err.Error() {
t.Errorf("got error %q, wanted %q", err, tc.err)
} else if err == nil && tc.err != "" {
t.Errorf("unexpected error %q", err)
}
}
}

func TestLogInfo(t *testing.T) {
ld := LogDescription{
URI: "basic-uri",
Key: "basic-key",
}
uri, key, err := ld.Info(time.Time{})
test.AssertNotError(t, err, "Info failed")
test.AssertEquals(t, uri, ld.URI)
test.AssertEquals(t, key, ld.Key)

fc := clock.NewFake()
ld.TemporalSet = &TemporalSet{}
uri, key, err = ld.Info(fc.Now())
test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards")
ld.TemporalSet.Shards = []LogShard{{WindowStart: fc.Now().Add(time.Hour), WindowEnd: fc.Now().Add(time.Hour * 2)}}
uri, key, err = ld.Info(fc.Now())
test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards")

fc.Add(time.Hour * 4)
now := fc.Now()
ld.TemporalSet.Shards = []LogShard{
{
WindowStart: now.Add(time.Hour * -4),
WindowEnd: now.Add(time.Hour * -2),
URI: "a",
Key: "a",
},
{
WindowStart: now.Add(time.Hour * -2),
WindowEnd: now.Add(time.Hour * 2),
URI: "b",
Key: "b",
},
{
WindowStart: now.Add(time.Hour * 2),
WindowEnd: now.Add(time.Hour * 4),
URI: "c",
Key: "c",
},
}
uri, key, err = ld.Info(now)
test.AssertNotError(t, err, "Info failed")
test.AssertEquals(t, uri, "b")
test.AssertEquals(t, key, "b")
}
47 changes: 31 additions & 16 deletions ctpolicy/ctpolicy.go
@@ -78,7 +78,7 @@ type result struct {
// once it has the first SCT it cancels all of the other submissions and returns.
// It allows up to len(group)-1 of the submissions to fail as we only care about
// getting a single SCT.
func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group cmd.CTGroup) ([]byte, error) {
func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group cmd.CTGroup, expiration time.Time) ([]byte, error) {
results := make(chan result, len(group.Logs))
isPrecert := true
// Randomize the order in which we send requests to the logs in a group
@@ -94,21 +94,26 @@ func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group cmd.CTGr
if ctx.Err() != nil {
return
}
uri, key, err := ld.Info(expiration)
if err != nil {
ctp.log.Errf("unable to get log info: %s", err)
return
}
sct, err := ctp.pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{
LogURL: &ld.URI,
LogPublicKey: &ld.Key,
LogURL: &uri,
LogPublicKey: &key,
Der: cert,
Precert: &isPrecert,
})
if err != nil {
// Only log the error if it is not a result of the context being canceled
if !canceled.Is(err) {
ctp.log.Warningf("ct submission to %q failed: %s", ld.URI, err)
ctp.log.Warningf("ct submission to %q failed: %s", uri, err)
}
results <- result{err: err}
return
}
results <- result{sct: sct.Sct, log: ld.URI}
results <- result{sct: sct.Sct, log: uri}
}(i, ld)
}

Expand All @@ -133,13 +138,13 @@ func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group cmd.CTGr

// GetSCTs attempts to retrieve a SCT from each configured grouping of logs and returns
// the set of SCTs to the caller.
func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER) (core.SCTDERs, error) {
func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) {
results := make(chan result, len(ctp.groups))
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
for i, g := range ctp.groups {
go func(i int, g cmd.CTGroup) {
sct, err := ctp.race(subCtx, cert, g)
sct, err := ctp.race(subCtx, cert, g, expiration)
// Only one of these will be non-nil
if err != nil {
results <- result{err: berrors.MissingSCTsError("CT log group %q: %s", g.Name, err)}
@@ -154,14 +159,19 @@ func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER) (core.SCTDE
// submissions are running in a goroutine and we don't want them to be
// cancelled when the caller of CTPolicy.GetSCTs returns and cancels
// its RPC context.
_, err := ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
LogURL: &l.URI,
LogPublicKey: &l.Key,
uri, key, err := l.Info(expiration)
if err != nil {
ctp.log.Errf("unable to get log info: %s", err)
return
}
_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
LogURL: &uri,
LogPublicKey: &key,
Der: cert,
Precert: &isPrecert,
})
if err != nil {
ctp.log.Warningf("ct submission to informational log %q failed: %s", l.URI, err)
ctp.log.Warningf("ct submission to informational log %q failed: %s", uri, err)
}
}(log)
}
Expand All @@ -182,19 +192,24 @@ func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER) (core.SCTDE

// SubmitFinalCert submits finalized certificates created from precertificates
// to any configured logs
func (ctp *CTPolicy) SubmitFinalCert(cert []byte) {
func (ctp *CTPolicy) SubmitFinalCert(cert []byte, expiration time.Time) {
falseVar := false
for _, log := range ctp.finalLogs {
go func(l cmd.LogDescription) {
_, err := ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
LogURL: &l.URI,
LogPublicKey: &l.Key,
uri, key, err := l.Info(expiration)
if err != nil {
ctp.log.Errf("unable to get log info: %s", err)
return
}
_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
LogURL: &uri,
LogPublicKey: &key,
Der: cert,
Precert: &falseVar,
StoreSCT: &falseVar,
})
if err != nil {
ctp.log.Warningf("ct submission of final cert to log %q failed: %s", l.URI, err)
ctp.log.Warningf("ct submission of final cert to log %q failed: %s", uri, err)
}
}(log)
}
6 changes: 3 additions & 3 deletions ctpolicy/ctpolicy_test.go
Expand Up @@ -117,7 +117,7 @@ func TestGetSCTs(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctp := New(tc.mock, tc.groups, nil, blog.NewMock(), metrics.NewNoopScope())
ret, err := ctp.GetSCTs(tc.ctx, []byte{0})
ret, err := ctp.GetSCTs(tc.ctx, []byte{0}, time.Time{})
if tc.result != nil {
test.AssertDeepEquals(t, ret, tc.result)
} else if tc.errRegexp != nil {
@@ -162,7 +162,7 @@ func TestGetSCTsMetrics(t *testing.T) {
},
},
}, nil, blog.NewMock(), metrics.NewNoopScope())
_, err := ctp.GetSCTs(context.Background(), []byte{0})
_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
test.AssertNotError(t, err, "GetSCTs failed")
test.AssertEquals(t, test.CountCounter(ctp.winnerCounter.With(prometheus.Labels{"log": "ghi", "group": "a"})), 1)
test.AssertEquals(t, test.CountCounter(ctp.winnerCounter.With(prometheus.Labels{"log": "ghi", "group": "b"})), 1)
@@ -190,7 +190,7 @@ func TestStagger(t *testing.T) {
},
},
}, nil, blog.NewMock(), metrics.NewNoopScope())
_, err := ctp.GetSCTs(context.Background(), []byte{0})
_, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{})
test.AssertNotError(t, err, "GetSCTs failed")
if countingPub.count != 1 {
t.Errorf("wrong number of requests to publisher. got %d, expected 1", countingPub.count)