Use caddy's certmagic library for extensible/robust ACME handling (#14177)

* use certmagic for more extensible/robust ACME cert handling

* accept TOS based on config option

Signed-off-by: Andrew Thornton <art27@cantab.net>

Co-authored-by: zeripath <art27@cantab.net>
Co-authored-by: Lauris BH <lauris@nix.lv>

@ -69,7 +69,7 @@ steps:
- name: checks-backend
pull: always
image: golang:1.14
image: golang:1.15
commands:
- make checks-backend
depends_on: [lint-backend]
@ -82,7 +82,7 @@ steps:
- name: build-backend-no-gcc
pull: always
image: golang:1.13 # this step is kept as the lowest version of golang that we support
image: golang:1.14 # this step is kept as the lowest version of golang that we support
environment:
GO111MODULE: on
GOPROXY: off

@ -25,7 +25,7 @@ HAS_GO = $(shell hash $(GO) > /dev/null 2>&1 && echo "GO" || echo "NOGO" )
COMMA := ,
XGO_VERSION := go-1.15.x
MIN_GO_VERSION := 001013000
MIN_GO_VERSION := 001014000
MIN_NODE_VERSION := 010013000
DOCKER_IMAGE ?= gitea/gitea

@ -22,7 +22,6 @@ import (
context2 "github.com/gorilla/context"
"github.com/urfave/cli"
"golang.org/x/crypto/acme/autocert"
ini "gopkg.in/ini.v1"
)
@ -72,36 +71,6 @@ func runHTTPRedirector() {
}
}
func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) error {
certManager := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(domain),
Cache: autocert.DirCache(directory),
Email: email,
}
go func() {
log.Info("Running Let's Encrypt handler on %s", setting.HTTPAddr+":"+setting.PortToRedirect)
// all traffic coming into HTTP will be redirect to HTTPS automatically (LE HTTP-01 validation happens here)
var err = runHTTP("tcp", setting.HTTPAddr+":"+setting.PortToRedirect, certManager.HTTPHandler(http.HandlerFunc(runLetsEncryptFallbackHandler)))
if err != nil {
log.Fatal("Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
}
}()
return runHTTPSWithTLSConfig("tcp", listenAddr, certManager.TLSConfig(), context2.ClearHandler(m))
}
func runLetsEncryptFallbackHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Use HTTPS", http.StatusBadRequest)
return
}
// Remove the trailing slash at the end of setting.AppURL, the request
// URI always contains a leading slash, which would result in a double
// slash
target := strings.TrimSuffix(setting.AppURL, "/") + r.URL.RequestURI()
http.Redirect(w, r, target, http.StatusFound)
}
func runWeb(ctx *cli.Context) error {
managerCtx, cancel := context.WithCancel(context.Background())
graceful.InitManager(managerCtx)

@ -0,0 +1,69 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"net/http"
"strings"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/caddyserver/certmagic"
context2 "github.com/gorilla/context"
)
func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler) error {
// If the HTTP challenge is enabled, it needs to be served on port 80; the TLS-ALPN challenge needs port 443.
// Due to docker port mapping this can't be checked programmatically
// TODO: these are placeholders until we add options for each in settings with appropriate warning
enableHTTPChallenge := true
enableTLSALPNChallenge := true
magic := certmagic.NewDefault()
magic.Storage = &certmagic.FileStorage{Path: directory}
myACME := certmagic.NewACMEManager(magic, certmagic.ACMEManager{
Email: email,
Agreed: setting.LetsEncryptTOS,
DisableHTTPChallenge: !enableHTTPChallenge,
DisableTLSALPNChallenge: !enableTLSALPNChallenge,
})
magic.Issuer = myACME
// this obtains certificates or renews them if necessary
err := magic.ManageSync([]string{domain})
if err != nil {
return err
}
tlsConfig := magic.TLSConfig()
if enableHTTPChallenge {
go func() {
log.Info("Running Let's Encrypt handler on %s", setting.HTTPAddr+":"+setting.PortToRedirect)
// all traffic coming in over HTTP will be redirected to HTTPS automatically (LE HTTP-01 validation happens here)
var err = runHTTP("tcp", setting.HTTPAddr+":"+setting.PortToRedirect, myACME.HTTPChallengeHandler(http.HandlerFunc(runLetsEncryptFallbackHandler)))
if err != nil {
log.Fatal("Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
}
}()
}
return runHTTPSWithTLSConfig("tcp", listenAddr, tlsConfig, context2.ClearHandler(m))
}
func runLetsEncryptFallbackHandler(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Use HTTPS", http.StatusBadRequest)
return
}
// Remove the trailing slash at the end of setting.AppURL, the request
// URI always contains a leading slash, which would result in a double
// slash
target := strings.TrimSuffix(setting.AppURL, "/") + r.URL.RequestURI()
http.Redirect(w, r, target, http.StatusFound)
}

@ -19,7 +19,7 @@ params:
author: The Gitea Authors
website: https://docs.gitea.io
version: 1.13.1
minGoVersion: 1.13
minGoVersion: 1.14
goVersion: 1.15
minNodeVersion: 10.13

@ -24,6 +24,7 @@ require (
github.com/andybalholm/brotli v1.0.1 // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/blevesearch/bleve/v2 v2.0.1
github.com/caddyserver/certmagic v0.12.0
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/denisenkom/go-mssqldb v0.9.0
github.com/dgrijalva/jwt-go v3.2.0+incompatible

@ -196,6 +196,8 @@ github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/caddyserver/certmagic v0.12.0 h1:1f7kxykaJkOVVpXJ8ZrC6RAO5F6+kKm9U7dBFbLNeug=
github.com/caddyserver/certmagic v0.12.0/go.mod h1:tr26xh+9fY5dN0J6IPAlMj07qpog22PJKa7Nw7j835U=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -668,6 +670,7 @@ github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGAR
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@ -704,6 +707,7 @@ github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.5/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -734,6 +738,8 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.8.1-0.20200908161135-083382b7e6fc h1:ERSU1OvZ6MdWhHieo2oT7xwR/HCksqKdgK6iYPU5pHI=
github.com/lib/pq v1.8.1-0.20200908161135-083382b7e6fc/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libdns/libdns v0.1.0 h1:0ctCOrVJsVzj53mop1angHp/pE3hmAhP7KiHvR0HD04=
github.com/libdns/libdns v0.1.0/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lunny/bluemonday v1.0.5-0.20201227154428-ca34796141e8 h1:1omo92DLtxQu6VwVPSZAmduHaK5zssed6cvkHyl1XOg=
@ -790,9 +796,13 @@ github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc8
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
github.com/mgechev/revive v1.0.3-0.20200921231451-246eac737dc7 h1:ydVkpU/M4/c45yT3e5lzMeguKJm9GxGgsawx4/XlwK0=
github.com/mgechev/revive v1.0.3-0.20200921231451-246eac737dc7/go.mod h1:no/hfevHbndpXR5CaJahkYCfM/FFpmM/dSOwFGU7Z1o=
github.com/mholt/acmez v0.1.1 h1:KQODCqk+hBn3O7qfCRPj6L96uG65T5BSS95FKNEqtdA=
github.com/mholt/acmez v0.1.1/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo=
github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go/v7 v7.0.6 h1:9czXaG0LEZ9s74smSqy0rm034MxngQoP6HTTuSc5GEs=
@ -1124,12 +1134,19 @@ go.opentelemetry.io/otel v0.14.0/go.mod h1:vH5xEuwy7Rts0GNtsCW3HYQoZDY+OmBJ6t1bF
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -1153,6 +1170,8 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620 h1:3wPMTskHO3+O6jqTEXyFcsnuxMQOqYSaHsDxcbUXpqA=
@ -1178,6 +1197,7 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@ -1254,6 +1274,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1286,6 +1307,7 @@ golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1368,6 +1390,7 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@ -1546,6 +1569,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
mvdan.cc/xurls/v2 v2.2.0 h1:NSZPykBXJFCetGZykLAxaL6SIpvbVy/UFEniIfHAa8A=
mvdan.cc/xurls/v2 v2.2.0/go.mod h1:EV1RMtya9D6G5DMYPGD8zTQzaHet6Jh8gFlRgGRJeO8=

@ -0,0 +1 @@
_gitignore/

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,526 @@
<p align="center">
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://user-images.githubusercontent.com/1128849/49704830-49d37200-fbd5-11e8-8385-767e0cd033c3.png" alt="CertMagic" width="550"></a>
</p>
<h3 align="center">Easy and Powerful TLS Automation</h3>
<p align="center">The same library used by the <a href="https://caddyserver.com">Caddy Web Server</a></p>
<p align="center">
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
<a href="https://github.com/caddyserver/certmagic/actions?query=workflow%3ATests"><img src="https://github.com/caddyserver/certmagic/workflows/Tests/badge.svg"></a>
<a href="https://sourcegraph.com/github.com/caddyserver/certmagic?badge"><img src="https://sourcegraph.com/github.com/caddyserver/certmagic/-/badge.svg"></a>
</p>
Caddy's automagic TLS features&mdash;now for your own Go programs&mdash;in one powerful and easy-to-use library!
CertMagic is the most mature, robust, and capable ACME client integration for Go... and perhaps ever.
With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates.
Instead of:
```go
// plaintext HTTP, gross 🤢
http.ListenAndServe(":80", mux)
```
Use CertMagic:
```go
// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😍
certmagic.HTTPS([]string{"example.com"}, mux)
```
That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure.
Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability.
CertMagic - Automatic HTTPS using Let's Encrypt
===============================================
**Sponsored by Relica - Cross-platform local and cloud file backup:**
<a href="https://relicabackup.com"><img src="https://caddyserver.com/resources/images/sponsors/relica.png" width="220" alt="Relica - Cross-platform file backup to the cloud, local disks, or other computers"></a>
## Menu
- [Features](#features)
- [Requirements](#requirements)
- [Installation](#installation)
- [Usage](#usage)
- [Package Overview](#package-overview)
- [Certificate authority](#certificate-authority)
- [The `Config` type](#the-config-type)
- [Defaults](#defaults)
- [Providing an email address](#providing-an-email-address)
- [Rate limiting](#rate-limiting)
- [Development and testing](#development-and-testing)
- [Examples](#examples)
- [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https)
- [Starting a TLS listener](#starting-a-tls-listener)
- [Getting a tls.Config](#getting-a-tlsconfig)
- [Advanced use](#advanced-use)
- [Wildcard Certificates](#wildcard-certificates)
- [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster)
- [The ACME Challenges](#the-acme-challenges)
- [HTTP Challenge](#http-challenge)
- [TLS-ALPN Challenge](#tls-alpn-challenge)
- [DNS Challenge](#dns-challenge)
- [On-Demand TLS](#on-demand-tls)
- [Storage](#storage)
- [Cache](#cache)
- [Contributing](#contributing)
- [Project History](#project-history)
- [Credits and License](#credits-and-license)
## Features
- Fully automated certificate management including issuance and renewal
- One-liner, fully managed HTTPS servers
- Full control over almost every aspect of the system
- HTTP->HTTPS redirects
- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS
- Most robust error handling of _any_ ACME client
- Challenges are randomized to avoid accidental dependence
- Challenges are rotated to overcome certain network blockages
- Robust retries for up to 30 days
- Exponential backoff with carefully-tuned intervals
- Retries with optional test/staging CA endpoint instead of production, to avoid rate limits
- Written in Go, a language with memory-safety guarantees
- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go
- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box
- Pluggable storage implementations (default: file system)
- Wildcard certificates
- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226)
- Will [automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)!
- Staples stored to disk in case of responder outages
- Distributed solving of all challenges (works behind load balancers)
- Highly efficient, coordinated management in a fleet
- Active locking
- Smart queueing
- Supports "on-demand" issuance of certificates (during TLS handshakes!)
- Caddy / CertMagic pioneered this technology
- Custom decision functions to regulate and throttle on-demand behavior
- Optional event hooks for observation
- Works with any certificate authority (CA) compliant with the ACME specification
- Certificate revocation (please, only if private key is compromised)
- Must-Staple (optional; not default)
- Cross-platform support! Mac, Windows, Linux, BSD, Android...
- Scales to hundreds of thousands of names/certificates per instance
- Use in conjunction with your own certificates
## Requirements
1. Public DNS name(s) you control
2. Server reachable from public Internet
- Or use the DNS challenge to waive this requirement
3. Control over port 80 (HTTP) and/or 443 (HTTPS)
- Or they can be forwarded to other ports you control
- Or use the DNS challenge to waive this requirement
- (This is a requirement of the ACME protocol, not a library limitation)
4. Persistent storage
- Typically the local file system (default)
- Other integrations available/possible
**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_**
## Installation
```bash
$ go get github.com/caddyserver/certmagic
```
## Usage
### Package Overview
#### Certificate authority
This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`.
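For instance, selecting a CA via those consts is a one-line assignment on the ACME defaults (a minimal sketch; the production CA is already the default):
```go
// point the ACME defaults at one of the provided CA consts
certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA
// or, while developing:
// certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
```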
#### The `Config` type
The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have.
#### Defaults
The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly.
You can set the default values easily, for example: `certmagic.Default.Issuer = ...`.
Similarly, to configure ACME-specific defaults, use `certmagic.DefaultACME`.
The high-level functions in this package (`HTTPS()`, `Listen()`, `ManageSync()`, and `ManageAsync()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading).
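As a small sketch of that workflow, you might tweak the package-level defaults once at startup and then rely on the high-level functions (the storage path here is hypothetical):
```go
// adjust the templates once, before calling HTTPS()/Listen()/ManageSync()
certmagic.Default.Storage = &certmagic.FileStorage{Path: "/var/lib/certmagic"} // hypothetical path
certmagic.DefaultACME.Email = "you@yours.com"
certmagic.DefaultACME.Agreed = true
```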
#### Providing an email address
Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.DefaultACME.Email` or always setting the `Email` field of a new `Config` struct.
#### Rate limiting
To avoid firehosing the CA's servers, CertMagic has built-in rate limiting. Currently, its default limit is up to 10 transactions (obtain or renew) every 1 minute (sliding window). This can be changed by setting the `RateLimitEvents` and `RateLimitEventsWindow` variables, if desired.
The CA may still enforce their own rate limits, and there's nothing (well, nothing ethical) CertMagic can do to bypass them for you.
Additionally, CertMagic will retry failed validations with exponential backoff for up to 30 days, with a reasonable maximum interval between attempts (an "attempt" means trying each enabled challenge type once).
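A sketch of adjusting that limit, assuming the two package-level variables are exported exactly as named above:
```go
// assumed names from the paragraph above; values are only illustrative
certmagic.RateLimitEvents = 20
certmagic.RateLimitEventsWindow = 1 * time.Minute // requires the "time" import
```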
### Development and Testing
Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful!
While developing and testing your application, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/), which has much higher rate limits. Even then, don't hammer it, but it is much safer for testing. When deploying, though, use their production CA, because their staging CA doesn't issue trusted certificates.
To use staging, set `certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `ACMEManager` struct.
### Examples
There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control).
All these high-level examples use `certmagic.Default` and `certmagic.DefaultACME` for the config and the default cache and storage for serving up certificates.
First, we'll follow best practices and do the following:
```go
// read and agree to your CA's legal documents
certmagic.DefaultACME.Agreed = true
// provide an email address
certmagic.DefaultACME.Email = "you@yours.com"
// use the staging endpoint while we're developing
certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
```
For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.)
#### Serving HTTP handlers with HTTPS
```go
err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux)
if err != nil {
return err
}
```
This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS!
#### Starting a TLS listener
```go
ln, err := certmagic.Listen([]string{"example.com"})
if err != nil {
return err
}
```
#### Getting a tls.Config
```go
tlsConfig, err := certmagic.TLS([]string{"example.com"})
if err != nil {
return err
}
```
#### Advanced use
For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so:
```go
cache := certmagic.NewCache(certmagic.CacheOptions{
GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
// do whatever you need to do to get the right
// configuration for this certificate; keep in
// mind that this config value is used as a
// template, and will be completed with any
// defaults that are set in the Default config
return &certmagic.Config{
// ...
}, nil
},
...
})
magic := certmagic.New(cache, certmagic.Config{
// any customizations you need go here
})
myACME := certmagic.NewACMEManager(magic, certmagic.ACMEManager{
CA: certmagic.LetsEncryptStagingCA,
Email: "you@yours.com",
Agreed: true,
// plus any other customizations you need
})
magic.Issuer = myACME
// this obtains certificates or renews them if necessary
err := magic.ManageSync([]string{"example.com", "sub.example.com"})
if err != nil {
return err
}
// to use its certificates and solve the TLS-ALPN challenge,
// you can get a TLS config to use in a TLS listener!
tlsConfig := magic.TLSConfig()
//// OR ////
// if you already have a TLS config you don't want to replace,
// we can simply set its GetCertificate field and append the
// TLS-ALPN challenge protocol to the NextProtos
myTLSConfig.GetCertificate = magic.GetCertificate
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
// the HTTP challenge has to be handled by your HTTP server;
// if you don't have one, you should have disabled it earlier
// when you made the certmagic.Config
httpMux = myACME.HTTPChallengeHandler(httpMux)
```
Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config.
### Wildcard certificates
At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs).
### Behind a load balancer (or in a cluster)
CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples.
To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster.
The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :)
See [Storage](#storage) and the associated [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage) for more information!
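As a minimal sketch, pointing every instance at the same file-system path (for example a mounted shared volume; the path below is hypothetical) is enough to form a cluster:
```go
// instances sharing this storage are treated as one cluster
magic := certmagic.NewDefault()
magic.Storage = &certmagic.FileStorage{Path: "/mnt/shared/certmagic"} // hypothetical shared mount
```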
## The ACME Challenges
This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??).
If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed.
The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively.
Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018&mdash;many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however&mdash;this library's forerunner&mdash;was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. And if one fails, any remaining enabled challenges are tried before giving up.
### HTTP Challenge
Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response that only the genuine server would have to a normal HTTP request at a special endpoint.
If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method.
For example, if you're using the standard library:
```go
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "Lookit my cool website over HTTPS!")
})
http.ListenAndServe(":80", myACME.HTTPChallengeHandler(mux))
```
If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead:
```go
magic := certmagic.NewDefault()
myACME := certmagic.NewACMEManager(magic, certmagic.DefaultACME)
func ServeHTTP(w http.ResponseWriter, req *http.Request) {
if myACME.HandleHTTPChallenge(w, req) {
return // challenge handled; nothing else to do
}
...
}
```
If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge.
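For example, disabling it is a single field on the ACME manager; this is the same `DisableHTTPChallenge` field toggled in the Gitea code earlier in this diff (a sketch, not a complete program):
```go
magic := certmagic.NewDefault()
myACME := certmagic.NewACMEManager(magic, certmagic.ACMEManager{
	DisableHTTPChallenge: true, // no HTTP listener available to answer HTTP-01
})
magic.Issuer = myACME
```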
### TLS-ALPN Challenge
Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port which is where the certificates are used, also.
This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener:
```go
// use this to configure a TLS listener
tlsConfig := magic.TLSConfig()
```
Or make two simple changes to an existing `tls.Config`:
```go
myTLSConfig.GetCertificate = magic.GetCertificate
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
```
Then just make sure your TLS listener is listening on port 443:
```go
ln, err := tls.Listen("tcp", ":443", myTLSConfig)
```
### DNS Challenge
The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates.
This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all DNS providers with `libdns` implementations](https://github.com/libdns)! It always cleans up the temporary record after the challenge completes.
To enable it, just set the `DNS01Solver` field on a `certmagic.ACMEManager` struct, or set the default `certmagic.ACMEManager.DNS01Solver` variable. For example, if my domains' DNS was served by Cloudflare:
```go
import "github.com/libdns/cloudflare"
certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
DNSProvider: cloudflare.Provider{
APIToken: "topsecret",
},
}
```
Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains, too. Enabling the DNS challenge disables the other challenges for that `certmagic.ACMEManager` instance.
## On-Demand TLS
Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time, or you don't want to manage all the certificates up front. This is where On-Demand TLS shines.
Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary or specific names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or if one does not exist, it will obtain it from a CA right then-and-there.
Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, when you enable On-Demand issuance, you should set limits or policy to allow getting certificates. CertMagic has an implicit whitelist built-in which is sufficient for nearly everyone, but also has a more advanced way to control on-demand issuance.
The simplest way to enable on-demand issuance is to set the OnDemand field of a Config (or the default package-level value):
```go
certmagic.Default.OnDemand = new(certmagic.OnDemandConfig)
```
By setting this to a non-nil value, on-demand TLS is enabled for that config. For convenient security, CertMagic's high-level abstraction functions such as `HTTPS()`, `TLS()`, `ManageSync()`, `ManageAsync()`, and `Listen()` (which all accept a list of domain names) will whitelist those names automatically so only certificates for those names can be obtained when using the Default config. Usually this is sufficient for most users.
However, if you require advanced control over which domains can be issued certificates on-demand (for example, if you do not know which domain names you are managing, or just need to defer their operations until later), you should implement your own DecisionFunc:
```go
// if the decision function returns an error, a certificate
// may not be obtained for that name at that time
certmagic.Default.OnDemand = &certmagic.OnDemandConfig{
DecisionFunc: func(name string) error {
if name != "example.com" {
return fmt.Errorf("not allowed")
}
return nil
},
}
```
The [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#OnDemandConfig) describes how to use this in full detail, so please check it out!
## Storage
CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates.
By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it!
The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store.
The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization.
If you write a Storage implementation, please add it to the [project wiki](https://github.com/caddyserver/certmagic/wiki/Storage-Implementations) so people can find it!
## Cache
All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above.
Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache.
Again, if you're needing to do this, you've probably over-complicated your application design.
## FAQ
### Can I use some of my own certificates while using CertMagic?
Yes, just call the relevant method on the `Config` to add your own certificate to the cache:
- [`CacheUnmanagedCertificatePEMBytes()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMBytes)
- [`CacheUnmanagedCertificatePEMFile()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMFile)
- [`CacheUnmanagedTLSCertificate()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedTLSCertificate)
Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify.
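A hedged sketch of the PEM-file variant (paths are hypothetical; the trailing tags argument is optional metadata and may differ slightly between versions):
```go
magic := certmagic.NewDefault()
// cache an unmanaged certificate alongside the managed ones
if err := magic.CacheUnmanagedCertificatePEMFile("/etc/ssl/site.crt", "/etc/ssl/site.key", nil); err != nil {
	return err
}
```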
### Does CertMagic obtain SAN certificates?
Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own.
### How can I listen on ports 80 and 443? Do I have to run as root?
On Linux, you can use `setcap` to grant your binary the permission to bind low ports:
```bash
$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary
```
and then you will not need to run with root privileges.
## Contributing
We welcome your contributions! Please see our **[contributing guidelines](https://github.com/caddyserver/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions.
## Project History
CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [ACMEz](https://github.com/mholt/acmez). CertMagic's code was originally a central part of Caddy even before Let's Encrypt entered public beta in 2015.
In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections.
Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects.
Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline.
Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name.
Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs.
You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs:
[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme)
## Credits and License
CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy.
CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text):
- The author owns the copyright to this code
- Use, distribute, and modify the software freely
- Private and internal use is allowed
- License text and copyright notices must stay intact and be included with distributions
- Any and all changes to the code must be documented

@ -0,0 +1,369 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"bufio"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"os"
"path"
"sort"
"strings"
"github.com/mholt/acmez/acme"
)
// getAccount either loads or creates a new account, depending on if
// an account can be found in storage for the given CA + email combo.
func (am *ACMEManager) getAccount(ca, email string) (acme.Account, error) {
regBytes, err := am.config.Storage.Load(am.storageKeyUserReg(ca, email))
if err != nil {
if _, ok := err.(ErrNotExist); ok {
return am.newAccount(email)
}
return acme.Account{}, err
}
keyBytes, err := am.config.Storage.Load(am.storageKeyUserPrivateKey(ca, email))
if err != nil {
if _, ok := err.(ErrNotExist); ok {
return am.newAccount(email)
}
return acme.Account{}, err
}
var acct acme.Account
err = json.Unmarshal(regBytes, &acct)
if err != nil {
return acct, err
}
acct.PrivateKey, err = decodePrivateKey(keyBytes)
if err != nil {
return acct, fmt.Errorf("could not decode account's private key: %v", err)
}
// TODO: July 2020 - transition to new ACME lib and account structure;
// for a while, we will need to convert old accounts to new structure
acct, err = am.transitionAccountToACMEzJuly2020Format(ca, acct, regBytes)
if err != nil {
return acct, fmt.Errorf("one-time account transition: %v", err)
}
return acct, err
}
// TODO: this is a temporary transition helper starting July 2020.
// It can go away when we think enough time has passed that most active assets have transitioned.
func (am *ACMEManager) transitionAccountToACMEzJuly2020Format(ca string, acct acme.Account, regBytes []byte) (acme.Account, error) {
if acct.Status != "" && acct.Location != "" {
return acct, nil
}
var oldAcct struct {
Email string `json:"Email"`
Registration struct {
Body struct {
Status string `json:"status"`
TermsOfServiceAgreed bool `json:"termsOfServiceAgreed"`
Orders string `json:"orders"`
ExternalAccountBinding json.RawMessage `json:"externalAccountBinding"`
} `json:"body"`
URI string `json:"uri"`
} `json:"Registration"`
}
err := json.Unmarshal(regBytes, &oldAcct)
if err != nil {
return acct, fmt.Errorf("decoding into old account type: %v", err)
}
acct.Status = oldAcct.Registration.Body.Status
acct.TermsOfServiceAgreed = oldAcct.Registration.Body.TermsOfServiceAgreed
acct.Location = oldAcct.Registration.URI
acct.ExternalAccountBinding = oldAcct.Registration.Body.ExternalAccountBinding
acct.Orders = oldAcct.Registration.Body.Orders
if oldAcct.Email != "" {
acct.Contact = []string{"mailto:" + oldAcct.Email}
}
err = am.saveAccount(ca, acct)
if err != nil {
return acct, fmt.Errorf("saving converted account: %v", err)
}
return acct, nil
}
// newAccount generates a new private key for a new ACME account, but
// it does not register or save the account.
func (*ACMEManager) newAccount(email string) (acme.Account, error) {
var acct acme.Account
if email != "" {
acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme?
}
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return acct, fmt.Errorf("generating private key: %v", err)
}
acct.PrivateKey = privateKey
return acct, nil
}
// saveAccount persists an ACME account's info and private key to storage.
// It does NOT register the account via ACME or prompt the user.
func (am *ACMEManager) saveAccount(ca string, account acme.Account) error {
regBytes, err := json.MarshalIndent(account, "", "\t")
if err != nil {
return err
}
keyBytes, err := encodePrivateKey(account.PrivateKey)
if err != nil {
return err
}
// extract primary contact (email), without scheme (e.g. "mailto:")
primaryContact := getPrimaryContact(account)
all := []keyValue{
{
key: am.storageKeyUserReg(ca, primaryContact),
value: regBytes,
},
{
key: am.storageKeyUserPrivateKey(ca, primaryContact),
value: keyBytes,
},
}
return storeTx(am.config.Storage, all)
}
// getEmail does everything it can to obtain an email address
// from the user within the scope of memory and storage to use
// for ACME TLS. If it cannot get an email address, it does nothing.
// (If the user is prompted, they will be warned of
// the consequences of an empty email.) This function MAY prompt
// the user for input. If allowPrompts is false, the user
// will NOT be prompted and an empty email may be returned.
func (am *ACMEManager) getEmail(allowPrompts bool) error {
leEmail := am.Email
// First try package default email
if leEmail == "" {
leEmail = DefaultACME.Email // TODO: racey with line 122 (or whichever line assigns to DefaultACME.Email below)
}
// Then try to get most recent user email from storage
var gotRecentEmail bool
if leEmail == "" {
leEmail, gotRecentEmail = am.mostRecentAccountEmail(am.CA)
}
if !gotRecentEmail && leEmail == "" && allowPrompts {
// Looks like there is no email address readily available,
// so we will have to ask the user if we can.
var err error
leEmail, err = am.promptUserForEmail()
if err != nil {
return err
}
// User might have just signified their agreement
am.Agreed = DefaultACME.Agreed
}
// save the email for later and ensure it is consistent
// for repeated use; then update cfg with the email
DefaultACME.Email = strings.TrimSpace(strings.ToLower(leEmail)) // TODO: this is racey with the read of DefaultACME.Email above
am.Email = DefaultACME.Email
return nil
}
// promptUserForEmail prompts the user for an email address
// and returns the email address they entered (which could
// be the empty string). If no error is returned, then Agreed
// will also be set to true, since continuing through the
// prompt signifies agreement.
func (am *ACMEManager) promptUserForEmail() (string, error) {
// prompt the user for an email address and terms agreement
reader := bufio.NewReader(stdin)
am.promptUserAgreement("")
fmt.Println("Please enter your email address to signify agreement and to be notified")
fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.")
fmt.Print(" Email address: ")
leEmail, err := reader.ReadString('\n')
if err != nil && err != io.EOF {
return "", fmt.Errorf("reading email address: %v", err)
}
leEmail = strings.TrimSpace(leEmail)
DefaultACME.Agreed = true
return leEmail, nil
}
// promptUserAgreement simply outputs the standard user
// agreement prompt with the given agreement URL.
// It outputs a newline after the message.
func (am *ACMEManager) promptUserAgreement(agreementURL string) {
userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA.
By continuing, you agree to the CA's terms of service`
if agreementURL == "" {
fmt.Printf("\n\n%s.\n", userAgreementPrompt)
return
}
fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL)
}
// askUserAgreement prompts the user to agree to the agreement
// at the given agreement URL via stdin. It returns whether the
// user agreed or not.
func (am *ACMEManager) askUserAgreement(agreementURL string) bool {
am.promptUserAgreement(agreementURL)
fmt.Print("Do you agree to the terms? (y/n): ")
reader := bufio.NewReader(stdin)
answer, err := reader.ReadString('\n')
if err != nil {
return false
}
answer = strings.ToLower(strings.TrimSpace(answer))
return answer == "y" || answer == "yes"
}
func (am *ACMEManager) storageKeyCAPrefix(caURL string) string {
return path.Join(prefixACME, StorageKeys.Safe(am.issuerKey(caURL)))
}
func (am *ACMEManager) storageKeyUsersPrefix(caURL string) string {
return path.Join(am.storageKeyCAPrefix(caURL), "users")
}
func (am *ACMEManager) storageKeyUserPrefix(caURL, email string) string {
if email == "" {
email = emptyEmail
}
return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email))
}
func (am *ACMEManager) storageKeyUserReg(caURL, email string) string {
return am.storageSafeUserKey(caURL, email, "registration", ".json")
}
func (am *ACMEManager) storageKeyUserPrivateKey(caURL, email string) string {
return am.storageSafeUserKey(caURL, email, "private", ".key")
}
// storageSafeUserKey returns a key for the given email, with the default
// filename, and the filename ending in the given extension.
func (am *ACMEManager) storageSafeUserKey(ca, email, defaultFilename, extension string) string {
if email == "" {
email = emptyEmail
}
email = strings.ToLower(email)
filename := am.emailUsername(email)
if filename == "" {
filename = defaultFilename
}
filename = StorageKeys.Safe(filename)
return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)
}
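// For illustration (the email below is a placeholder): with the Let's Encrypt
// production directory, the two keys built above resolve roughly to
//
//	acme/acme-v02.api.letsencrypt.org-directory/users/admin@example.com/admin.json
//	acme/acme-v02.api.letsencrypt.org-directory/users/admin@example.com/admin.key
//
// where the middle component comes from issuerKey and StorageKeys.Safe, and the
// final filename is the username part of the email address.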
// emailUsername returns the username portion of an email address (part before
// '@') or the original input if it can't find the "@" symbol.
func (*ACMEManager) emailUsername(email string) string {
at := strings.Index(email, "@")
if at == -1 {
return email
} else if at == 0 {
return email[1:]
}
return email[:at]
}
// mostRecentAccountEmail finds the most recently-written account file
// in storage. Since this is part of a complex sequence to get a user
// account, errors here are discarded to simplify code flow in
// the caller, and errors are not important here anyway.
func (am *ACMEManager) mostRecentAccountEmail(caURL string) (string, bool) {
accountList, err := am.config.Storage.List(am.storageKeyUsersPrefix(caURL), false)
if err != nil || len(accountList) == 0 {
return "", false
}
// get all the key infos ahead of sorting, because
// we might filter some out
stats := make(map[string]KeyInfo)
for i, u := range accountList {
keyInfo, err := am.config.Storage.Stat(u)
if err != nil {
continue
}
if keyInfo.IsTerminal {
// I found a bug when macOS created a .DS_Store file in
// the users folder, and CertMagic tried to use that as
// the user email because it was newer than the other one
// which existed... sure, this isn't a perfect fix but
// frankly one's OS shouldn't mess with the data folder
// in the first place.
accountList = append(accountList[:i], accountList[i+1:]...)
continue
}
stats[u] = keyInfo
}
sort.Slice(accountList, func(i, j int) bool {
iInfo := stats[accountList[i]]
jInfo := stats[accountList[j]]
return jInfo.Modified.Before(iInfo.Modified)
})
if len(accountList) == 0 {
return "", false
}
account, err := am.getAccount(caURL, path.Base(accountList[0]))
if err != nil {
return "", false
}
return getPrimaryContact(account), true
}
// getPrimaryContact returns the first contact on the account (if any)
// without the scheme. (I guess we assume an email address.)
func getPrimaryContact(account acme.Account) string {
// TODO: should this be abstracted with some lower-level helper?
var primaryContact string
if len(account.Contact) > 0 {
primaryContact = account.Contact[0]
if idx := strings.Index(primaryContact, ":"); idx >= 0 {
primaryContact = primaryContact[idx+1:]
}
}
return primaryContact
}
// agreementTestURL is set during tests to skip requiring
// setting up an entire ACME CA endpoint.
var agreementTestURL string
// stdin is used to read the user's input if prompted;
// this is changed by tests during tests.
var stdin = io.ReadWriter(os.Stdin)
// The name of the folder for accounts where the email
// address was not provided; default 'username' if you will,
// but only for local/storage use, not with the CA.
const emptyEmail = "default"

@ -0,0 +1,339 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
weakrand "math/rand"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
}
// acmeClient holds state necessary for us to perform
// ACME operations for certificate management. Call
// ACMEManager.newACMEClient() to get a valid one to use.
type acmeClient struct {
mgr *ACMEManager
acmeClient *acmez.Client
account acme.Account
}
// newACMEClient creates the underlying ACME library client type.
// If useTestCA is true, am.TestCA will be used if it is set;
// otherwise, the primary CA will still be used.
func (am *ACMEManager) newACMEClient(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) {
// ensure defaults are filled in
var caURL string
if useTestCA {
caURL = am.TestCA
}
if caURL == "" {
caURL = am.CA
}
if caURL == "" {
caURL = DefaultACME.CA
}
certObtainTimeout := am.CertObtainTimeout
if certObtainTimeout == 0 {
certObtainTimeout = DefaultACME.CertObtainTimeout
}
// ensure endpoint is secure (assume HTTPS if scheme is missing)
if !strings.Contains(caURL, "://") {
caURL = "https://" + caURL
}
u, err := url.Parse(caURL)
if err != nil {
return nil, err
}
if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) {
return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL)
}
// look up or create the ACME account
account, err := am.getAccount(caURL, am.Email)
if err != nil {
return nil, fmt.Errorf("getting ACME account: %v", err)
}
// set up the dialers and resolver for the ACME client's HTTP client
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 2 * time.Minute,
}
if am.Resolver != "" {
dialer.Resolver = &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
return (&net.Dialer{
Timeout: 15 * time.Second,
}).DialContext(ctx, network, am.Resolver)
},
}
}
// TODO: we could potentially reuse the HTTP transport and client
hc := am.httpClient // TODO: is this racey?
if am.httpClient == nil {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: dialer.DialContext,
TLSHandshakeTimeout: 15 * time.Second,
ResponseHeaderTimeout: 15 * time.Second,
ExpectContinueTimeout: 2 * time.Second,
ForceAttemptHTTP2: true,
}
if am.TrustedRoots != nil {
transport.TLSClientConfig = &tls.Config{
RootCAs: am.TrustedRoots,
}
}
hc = &http.Client{
Transport: transport,
Timeout: HTTPTimeout,
}
am.httpClient = hc
}
client := &acmez.Client{
Client: &acme.Client{
Directory: caURL,
PollTimeout: certObtainTimeout,
UserAgent: buildUAString(),
HTTPClient: hc,
},
ChallengeSolvers: make(map[string]acmez.Solver),
}
if am.Logger != nil {
l := am.Logger.Named("acme_client")
client.Client.Logger, client.Logger = l, l
}
// configure challenges (most of the time, DNS challenge is
// exclusive of other ones because it is usually only used
// in situations where the default challenges would fail)
if am.DNS01Solver == nil {
// enable HTTP-01 challenge
if !am.DisableHTTPChallenge {
useHTTPPort := HTTPChallengePort
if HTTPPort > 0 && HTTPPort != HTTPChallengePort {
useHTTPPort = HTTPPort
}
if am.AltHTTPPort > 0 {
useHTTPPort = am.AltHTTPPort
}
client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{
acmeManager: am,
solver: &httpSolver{
acmeManager: am,
address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useHTTPPort)),
},
caURL: client.Directory,
}
}
// enable TLS-ALPN-01 challenge
if !am.DisableTLSALPNChallenge {
useTLSALPNPort := TLSALPNChallengePort
if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort {
useTLSALPNPort = HTTPSPort
}
if am.AltTLSALPNPort > 0 {
useTLSALPNPort = am.AltTLSALPNPort
}
client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{
acmeManager: am,
solver: &tlsALPNSolver{
config: am.config,
address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useTLSALPNPort)),
},
caURL: client.Directory,
}
}
} else {
// use DNS challenge exclusively
client.ChallengeSolvers[acme.ChallengeTypeDNS01] = am.DNS01Solver
}
// register account if it is new
if account.Status == "" {
if am.NewAccountFunc != nil {
err = am.NewAccountFunc(ctx, am, account)
if err != nil {
return nil, fmt.Errorf("account pre-registration callback: %v", err)
}
}
// agree to terms
if interactive {
if !am.Agreed {
var termsURL string
dir, err := client.GetDirectory(ctx)
if err != nil {
return nil, fmt.Errorf("getting directory: %w", err)
}
if dir.Meta != nil {
termsURL = dir.Meta.TermsOfService
}
if termsURL != "" {
am.Agreed = am.askUserAgreement(termsURL)
if !am.Agreed {
return nil, fmt.Errorf("user must agree to CA terms")
}
}
}
} else {
// can't prompt a user who isn't there; they should
// have reviewed the terms beforehand
am.Agreed = true
}
account.TermsOfServiceAgreed = am.Agreed
// associate account with external binding, if configured
if am.ExternalAccount != nil {
err := account.SetExternalAccountBinding(ctx, client.Client, *am.ExternalAccount)
if err != nil {
return nil, err
}
}
// create account
account, err = client.NewAccount(ctx, account)
if err != nil {
return nil, fmt.Errorf("registering account with server: %w", err)
}
// persist the account to storage
err = am.saveAccount(caURL, account)
if err != nil {
return nil, fmt.Errorf("could not save account: %v", err)
}
}
c := &acmeClient{
mgr: am,
acmeClient: client,
account: account,
}
return c, nil
}
func (c *acmeClient) throttle(ctx context.Context, names []string) error {
// throttling is scoped to CA + account email
rateLimiterKey := c.acmeClient.Directory + "," + c.mgr.Email
rateLimitersMu.Lock()
rl, ok := rateLimiters[rateLimiterKey]
if !ok {
rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow)
rateLimiters[rateLimiterKey] = rl
// TODO: stop rate limiter when it is garbage-collected...
}
rateLimitersMu.Unlock()
if c.mgr.Logger != nil {
c.mgr.Logger.Info("waiting on internal rate limiter", zap.Strings("identifiers", names))
}
err := rl.Wait(ctx)
if err != nil {
return err
}
if c.mgr.Logger != nil {
c.mgr.Logger.Info("done waiting on internal rate limiter", zap.Strings("identifiers", names))
}
return nil
}
func (c *acmeClient) usingTestCA() bool {
return c.mgr.TestCA != "" && c.acmeClient.Directory == c.mgr.TestCA
}
func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error {
return c.acmeClient.RevokeCertificate(ctx, c.account,
cert, c.account.PrivateKey, reason)
}
func buildUAString() string {
ua := "CertMagic"
if UserAgent != "" {
ua = UserAgent + " " + ua
}
return ua
}
// These internal rate limits are designed to prevent accidentally
// firehosing a CA's ACME endpoints. They are not intended to
// replace or replicate the CA's actual rate limits.
//
// Let's Encrypt's rate limits can be found here:
// https://letsencrypt.org/docs/rate-limits/
//
// Currently (as of December 2019), Let's Encrypt's most relevant
// rate limit for large deployments is 300 new orders per account
// per 3 hours (on average, or best case, that's about 1 every 36
// seconds, or 2 every 72 seconds, etc.); but it's not reasonable
// to try to assume that our internal state is the same as the CA's
// (due to process restarts, config changes, failed validations,
// etc.) and ultimately, only the CA's actual rate limiter is the
// authority. Thus, our own rate limiters do not attempt to enforce
// external rate limits. Doing so causes problems when the domains
// are not in our control (i.e. serving customer sites) and/or lots
// of domains fail validation: they clog our internal rate limiter
// and nearly starve out (or at least slow down) the other domains
// that need certificates. Failed transactions are already retried
// with exponential backoff, so adding in rate limiting can slow
// things down even more.
//
// Instead, the point of our internal rate limiter is to avoid
// hammering the CA's endpoint when there are thousands or even
// millions of certificates under management. Our goal is to
// allow small bursts in a relatively short timeframe so as to
// not block any one domain for too long, without unleashing
// thousands of requests to the CA at once.
var (
rateLimiters = make(map[string]*RingBufferRateLimiter)
rateLimitersMu sync.RWMutex
// RateLimitEvents is how many new events can be allowed
// in RateLimitEventsWindow.
RateLimitEvents = 10
// RateLimitEventsWindow is the size of the sliding
// window that throttles events.
RateLimitEventsWindow = 1 * time.Minute
)
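// For illustration (values are arbitrary, not recommendations): because
// RateLimitEvents and RateLimitEventsWindow are exported package variables,
// an embedding program could loosen or tighten this internal throttle before
// kicking off a large batch of issuances, e.g.
//
//	certmagic.RateLimitEvents = 20
//	certmagic.RateLimitEventsWindow = 30 * time.Second
//
// The CA's own rate limits still apply regardless of these values.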
// Some default values passed down to the underlying ACME client.
var (
UserAgent string
HTTPTimeout = 30 * time.Second
)

@ -0,0 +1,350 @@
package certmagic
import (
"context"
"crypto/x509"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// ACMEManager gets certificates using ACME. It implements the PreChecker,
// Issuer, and Revoker interfaces.
//
// It is NOT VALID to use an ACMEManager without calling NewACMEManager().
// It fills in default values from DefaultACME as well as setting up
// internal state that is necessary for valid use. Always call
// NewACMEManager() to get a valid ACMEManager value.
type ACMEManager struct {
// The endpoint of the directory for the ACME
// CA we are to use
CA string
// TestCA is the endpoint of the directory for
// an ACME CA to use to test domain validation,
// but any certs obtained from this CA are
// discarded
TestCA string
// The email address to use when creating or
// selecting an existing ACME server account
Email string
// Set to true if agreed to the CA's
// subscriber agreement
Agreed bool
// An optional external account to associate
// with this ACME account
ExternalAccount *acme.EAB
// Disable all HTTP challenges
DisableHTTPChallenge bool
// Disable all TLS-ALPN challenges
DisableTLSALPNChallenge bool
// The host (ONLY the host, not port) to listen
// on if necessary to start a listener to solve
// an ACME challenge
ListenHost string
// The alternate port to use for the ACME HTTP
// challenge; if non-empty, this port will be
// used instead of HTTPChallengePort to spin up
// a listener for the HTTP challenge
AltHTTPPort int
// The alternate port to use for the ACME
// TLS-ALPN challenge; the system must forward
// TLSALPNChallengePort to this port for
// challenge to succeed
AltTLSALPNPort int
// The solver for the dns-01 challenge;
// usually this is a DNS01Solver value
// from this package
DNS01Solver acmez.Solver
// TrustedRoots specifies a pool of root CA
// certificates to trust when communicating
// over a network to a peer.
TrustedRoots *x509.CertPool
// The maximum amount of time to allow for
// obtaining a certificate. If empty, the
// default from the underlying ACME lib is
// used. If set, it must not be too low so
// as to cancel challenges too early.
CertObtainTimeout time.Duration
// Address of custom DNS resolver to be used
// when communicating with ACME server
Resolver string
// Callback function that is called before a
// new ACME account is registered with the CA;
// it allows for last-second config changes
// of the ACMEManager (TODO: this feature is
// still EXPERIMENTAL and subject to change)
NewAccountFunc func(context.Context, *ACMEManager, acme.Account) error
// Set a logger to enable logging
Logger *zap.Logger
config *Config
httpClient *http.Client
}
// NewACMEManager constructs a valid ACMEManager based on a template
// configuration; any empty values will be filled in by defaults in
// DefaultACME. The associated config is also required.
//
// Typically, you'll create the Config first, then call NewACMEManager(),
// then assign the return value to the Issuer/Revoker fields of the Config.
func NewACMEManager(cfg *Config, template ACMEManager) *ACMEManager {
if cfg == nil {
panic("cannot make valid ACMEManager without an associated CertMagic config")
}
if template.CA == "" {
template.CA = DefaultACME.CA
}
if template.TestCA == "" && template.CA == DefaultACME.CA {
// only use the default test CA if the CA is also
// the default CA; no point in testing against
// Let's Encrypt's staging server if we are not
// using their production server too
template.TestCA = DefaultACME.TestCA
}
if template.Email == "" {
template.Email = DefaultACME.Email
}
if !template.Agreed {
template.Agreed = DefaultACME.Agreed
}
if template.ExternalAccount == nil {
template.ExternalAccount = DefaultACME.ExternalAccount
}
if !template.DisableHTTPChallenge {
template.DisableHTTPChallenge = DefaultACME.DisableHTTPChallenge
}
if !template.DisableTLSALPNChallenge {
template.DisableTLSALPNChallenge = DefaultACME.DisableTLSALPNChallenge
}
if template.ListenHost == "" {
template.ListenHost = DefaultACME.ListenHost
}
if template.AltHTTPPort == 0 {
template.AltHTTPPort = DefaultACME.AltHTTPPort
}
if template.AltTLSALPNPort == 0 {
template.AltTLSALPNPort = DefaultACME.AltTLSALPNPort
}
if template.DNS01Solver == nil {
template.DNS01Solver = DefaultACME.DNS01Solver
}
if template.TrustedRoots == nil {
template.TrustedRoots = DefaultACME.TrustedRoots
}
if template.CertObtainTimeout == 0 {
template.CertObtainTimeout = DefaultACME.CertObtainTimeout
}
if template.Resolver == "" {
template.Resolver = DefaultACME.Resolver
}
if template.NewAccountFunc == nil {
template.NewAccountFunc = DefaultACME.NewAccountFunc
}
if template.Logger == nil {
template.Logger = DefaultACME.Logger
}
template.config = cfg
return &template
}
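// For illustration, a rough sketch of the wiring described above; the CA
// choice, email, and the use of NewDefault are placeholder decisions, and
// error handling is omitted:
//
//	cfg := certmagic.NewDefault() // or a Config created with certmagic.New
//	issuer := certmagic.NewACMEManager(cfg, certmagic.ACMEManager{
//		CA:     certmagic.LetsEncryptStagingCA, // placeholder choice of CA
//		Email:  "admin@example.com",            // placeholder
//		Agreed: true,
//	})
//	cfg.Issuer = issuer
//	cfg.Revoker = issuer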
// IssuerKey returns the unique issuer key for the
// configured CA endpoint.
func (am *ACMEManager) IssuerKey() string {
return am.issuerKey(am.CA)
}
func (am *ACMEManager) issuerKey(ca string) string {
key := ca
if caURL, err := url.Parse(key); err == nil {
key = caURL.Host
if caURL.Path != "" {
// keep the path, but make sure it's a single
// component (i.e. no forward slashes, and for
// good measure, no backward slashes either)
const hyphen = "-"
repl := strings.NewReplacer(
"/", hyphen,
"\\", hyphen,
)
path := strings.Trim(repl.Replace(caURL.Path), hyphen)
if path != "" {
key += hyphen + path
}
}
}
return key
}
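// For example (illustrative):
//
//	issuerKey("https://acme-v02.api.letsencrypt.org/directory")
//	// -> "acme-v02.api.letsencrypt.org-directory"
//
// i.e. the CA host plus its path, with slashes collapsed to hyphens.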
// PreCheck performs a few simple checks before obtaining or
// renewing a certificate with ACME, and returns whether this
// batch is eligible for certificates if using Let's Encrypt.
// It also ensures that an email address is available.
func (am *ACMEManager) PreCheck(_ context.Context, names []string, interactive bool) error {
letsEncrypt := strings.Contains(am.CA, "api.letsencrypt.org")
if letsEncrypt {
for _, name := range names {
if !SubjectQualifiesForPublicCert(name) {
return fmt.Errorf("subject does not qualify for a Let's Encrypt certificate: %s", name)
}
}
}
return am.getEmail(interactive)
}
// Issue implements the Issuer interface. It obtains a certificate for the given csr using
// the ACME configuration am.
func (am *ACMEManager) Issue(ctx context.Context, csr *x509.CertificateRequest) (*IssuedCertificate, error) {
if am.config == nil {
panic("missing config pointer (must use NewACMEManager)")
}
var isRetry bool
if attempts, ok := ctx.Value(AttemptsCtxKey).(*int); ok {
isRetry = *attempts > 0
}
cert, usedTestCA, err := am.doIssue(ctx, csr, isRetry)
if err != nil {
return nil, err
}
// important to note that usedTestCA is not necessarily the same as isRetry
// (usedTestCA can be true if the main CA and the test CA happen to be the same)
if isRetry && usedTestCA && am.CA != am.TestCA {
// succeeded with testing endpoint, so try again with production endpoint
// (only if the production endpoint is different from the testing endpoint)
// TODO: This logic is imperfect and could benefit from some refinement.
// The two CA endpoints likely have different states, which could cause one
// to succeed and the other to fail, even if it's not a validation error.
// Two common cases would be:
// 1) Rate limiter state. This is more likely to cause prod to fail while
// staging succeeds, since prod usually has tighter rate limits. Thus, if
// initial attempt failed in prod due to rate limit, first retry (on staging)
// might succeed, and then trying prod again right away would probably still
// fail; normally this would terminate retries but the right thing to do in
// this case is to back off and retry again later. We could refine this logic
// to stick with the production endpoint on retries unless the error changes.
// 2) Cached authorizations state. If a domain validates successfully with
// one endpoint, but then the other endpoint is used, it might fail, e.g. if
// DNS was just changed or is still propagating. In this case, the second CA
// should continue to be retried with backoff, without switching back to the
// other endpoint. This is more likely to happen if a user is testing with
// the staging CA as the main CA, then changes their configuration once they
// think they are ready for the production endpoint.
cert, _, err = am.doIssue(ctx, csr, false)
if err != nil {
// succeeded with test CA but failed just now with the production CA;
// either we are observing differing internal states of each CA that will
// work out with time, or there is a bug/misconfiguration somewhere
// externally; it is hard to tell which! one easy cue is whether the
// error is specifically a 429 (Too Many Requests); if so, we should
// probably keep retrying
var problem acme.Problem
if errors.As(err, &problem) {
if problem.Status == http.StatusTooManyRequests {
// DON'T abort retries; the test CA succeeded (even
// if it's cached, it recently succeeded!) so we just
// need to keep trying (with backoff) until this CA's
// rate limits expire...
// TODO: as mentioned in comment above, we would benefit
// by pinning the main CA at this point instead of
// needlessly retrying with the test CA first each time
return nil, err
}
}
return nil, ErrNoRetry{err}
}
}
return cert, err
}
func (am *ACMEManager) doIssue(ctx context.Context, csr *x509.CertificateRequest, useTestCA bool) (*IssuedCertificate, bool, error) {
client, err := am.newACMEClient(ctx, useTestCA, false)
if err != nil {
return nil, false, err
}
usingTestCA := client.usingTestCA()
nameSet := namesFromCSR(csr)
if !useTestCA {
if err := client.throttle(ctx, nameSet); err != nil {
return nil, usingTestCA, err
}
}
certChains, err := client.acmeClient.ObtainCertificateUsingCSR(ctx, client.account, csr)
if err != nil {
return nil, usingTestCA, fmt.Errorf("%v %w (ca=%s)", nameSet, err, client.acmeClient.Directory)
}
// TODO: ACME server could in theory issue a cert with multiple chains,
// but we don't (yet) have a way to choose one, so just use first one
ic := &IssuedCertificate{
Certificate: certChains[0].ChainPEM,
Metadata: certChains[0],
}
return ic, usingTestCA, nil
}
// Revoke implements the Revoker interface. It revokes the given certificate.
func (am *ACMEManager) Revoke(ctx context.Context, cert CertificateResource, reason int) error {
client, err := am.newACMEClient(ctx, false, false)
if err != nil {
return err
}
certs, err := parseCertsFromPEMBundle(cert.CertificatePEM)
if err != nil {
return err
}
return client.revoke(ctx, certs[0], reason)
}
// DefaultACME specifies the default settings
// to use for ACMEManagers.
var DefaultACME = ACMEManager{
CA: LetsEncryptProductionCA,
TestCA: LetsEncryptStagingCA,
}
// Some well-known CA endpoints available to use.
const (
LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory"
LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory"
)
// prefixACME is the storage key prefix used for ACME-specific assets.
const prefixACME = "acme"
// Interface guards
var (
_ PreChecker = (*ACMEManager)(nil)
_ Issuer = (*ACMEManager)(nil)
_ Revoker = (*ACMEManager)(nil)
)

@ -0,0 +1,187 @@
package certmagic
import (
"context"
"errors"
"log"
"runtime"
"sync"
"time"
"go.uber.org/zap"
)
var jm = &jobManager{maxConcurrentJobs: 1000}
type jobManager struct {
mu sync.Mutex
maxConcurrentJobs int
activeWorkers int
queue []namedJob
names map[string]struct{}
}
type namedJob struct {
name string
job func() error
logger *zap.Logger
}
// Submit enqueues the given job with the given name. If name is non-empty
// and a job with the same name is already enqueued or running, this is a
// no-op. If name is empty, no duplicate prevention will occur. The job
// manager will then run this job as soon as it is able.
func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) {
jm.mu.Lock()
defer jm.mu.Unlock()
if jm.names == nil {
jm.names = make(map[string]struct{})
}
if name != "" {
// prevent duplicate jobs
if _, ok := jm.names[name]; ok {
return
}
jm.names[name] = struct{}{}
}
jm.queue = append(jm.queue, namedJob{name, job, logger})
if jm.activeWorkers < jm.maxConcurrentJobs {
jm.activeWorkers++
go jm.worker()
}
}
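// For illustration (hypothetical names, not calls made in this file): a
// caller might enqueue a renewal keyed by domain so that the same certificate
// is never queued twice concurrently, e.g.
//
//	jm.Submit(logger, "renew_"+domain, func() error {
//		return renewCertificate(domain) // hypothetical helper
//	})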
func (jm *jobManager) worker() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: certificate worker: %v\n%s", err, buf)
}
}()
for {
jm.mu.Lock()
if len(jm.queue) == 0 {
jm.activeWorkers--
jm.mu.Unlock()
return
}
next := jm.queue[0]
jm.queue = jm.queue[1:]
jm.mu.Unlock()
if err := next.job(); err != nil {
if next.logger != nil {
next.logger.Error("job failed", zap.Error(err))
}
}
if next.name != "" {
jm.mu.Lock()
delete(jm.names, next.name)
jm.mu.Unlock()
}
}
}
func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error {
var attempts int
ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts)
// the initial intervalIndex is -1, signaling
// that we should not wait for the first attempt
start, intervalIndex := time.Now(), -1
var err error
for time.Since(start) < maxRetryDuration {
var wait time.Duration
if intervalIndex >= 0 {
wait = retryIntervals[intervalIndex]
}
timer := time.NewTimer(wait)
select {
case <-ctx.Done():
timer.Stop()
return context.Canceled
case <-timer.C:
err = f(ctx)
attempts++
if err == nil || errors.Is(err, context.Canceled) {
return err
}
var errNoRetry ErrNoRetry
if errors.As(err, &errNoRetry) {
return err
}
if intervalIndex < len(retryIntervals)-1 {
intervalIndex++
}
if time.Since(start) < maxRetryDuration {
if log != nil {
log.Error("will retry",
zap.Error(err),
zap.Int("attempt", attempts),
zap.Duration("retrying_in", retryIntervals[intervalIndex]),
zap.Duration("elapsed", time.Since(start)),
zap.Duration("max_duration", maxRetryDuration))
}
} else {
if log != nil {
log.Error("final attempt; giving up",
zap.Error(err),
zap.Int("attempt", attempts),
zap.Duration("elapsed", time.Since(start)),
zap.Duration("max_duration", maxRetryDuration))
}
return nil
}
}
}
return err
}
// ErrNoRetry is an error type which signals
// to stop retries early.
type ErrNoRetry struct{ Err error }
// Unwrap makes it so that e wraps e.Err.
func (e ErrNoRetry) Unwrap() error { return e.Err }
func (e ErrNoRetry) Error() string { return e.Err.Error() }
type retryStateCtxKey struct{}
// AttemptsCtxKey is the context key for the value
// that holds the attempt counter. The value counts
// how many times the operation has been attempted.
// A value of 0 means first attempt.
var AttemptsCtxKey retryStateCtxKey
// retryIntervals are based on the idea of exponential
// backoff, but weighed a little more heavily to the
// front. We figure that intermittent errors would be
// resolved after the first retry, but any errors after
// that would probably require at least a few minutes
// to clear up: either for DNS to propagate, for the
// administrator to fix their DNS or network properties,
// or for some other external factor to change. We
// chose intervals that we think will be most useful
// without introducing unnecessary delay. The last
// interval in this list will be used until the time
// of maxRetryDuration has elapsed.
var retryIntervals = []time.Duration{
1 * time.Minute,
2 * time.Minute,
2 * time.Minute,
5 * time.Minute, // elapsed: 10 min
10 * time.Minute,
20 * time.Minute,
20 * time.Minute, // elapsed: 1 hr
30 * time.Minute,
30 * time.Minute, // elapsed: 2 hr
1 * time.Hour,
3 * time.Hour, // elapsed: 6 hr
6 * time.Hour, // for up to maxRetryDuration
}
// maxRetryDuration is the maximum duration to try
// doing retries using the above intervals.
const maxRetryDuration = 24 * time.Hour * 30

@ -0,0 +1,326 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"fmt"
weakrand "math/rand" // seeded elsewhere
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// Cache is a structure that stores certificates in memory.
// A Cache indexes certificates by name for quick access
// during TLS handshakes, and avoids duplicating certificates
// in memory. Generally, there should only be one per process.
// That is not a strict requirement, but using more
// than one is a code smell and may indicate an
// over-engineered design.
//
// An empty cache is INVALID and must not be used. Be sure
// to call NewCache to get a valid value.
//
// These should be very long-lived values and must not be
// copied. Before all references leave scope to be garbage
// collected, ensure you call Stop() to stop maintenance on
// the certificates stored in this cache and release locks.
//
// Caches are not usually manipulated directly; create a
// Config value with a pointer to a Cache, and then use
// the Config to interact with the cache. Caches are
// agnostic of any particular storage or ACME config,
// since each certificate may be managed and stored
// differently.
type Cache struct {
// User configuration of the cache
options CacheOptions
// The cache is keyed by certificate hash
cache map[string]Certificate
// cacheIndex is a map of SAN to cache key (cert hash)
cacheIndex map[string][]string
// Protects the cache and index maps
mu sync.RWMutex
// Close this channel to cancel asset maintenance
stopChan chan struct{}
// Used to signal when stopping is completed
doneChan chan struct{}
logger *zap.Logger
}
// NewCache returns a new, valid Cache for efficiently
// accessing certificates in memory. It also begins a
// maintenance goroutine to tend to the certificates
// in the cache. Call Stop() when you are done with the
// cache so it can clean up locks and stuff.
//
// Most users of this package will not need to call this
// because a default certificate cache is created for you.
// Only advanced use cases require creating a new cache.
//
// This function panics if opts.GetConfigForCert is not
// set. The reason is that a cache absolutely needs to
// be able to get a Config with which to manage TLS
// assets, and it is not safe to assume that the Default
// config is always the correct one, since you have
// created the cache yourself.
//
// See the godoc for Cache to use it properly. When
// no longer needed, caches should be stopped with
// Stop() to clean up resources even if the process
// is being terminated, so that it can clean up
// any locks for other processes to unblock!
func NewCache(opts CacheOptions) *Cache {
// assume default options if necessary
if opts.OCSPCheckInterval <= 0 {
opts.OCSPCheckInterval = DefaultOCSPCheckInterval
}
if opts.RenewCheckInterval <= 0 {
opts.RenewCheckInterval = DefaultRenewCheckInterval
}
if opts.Capacity < 0 {
opts.Capacity = 0
}
// this must be set, because we cannot
// safely assume that the Default Config
// is always the correct one to use
if opts.GetConfigForCert == nil {
panic("cache must be initialized with a GetConfigForCert callback")
}
c := &Cache{
options: opts,
cache: make(map[string]Certificate),
cacheIndex: make(map[string][]string),
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
logger: opts.Logger,
}
go c.maintainAssets(0)
return c
}
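// For illustration, a rough sketch of the advanced use described above (names
// are placeholders; the callback simply hands back one shared Config):
//
//	var cfg *certmagic.Config
//	cache := certmagic.NewCache(certmagic.CacheOptions{
//		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
//			return cfg, nil
//		},
//	})
//	defer cache.Stop()
//	cfg = certmagic.New(cache, certmagic.Config{})
//	// cfg can then be used to Manage() domains backed by this cache.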
// Stop stops the maintenance goroutine for
// certificates in certCache. It blocks until
// stopping is complete. Once a cache is
// stopped, it cannot be reused.
func (certCache *Cache) Stop() {
close(certCache.stopChan) // signal to stop
<-certCache.doneChan // wait for stop to complete
}
// CacheOptions is used to configure certificate caches.
// Once a cache has been created with certain options,
// those settings cannot be changed.
type CacheOptions struct {
// REQUIRED. A function that returns a configuration
// used for managing a certificate, or for accessing
// that certificate's asset storage (e.g. for
// OCSP staples, etc). The returned Config MUST
// be associated with the same Cache as the caller.
//
// The reason this is a callback function, dynamically
// returning a Config (instead of attaching a static
// pointer to a Config on each certificate) is because
// the config for how to manage a domain's certificate
// might change from maintenance to maintenance. The
// cache is so long-lived, we cannot assume that the
// host's situation will always be the same; e.g. the
// certificate might switch DNS providers, so the DNS
// challenge (if used) would need to be adjusted from
// the last time it was run ~8 weeks ago.
GetConfigForCert ConfigGetter
// How often to check certificates' OCSP staples
// for updates; if unset, DefaultOCSPCheckInterval
// will be used.
OCSPCheckInterval time.Duration
// How often to check certificates for renewal;
// if unset, DefaultRenewCheckInterval will be used.
RenewCheckInterval time.Duration
// Maximum number of certificates to allow in the cache.
// If reached, certificates will be randomly evicted to
// make room for new ones. 0 means unlimited.
Capacity int
// Set a logger to enable logging
Logger *zap.Logger
}
// ConfigGetter is a function that returns a prepared,
// valid config that should be used when managing the
// given certificate or its assets.
type ConfigGetter func(Certificate) (*Config, error)
// cacheCertificate calls unsyncedCacheCertificate with a write lock.
//
// This function is safe for concurrent use.
func (certCache *Cache) cacheCertificate(cert Certificate) {
certCache.mu.Lock()
certCache.unsyncedCacheCertificate(cert)
certCache.mu.Unlock()
}
// unsyncedCacheCertificate adds cert to the in-memory cache unless
// it already exists in the cache (according to cert.Hash). It
// updates the name index.
//
// This function is NOT safe for concurrent use. Callers MUST acquire
// a write lock on certCache.mu first.
func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
// no-op if this certificate already exists in the cache
if _, ok := certCache.cache[cert.hash]; ok {
return
}
// if the cache is at capacity, make room for new cert
cacheSize := len(certCache.cache)
if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
// Go maps are "nondeterministic" but not actually random,
// so although we could just chop off the "front" of the
// map with less code, that is a heavily skewed eviction
// strategy; generating random numbers is cheap and
// ensures a much better distribution.
rnd := weakrand.Intn(cacheSize)
i := 0
for _, randomCert := range certCache.cache {
if i == rnd {
certCache.removeCertificate(randomCert)
break
}
i++
}
}
// store the certificate
certCache.cache[cert.hash] = cert
// update the index so we can access it by name
for _, name := range cert.Names {
certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
}
}
// removeCertificate removes cert from the cache.
//
// This function is NOT safe for concurrent use; callers
// MUST first acquire a write lock on certCache.mu.
func (certCache *Cache) removeCertificate(cert Certificate) {
// delete all mentions of this cert from the name index
for _, name := range cert.Names {
keyList := certCache.cacheIndex[name]
for i, cacheKey := range keyList {
if cacheKey == cert.hash {
keyList = append(keyList[:i], keyList[i+1:]...)
}
}
if len(keyList) == 0 {
delete(certCache.cacheIndex, name)
} else {
certCache.cacheIndex[name] = keyList
}
}
// delete the actual cert from the cache
delete(certCache.cache, cert.hash)
}
// replaceCertificate atomically replaces oldCert with newCert in
// the cache.
//
// This method is safe for concurrent use.
func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
certCache.mu.Lock()
certCache.removeCertificate(oldCert)
certCache.unsyncedCacheCertificate(newCert)
certCache.mu.Unlock()
if certCache.logger != nil {
certCache.logger.Info("replaced certificate in cache",
zap.Strings("identifiers", newCert.Names),
zap.Time("new_expiration", newCert.Leaf.NotAfter))
}
}
func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
allCertKeys := certCache.cacheIndex[name]
certs := make([]Certificate, len(allCertKeys))
for i := range allCertKeys {
certs[i] = certCache.cache[allCertKeys[i]]
}
return certs
}
func (certCache *Cache) getAllCerts() []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
certs := make([]Certificate, 0, len(certCache.cache))
for _, cert := range certCache.cache {
certs = append(certs, cert)
}
return certs
}
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
cfg, err := certCache.options.GetConfigForCert(cert)
if err != nil {
return nil, err
}
if cfg.certCache != nil && cfg.certCache != certCache {
return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
cert.Names, cfg.certCache, certCache)
}
return cfg, nil
}
// AllMatchingCertificates returns a list of all certificates that could
// be used to serve the given SNI name, including exact SAN matches and
// wildcard matches.
func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
// get exact matches first
certs := certCache.getAllMatchingCerts(name)
// then look for wildcard matches by replacing each
// label of the domain name with wildcards
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
certs = append(certs, certCache.getAllMatchingCerts(candidate)...)
}
return certs
}
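// For illustration: because the label replacement above is cumulative, a
// lookup for "sub.example.com" consults the index for "sub.example.com",
// then "*.example.com", then "*.*.com", and finally "*.*.*".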
var (
defaultCache *Cache
defaultCacheMu sync.Mutex
)

@ -0,0 +1,404 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// Certificate is a tls.Certificate with associated metadata tacked on.
// Even if the metadata can be obtained by parsing the certificate,
// we are more efficient by extracting the metadata onto this struct,
// but at the cost of slightly higher memory use.
type Certificate struct {
tls.Certificate
// Names is the list of subject names this
// certificate is signed for.
Names []string
// Optional; user-provided, and arbitrary.
Tags []string
// OCSP contains the certificate's parsed OCSP response.
ocsp *ocsp.Response
// The hex-encoded hash of this cert's chain's bytes.
hash string
// Whether this certificate is under our management
managed bool
}
// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
}
// Expired returns true if the certificate has expired.
func (cert Certificate) Expired() bool {
if cert.Leaf == nil {
// ideally cert.Leaf would never be nil, but this can happen for
// "synthetic" certs like those made to solve the TLS-ALPN challenge
// which adds a special cert directly to the cache, since
// tls.X509KeyPair() discards the leaf; oh well
return false
}
return time.Now().After(cert.Leaf.NotAfter)
}
// currentlyInRenewalWindow returns true if the current time is
// within the renewal window, according to the given start/end
// dates and the ratio of the renewal window. If true is returned,
// the certificate being considered is due for renewal.
func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
if notAfter.IsZero() {
return false
}
lifetime := notAfter.Sub(notBefore)
if renewalWindowRatio == 0 {
renewalWindowRatio = DefaultRenewalWindowRatio
}
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
renewalWindowStart := notAfter.Add(-renewalWindow)
return time.Now().After(renewalWindowStart)
}
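// Worked example (illustrative): for a 90-day certificate and a renewal
// window ratio of 1/3, the renewal window is the last 30 days of the
// lifetime, so this function starts returning true 30 days before NotAfter.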
// HasTag returns true if cert.Tags has tag.
func (cert Certificate) HasTag(tag string) bool {
for _, t := range cert.Tags {
if t == tag {
return true
}
}
return false
}
// CacheManagedCertificate loads the certificate for domain into the
// cache, from the TLS storage for managed certificates. It returns a
// copy of the Certificate that was put into the cache.
//
// This is a lower-level method; normally you'll call Manage() instead.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) {
cert, err := cfg.loadManagedCertificate(domain)
if err != nil {
return cert, err
}
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_managed_cert", cert.Names)
return cert, nil
}
// loadManagedCertificate loads the managed certificate for domain,
// but it does not add it to the cache. It just loads from storage.
func (cfg *Config) loadManagedCertificate(domain string) (Certificate, error) {
certRes, err := cfg.loadCertResource(domain)
if err != nil {
return Certificate{}, err
}
cert, err := cfg.makeCertificateWithOCSP(certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return cert, err
}
cert.managed = true
return cert, nil
}
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
// the in-memory cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMFile(certFile, keyFile string, tags []string) error {
cert, err := cfg.makeCertificateFromDiskWithOCSP(cfg.Storage, certFile, keyFile)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
// It staples OCSP if possible.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedTLSCertificate(tlsCert tls.Certificate, tags []string) error {
var cert Certificate
err := fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return err
}
_, err = stapleOCSP(cfg.Storage, &cert, nil)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
}
cfg.emit("cached_unmanaged_cert", cert.Names)
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
return nil
}
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
// of the certificate and key, then caches it in memory.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte, tags []string) error {
cert, err := cfg.makeCertificateWithOCSP(certBytes, keyBytes)
if err != nil {
return err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit("cached_unmanaged_cert", cert.Names)
return nil
}
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
// certificate and key files. It fills out all the fields in
// the certificate except for the Managed and OnDemand flags.
// (It is up to the caller to set those.) It staples OCSP.
func (cfg Config) makeCertificateFromDiskWithOCSP(storage Storage, certFile, keyFile string) (Certificate, error) {
certPEMBlock, err := ioutil.ReadFile(certFile)
if err != nil {
return Certificate{}, err
}
keyPEMBlock, err := ioutil.ReadFile(keyFile)
if err != nil {
return Certificate{}, err
}
return cfg.makeCertificateWithOCSP(certPEMBlock, keyPEMBlock)
}
// makeCertificateWithOCSP is the same as makeCertificate except that it also
// staples OCSP to the certificate.
func (cfg Config) makeCertificateWithOCSP(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
_, err = stapleOCSP(cfg.Storage, &cert, certPEMBlock)
if err != nil && cfg.Logger != nil {
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
}
return cert, nil
}
// makeCertificate turns a certificate PEM bundle and a key PEM block into
// a Certificate with necessary metadata from parsing its bytes filled into
// its struct fields for convenience (except for the OnDemand and Managed
// flags; it is up to the caller to set those properties!). This function
// does NOT staple OCSP.
func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
var cert Certificate
// Convert to a tls.Certificate
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return cert, err
}
// Extract necessary metadata
err = fillCertFromLeaf(&cert, tlsCert)
if err != nil {
return cert, err
}
return cert, nil
}
// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
// guarantees that cert.Leaf is non-nil.
func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
if len(tlsCert.Certificate) == 0 {
return fmt.Errorf("certificate is empty")
}
cert.Certificate = tlsCert
// the leaf cert should be the one for the site; we must set
// the tls.Certificate.Leaf field so that TLS handshakes are
// more efficient
leaf, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
return err
}
cert.Certificate.Leaf = leaf
// for convenience, we do want to assemble all the
// subjects on the certificate into one list
if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
}
for _, name := range leaf.DNSNames {
if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(name))
}
}
for _, ip := range leaf.IPAddresses {
if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(ipStr))
}
}
for _, email := range leaf.EmailAddresses {
if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, strings.ToLower(email))
}
}
for _, u := range leaf.URIs {
if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
cert.Names = append(cert.Names, u.String())
}
}
if len(cert.Names) == 0 {
return fmt.Errorf("certificate has no names")
}
// save the hash of this certificate (chain),
// which is used as the cache key, for efficiency
cert.hash = hashCertificateChain(cert.Certificate.Certificate)
return nil
}
// managedCertInStorageExpiresSoon returns true if cert (being a
// managed certificate) is expiring within RenewDurationBefore.
// It returns false if there was an error checking the expiration
// of the certificate as found in storage, or if the certificate
// in storage is NOT expiring soon. A certificate that is expiring
// soon in our cache but is not expiring soon in storage probably
// means that another instance renewed the certificate in the
// meantime, and it would be a good idea to simply load the cert
// into our cache rather than repeating the renewal process again.
func (cfg *Config) managedCertInStorageExpiresSoon(cert Certificate) (bool, error) {
certRes, err := cfg.loadCertResource(cert.Names[0])
if err != nil {
return false, err
}
tlsCert, err := tls.X509KeyPair(certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return false, err
}
leaf, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
return false, err
}
return currentlyInRenewalWindow(leaf.NotBefore, leaf.NotAfter, cfg.RenewalWindowRatio), nil
}
// reloadManagedCertificate reloads the certificate corresponding to the name(s)
// on oldCert into the cache, from storage. This also replaces the old certificate
// with the new one, so that all configurations that used the old cert now point
// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
// already in storage.
func (cfg *Config) reloadManagedCertificate(oldCert Certificate) error {
if cfg.Logger != nil {
cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
}
newCert, err := cfg.loadManagedCertificate(oldCert.Names[0])
if err != nil {
return fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
}
cfg.certCache.replaceCertificate(oldCert, newCert)
return nil
}
// SubjectQualifiesForCert returns true if subj is a name which,
// as a quick sanity check, looks like it could be the subject
// of a certificate. Requirements are:
// - must not be empty
// - must not start or end with a dot (RFC 1034)
// - must not contain common accidental special characters
func SubjectQualifiesForCert(subj string) bool {
// must not be empty
return strings.TrimSpace(subj) != "" &&
// must not start or end with a dot
!strings.HasPrefix(subj, ".") &&
!strings.HasSuffix(subj, ".") &&
// if it has a wildcard, must be a left-most label
(!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.")) &&
// must not contain other common special characters
!strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
}
// SubjectQualifiesForPublicCert returns true if the subject
// name appears eligible for automagic TLS with a public
// CA such as Let's Encrypt. For example: localhost and IP
// addresses are not eligible because we cannot obtain certs
// for those names with a public CA. Wildcard names are
// allowed, as long as they conform to CABF requirements (only
// one wildcard label, and it must be the left-most label).
func SubjectQualifiesForPublicCert(subj string) bool {
// must at least qualify for certificate
return SubjectQualifiesForCert(subj) &&
// localhost is ineligible
subj != "localhost" &&
// .localhost TLD is ineligible
!strings.HasSuffix(subj, ".localhost") &&
// .local TLD is ineligible
!strings.HasSuffix(subj, ".local") &&
// only one wildcard label allowed, and it must be left-most
(!strings.Contains(subj, "*") ||
(strings.Count(subj, "*") == 1 &&
len(subj) > 2 &&
strings.HasPrefix(subj, "*."))) &&
// cannot be an IP address (as of yet), see
// https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
net.ParseIP(subj) == nil
}
// MatchWildcard returns true if subject (a candidate DNS name)
// matches wildcard (a reference DNS name), mostly according to
// RFC6125-compliant wildcard rules.
func MatchWildcard(subject, wildcard string) bool {
if subject == wildcard {
return true
}
if !strings.Contains(wildcard, "*") {
return false
}
labels := strings.Split(subject, ".")
for i := range labels {
if labels[i] == "" {
continue // invalid label
}
labels[i] = "*"
candidate := strings.Join(labels, ".")
if candidate == wildcard {
return true
}
}
return false
}
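// Editor's illustrative sketch (not part of upstream certmagic): how the
// name-qualification helpers above treat a few sample inputs; the domain
// names are placeholders.
//
//	SubjectQualifiesForCert("example.com")             // true
//	SubjectQualifiesForCert(".example.com")            // false: leading dot
//	SubjectQualifiesForPublicCert("localhost")         // false: not publicly issuable
//	SubjectQualifiesForPublicCert("10.0.0.1")          // false: IP addresses are not eligible
//	SubjectQualifiesForPublicCert("*.example.com")     // true: one left-most wildcard label
//	MatchWildcard("sub.example.com", "*.example.com")  // true
//	MatchWildcard("a.b.example.com", "*.example.com")  // false: the wildcard spans one label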

@ -0,0 +1,480 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package certmagic automates the obtaining and renewal of TLS certificates,
// including TLS & HTTPS best practices such as robust OCSP stapling, caching,
// HTTP->HTTPS redirects, and more.
//
// Its high-level API serves your HTTP handlers over HTTPS if you simply give
// the domain name(s) and the http.Handler; CertMagic will create and run
// the HTTPS server for you, fully managing certificates during the lifetime
// of the server. Similarly, it can be used to start TLS listeners or return
// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic
// makes it easy. See the HTTPS, Listen, and TLS functions for that.
//
// If you need more control, create a Cache using NewCache() and then make
// a Config using New(). You can then call Manage() on the config. But if
// you use this lower-level API, you'll have to be sure to solve the HTTP
// and TLS-ALPN challenges yourself (unless you disabled them or use the
// DNS challenge) by using the provided Config.GetCertificate function
// in your tls.Config and/or Config.HTTPChallengeHandler in your HTTP
// handler.
//
// See the package's README for more instruction.
package certmagic
import (
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"fmt"
"log"
"net"
"net/http"
"sort"
"strings"
"sync"
"time"
)
// HTTPS serves mux for all domainNames using the HTTP
// and HTTPS ports, redirecting all HTTP requests to HTTPS.
// It uses the Default config.
//
// This high-level convenience function is opinionated and
// applies sane defaults for production use, including
// timeouts for HTTP requests and responses. To allow very
// long-lived connections, you should make your own
// http.Server values and use this package's Listen(), TLS(),
// or Config.TLSConfig() functions to customize to your needs.
// For example, servers that need to support large uploads or
// downloads with slow clients may need longer timeouts, so this
// convenience function is not suitable for them.
//
// Calling this function signifies your acceptance to
// the CA's Subscriber Agreement and/or Terms of Service.
func HTTPS(domainNames []string, mux http.Handler) error {
if mux == nil {
mux = http.DefaultServeMux
}
DefaultACME.Agreed = true
cfg := NewDefault()
err := cfg.ManageSync(domainNames)
if err != nil {
return err
}
httpWg.Add(1)
defer httpWg.Done()
// if we haven't made listeners yet, do so now,
// and clean them up when all servers are done
lnMu.Lock()
if httpLn == nil && httpsLn == nil {
httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort))
if err != nil {
lnMu.Unlock()
return err
}
tlsConfig := cfg.TLSConfig()
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), tlsConfig)
if err != nil {
httpLn.Close()
httpLn = nil
lnMu.Unlock()
return err
}
go func() {
httpWg.Wait()
lnMu.Lock()
httpLn.Close()
httpsLn.Close()
lnMu.Unlock()
}()
}
hln, hsln := httpLn, httpsLn
lnMu.Unlock()
// create HTTP/S servers that are configured
// with sane default timeouts and appropriate
// handlers (the HTTP server solves the HTTP
// challenge and issues redirects to HTTPS,
// while the HTTPS server simply serves the
// user's handler)
httpServer := &http.Server{
ReadHeaderTimeout: 5 * time.Second,
ReadTimeout: 5 * time.Second,
WriteTimeout: 5 * time.Second,
IdleTimeout: 5 * time.Second,
}
if am, ok := cfg.Issuer.(*ACMEManager); ok {
httpServer.Handler = am.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler))
}
httpsServer := &http.Server{
ReadHeaderTimeout: 10 * time.Second,
ReadTimeout: 30 * time.Second,
WriteTimeout: 2 * time.Minute,
IdleTimeout: 5 * time.Minute,
Handler: mux,
}
log.Printf("%v Serving HTTP->HTTPS on %s and %s",
domainNames, hln.Addr(), hsln.Addr())
go httpServer.Serve(hln)
return httpsServer.Serve(hsln)
}
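// Editor's illustrative sketch (not part of upstream certmagic): a minimal
// caller of the HTTPS convenience function above; the domain name and
// handler body are placeholders.
//
//	package main
//
//	import (
//		"net/http"
//
//		"github.com/caddyserver/certmagic"
//	)
//
//	func main() {
//		mux := http.NewServeMux()
//		mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//			w.Write([]byte("hello, HTTPS"))
//		})
//		// blocks while serving the HTTP->HTTPS redirector and the HTTPS
//		// server; calling HTTPS implies agreement to the CA's terms
//		if err := certmagic.HTTPS([]string{"example.com"}, mux); err != nil {
//			panic(err)
//		}
//	}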
func httpRedirectHandler(w http.ResponseWriter, r *http.Request) {
toURL := "https://"
// since we redirect to the standard HTTPS port, we
// do not need to include it in the redirect URL
requestHost := hostOnly(r.Host)
toURL += requestHost
toURL += r.URL.RequestURI()
// get rid of this disgusting unencrypted HTTP connection 🤢
w.Header().Set("Connection", "close")
http.Redirect(w, r, toURL, http.StatusMovedPermanently)
}
// TLS enables management of certificates for domainNames
// and returns a valid tls.Config. It uses the Default
// config.
//
// Because this is a convenience function that returns
// only a tls.Config, it does not assume HTTP is being
// served on the HTTP port, so the HTTP challenge is
// disabled (no HTTPChallengeHandler is necessary). The
// package variable Default is modified so that the
// HTTP challenge is disabled.
//
// Calling this function signifies your acceptance to
// the CA's Subscriber Agreement and/or Terms of Service.
func TLS(domainNames []string) (*tls.Config, error) {
DefaultACME.Agreed = true
DefaultACME.DisableHTTPChallenge = true
cfg := NewDefault()
return cfg.TLSConfig(), cfg.ManageSync(domainNames)
}
// Listen manages certificates for domainName and returns a
// TLS listener. It uses the Default config.
//
// Because this convenience function returns only a TLS-enabled
// listener and does not presume HTTP is also being served,
// the HTTP challenge will be disabled. The package variable
// Default is modified so that the HTTP challenge is disabled.
//
// Calling this function signifies your acceptance to
// the CA's Subscriber Agreement and/or Terms of Service.
func Listen(domainNames []string) (net.Listener, error) {
DefaultACME.Agreed = true
DefaultACME.DisableHTTPChallenge = true
cfg := NewDefault()
err := cfg.ManageSync(domainNames)
if err != nil {
return nil, err
}
return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig())
}
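// Editor's illustrative sketch (not part of upstream certmagic): using the
// Listen convenience function above to back a custom server; the domain
// name and mux are placeholders. Because Listen disables the HTTP
// challenge, the TLS-ALPN challenge must be reachable on the HTTPS port.
//
//	ln, err := certmagic.Listen([]string{"example.com"})
//	if err != nil {
//		return err
//	}
//	defer ln.Close()
//	return http.Serve(ln, mux)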
// ManageSync obtains certificates for domainNames and keeps them
// renewed using the Default config.
//
// This is a slightly lower-level function; you will need to
// wire up support for the ACME challenges yourself. You can
// obtain a Config to help you do that by calling NewDefault().
//
// You will need to ensure that you use a TLS config that gets
// certificates from this Config and that the HTTP and TLS-ALPN
// challenges can be solved. The easiest way to do this is to
// use NewDefault().TLSConfig() as your TLS config and to wrap
// your HTTP handler with NewDefault().HTTPChallengeHandler().
// If you don't have an HTTP server, you will need to disable
// the HTTP challenge.
//
// If you already have a TLS config you want to use, you can
// simply set its GetCertificate field to
// NewDefault().GetCertificate.
//
// Calling this function signifies your acceptance to
// the CA's Subscriber Agreement and/or Terms of Service.
func ManageSync(domainNames []string) error {
DefaultACME.Agreed = true
return NewDefault().ManageSync(domainNames)
}
// ManageAsync is the same as ManageSync, except that
// certificates are managed asynchronously. This means
// that the function will return before certificates
// are ready, and errors that occur during certificate
// obtain or renew operations are only logged. It is
// vital that you monitor the logs if using this method,
// which is only recommended for automated/non-interactive
// environments.
func ManageAsync(ctx context.Context, domainNames []string) error {
DefaultACME.Agreed = true
return NewDefault().ManageAsync(ctx, domainNames)
}
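// Editor's illustrative sketch (not part of upstream certmagic): the
// lower-level wiring described by ManageSync above. The mux and domain
// name are placeholders. As done in HTTPS() above, the HTTP challenge
// handler is reached through the *ACMEManager issuer via a type
// assertion.
//
//	if err := certmagic.ManageSync([]string{"example.com"}); err != nil {
//		return err
//	}
//	cfg := certmagic.NewDefault()
//	var httpHandler http.Handler = mux
//	if am, ok := cfg.Issuer.(*certmagic.ACMEManager); ok {
//		httpHandler = am.HTTPChallengeHandler(mux)
//	}
//	go http.ListenAndServe(":80", httpHandler)
//	httpsLn, err := tls.Listen("tcp", ":443", cfg.TLSConfig())
//	if err != nil {
//		return err
//	}
//	return http.Serve(httpsLn, mux)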
// OnDemandConfig configures on-demand TLS (certificate
// operations as-needed, like during TLS handshakes,
// rather than immediately).
//
// When this package's high-level convenience functions
// are used (HTTPS, Manage, etc., where the Default
// config is used as a template), this struct regulates
// certificate operations using an implicit whitelist
// containing the names passed into those functions if
// no DecisionFunc is set. This ensures some degree of
// control by default to avoid certificate operations for
// arbitrary domain names. To override this whitelist,
// manually specify a DecisionFunc. To impose rate limits,
// specify your own DecisionFunc.
type OnDemandConfig struct {
// If set, this function will be called to determine
// whether a certificate can be obtained or renewed
// for the given name. If an error is returned, the
// request will be denied.
DecisionFunc func(name string) error
// List of whitelisted hostnames (SNI values) for
// deferred (on-demand) obtaining of certificates.
// Used only by higher-level functions in this
// package to persist the list of hostnames that
// the config is supposed to manage. This is done
// because it seems reasonable that if you say
// "Manage [domain names...]", then only those
// domain names should be able to have certs;
// we don't NEED this feature, but it makes sense
// for higher-level convenience functions to be
// able to retain their convenience (the alternative
// is for the user to manually create a DecisionFunc
// that whitelists the same names already passed
// into Manage) without letting clients have their
// run of any domain names they want.
// Only enforced if len > 0.
hostWhitelist []string
}
func (o *OnDemandConfig) whitelistContains(name string) bool {
for _, n := range o.hostWhitelist {
if strings.EqualFold(n, name) {
return true
}
}
return false
}
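// Editor's illustrative sketch (not part of upstream certmagic): enabling
// on-demand TLS with a custom DecisionFunc, as described above; the
// suffix check stands in for an application's own policy (database
// lookup, rate limiting, and so on).
//
//	cfg := certmagic.NewDefault()
//	cfg.OnDemand = &certmagic.OnDemandConfig{
//		DecisionFunc: func(name string) error {
//			if !strings.HasSuffix(name, ".customers.example.com") {
//				return fmt.Errorf("obtaining a certificate for %q is not allowed", name)
//			}
//			return nil // allowed: certificate may be obtained at handshake time
//		},
//	}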
// isLoopback returns true if the hostname of addr looks
// explicitly like a common local hostname. addr must only
// be a host or a host:port combination.
func isLoopback(addr string) bool {
host := hostOnly(addr)
return host == "localhost" ||
strings.Trim(host, "[]") == "::1" ||
strings.HasPrefix(host, "127.")
}
// isInternal returns true if the IP of addr
// belongs to a private network IP range. addr
// must only be an IP or an IP:port combination.
// Loopback addresses are considered false.
func isInternal(addr string) bool {
privateNetworks := []string{
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
"fc00::/7",
}
host := hostOnly(addr)
ip := net.ParseIP(host)
if ip == nil {
return false
}
for _, privateNetwork := range privateNetworks {
_, ipnet, _ := net.ParseCIDR(privateNetwork)
if ipnet.Contains(ip) {
return true
}
}
return false
}
// hostOnly returns only the host portion of hostport.
// If there is no port or if there is an error splitting
// the port off, the whole input string is returned.
func hostOnly(hostport string) string {
host, _, err := net.SplitHostPort(hostport)
if err != nil {
return hostport // OK; probably had no port to begin with
}
return host
}
// PreChecker is an interface that can be optionally implemented by
// Issuers. Pre-checks are performed before each call (or batch of
// identical calls) to Issue(), giving the issuer the option to ensure
// it has all the necessary information/state.
type PreChecker interface {
PreCheck(ctx context.Context, names []string, interactive bool) error
}
// Issuer is a type that can issue certificates.
type Issuer interface {
// Issue obtains a certificate for the given CSR. It
// must honor context cancellation if it is long-running.
// It can also use the context to find out if the current
// call is part of a retry, via AttemptsCtxKey.
Issue(ctx context.Context, request *x509.CertificateRequest) (*IssuedCertificate, error)
// IssuerKey must return a string that uniquely identifies
// this particular configuration of the Issuer such that
// any certificates obtained by this Issuer will be treated
// as identical if they have the same SANs.
//
// Certificates obtained from Issuers with the same IssuerKey
// will overwrite others with the same SANs. For example, an
// Issuer might be able to obtain certificates from different
// CAs, say A and B. It is likely that the CAs have different
// use cases and purposes (e.g. testing and production), so
// their respective certificates should not overwrite each
// other.
IssuerKey() string
}
// Revoker can revoke certificates. Reason codes are defined
// by RFC 5280 §5.3.1: https://tools.ietf.org/html/rfc5280#section-5.3.1
// and are available as constants in our ACME library.
type Revoker interface {
Revoke(ctx context.Context, cert CertificateResource, reason int) error
}
// KeyGenerator can generate a private key.
type KeyGenerator interface {
// GenerateKey generates a private key. The returned
// PrivateKey must be able to expose its associated
// public key.
GenerateKey() (crypto.PrivateKey, error)
}
// IssuedCertificate represents a certificate that was just issued.
type IssuedCertificate struct {
// The PEM-encoding of DER-encoded ASN.1 data.
Certificate []byte
// Any extra information to serialize alongside the
// certificate in storage.
Metadata interface{}
}
// CertificateResource associates a certificate with its private
// key and other useful information, for use in maintaining the
// certificate.
type CertificateResource struct {
// The list of names on the certificate;
// for convenience only.
SANs []string `json:"sans,omitempty"`
// The PEM-encoding of DER-encoded ASN.1 data
// for the cert or chain.
CertificatePEM []byte `json:"-"`
// The PEM-encoding of the certificate's private key.
PrivateKeyPEM []byte `json:"-"`
// Any extra information associated with the certificate,
// usually provided by the issuer implementation.
IssuerData interface{} `json:"issuer_data,omitempty"`
}
// NamesKey returns the list of SANs as a single string,
// truncated to some ridiculously long size limit. It
// can act as a key for the set of names on the resource.
func (cr *CertificateResource) NamesKey() string {
sort.Strings(cr.SANs)
result := strings.Join(cr.SANs, ",")
if len(result) > 1024 {
const trunc = "_trunc"
result = result[:1024-len(trunc)] + trunc
}
return result
}
// Default contains the package defaults for the
// various Config fields. This is used as a template
// when creating your own Configs with New(), and it
// is also used as the Config by all the high-level
// functions in this package.
//
// The fields of this value will be used for Config
// fields which are unset. Feel free to modify these
// defaults, but do not use this Config by itself: it
// is only a template. Valid configurations can be
// obtained by calling New() (if you have your own
// certificate cache) or NewDefault() (if you only
// need a single config and want to use the default
// cache). This is the only Config which can access
// the default certificate cache.
var Default = Config{
RenewalWindowRatio: DefaultRenewalWindowRatio,
Storage: defaultFileStorage,
KeySource: DefaultKeyGenerator,
}
const (
// HTTPChallengePort is the officially-designated port for
// the HTTP challenge according to the ACME spec.
HTTPChallengePort = 80
// TLSALPNChallengePort is the officially-designated port for
// the TLS-ALPN challenge according to the ACME spec.
TLSALPNChallengePort = 443
)
// Port variables must remain their defaults unless you
// forward packets from the defaults to whatever these
// are set to; otherwise ACME challenges will fail.
var (
// HTTPPort is the port on which to serve HTTP
// and, by extension, the HTTP challenge (unless
// Default.AltHTTPPort is set).
HTTPPort = 80
// HTTPSPort is the port on which to serve HTTPS
// and, by extension, the TLS-ALPN challenge
// (unless Default.AltTLSALPNPort is set).
HTTPSPort = 443
)
// Variables for conveniently serving HTTPS.
var (
httpLn, httpsLn net.Listener
lnMu sync.Mutex
httpWg sync.WaitGroup
)
// Maximum size for the stack trace when recovering from panics.
const stackTraceBufferSize = 1024 * 128

@ -0,0 +1,808 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"fmt"
weakrand "math/rand"
"net"
"net/url"
"strings"
"time"
"github.com/mholt/acmez"
"go.uber.org/zap"
)
// Config configures a certificate manager instance.
// An empty Config is not valid: use New() to obtain
// a valid Config.
type Config struct {
// How much of a certificate's lifetime becomes the
// renewal window, which is the span of time at the
// end of the certificate's validity period in which
// it should be renewed; for most certificates, the
// global default is good, but for extremely short-
// lived certs, you may want to raise this to ~0.5.
RenewalWindowRatio float64
// An optional event callback clients can set
// to subscribe to certain things happening
// internally by this config; invocations are
// synchronous, so make them return quickly!
OnEvent func(event string, data interface{})
// DefaultServerName specifies a server name
// to use when choosing a certificate if the
// ClientHello's ServerName field is empty
DefaultServerName string
// The state needed to operate on-demand TLS;
// if non-nil, on-demand TLS is enabled and
// certificate operations are deferred to
// TLS handshakes (or as-needed)
// TODO: Can we call this feature "Reactive/Lazy/Passive TLS" instead?
OnDemand *OnDemandConfig
// Add the must staple TLS extension to the CSR
MustStaple bool
// The type that issues certificates; the
// default Issuer is ACMEManager
Issuer Issuer
// The type that revokes certificates; must
// be configured in conjunction with the Issuer
// field such that both the Issuer and Revoker
// are related (because issuance information is
// required for revocation)
Revoker Revoker
// The source of new private keys for certificates;
// the default KeySource is StandardKeyGenerator
KeySource KeyGenerator
// CertSelection chooses one of the certificates
// with which the ClientHello will be completed;
// if not set, DefaultCertificateSelector will
// be used
CertSelection CertificateSelector
// The storage to access when storing or
// loading TLS assets
Storage Storage
// Set a logger to enable logging
Logger *zap.Logger
// required pointer to the in-memory cert cache
certCache *Cache
}
// NewDefault makes a valid config based on the package
// Default config. Most users will call this function
// instead of New() since most use cases require only a
// single config for any and all certificates.
//
// If your requirements are more advanced (for example,
// multiple configs depending on the certificate), then use
// New() instead. (You will need to make your own Cache
// first.) If you only need a single Config to manage your
// certs (even if that config changes, as long as it is the
// only one), customize the Default package variable before
// calling NewDefault().
//
// All calls to NewDefault() will return configs that use the
// same, default certificate cache. All configs returned
// by NewDefault() are based on the values of the fields of
// Default at the time it is called.
func NewDefault() *Config {
defaultCacheMu.Lock()
if defaultCache == nil {
defaultCache = NewCache(CacheOptions{
// the cache will likely need to renew certificates,
// so it will need to know how to do that, which
// depends on the certificate being managed and which
// can change during the lifetime of the cache; this
// callback makes it possible to get the latest and
// correct config with which to manage the cert,
// but if the user does not provide one, we can only
// assume that we are to use the default config
GetConfigForCert: func(Certificate) (*Config, error) {
return NewDefault(), nil
},
})
}
certCache := defaultCache
defaultCacheMu.Unlock()
return newWithCache(certCache, Default)
}
// New makes a new, valid config based on cfg and
// uses the provided certificate cache. certCache
// MUST NOT be nil or this function will panic.
//
// Use this method when you have an advanced use case
// that requires a custom certificate cache and config
// that may differ from the Default. For example, if
// not all certificates are managed/renewed the same
// way, you need to make your own Cache value with a
// GetConfigForCert callback that returns the correct
// configuration for each certificate. However, for
// the vast majority of cases, there will be only a
// single Config, thus the default cache (which always
// uses the default Config) and default config will
// suffice, and you should use New() instead.
func New(certCache *Cache, cfg Config) *Config {
if certCache == nil {
panic("a certificate cache is required")
}
if certCache.options.GetConfigForCert == nil {
panic("cache must have GetConfigForCert set in its options")
}
return newWithCache(certCache, cfg)
}
// newWithCache ensures that cfg is a valid config by populating
// zero-value fields from the Default Config. If certCache is
// nil, this function panics.
func newWithCache(certCache *Cache, cfg Config) *Config {
if certCache == nil {
panic("cannot make a valid config without a pointer to a certificate cache")
}
if cfg.OnDemand == nil {
cfg.OnDemand = Default.OnDemand
}
if cfg.RenewalWindowRatio == 0 {
cfg.RenewalWindowRatio = Default.RenewalWindowRatio
}
if cfg.OnEvent == nil {
cfg.OnEvent = Default.OnEvent
}
if cfg.KeySource == nil {
cfg.KeySource = Default.KeySource
}
if cfg.DefaultServerName == "" {
cfg.DefaultServerName = Default.DefaultServerName
}
if cfg.OnDemand == nil {
cfg.OnDemand = Default.OnDemand
}
if !cfg.MustStaple {
cfg.MustStaple = Default.MustStaple
}
if cfg.Storage == nil {
cfg.Storage = Default.Storage
}
if cfg.Issuer == nil {
cfg.Issuer = Default.Issuer
if cfg.Issuer == nil {
// okay really, we need an issuer,
// that's kind of the point; most
// people would probably want ACME
cfg.Issuer = NewACMEManager(&cfg, DefaultACME)
}
// issuer and revoker go together; if user
// specifies their own issuer, we don't want
// to override their revoker, hence we only
// do this if Issuer was also nil
if cfg.Revoker == nil {
cfg.Revoker = Default.Revoker
if cfg.Revoker == nil {
cfg.Revoker = NewACMEManager(&cfg, DefaultACME)
}
}
}
// absolutely don't allow a nil storage,
// because that would make almost anything
// a config can do pointless
if cfg.Storage == nil {
cfg.Storage = defaultFileStorage
}
// ensure the unexported fields are valid
cfg.certCache = certCache
return &cfg
}
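// Editor's illustrative sketch (not part of upstream certmagic): the
// advanced path described by New() above, with a caller-owned Cache whose
// GetConfigForCert callback returns the appropriate Config for each
// certificate. Returning one shared config here is a placeholder for real
// per-certificate logic.
//
//	var cfg *certmagic.Config
//	cache := certmagic.NewCache(certmagic.CacheOptions{
//		GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
//			return cfg, nil
//		},
//	})
//	cfg = certmagic.New(cache, certmagic.Config{
//		RenewalWindowRatio: certmagic.DefaultRenewalWindowRatio,
//	})
//	err := cfg.ManageSync([]string{"example.com"})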
// ManageSync causes the certificates for domainNames to be managed
// according to cfg. If cfg.OnDemand is not nil, then this simply
// whitelists the domain names and defers the certificate operations
// to when they are needed. Otherwise, the certificates for each
// name are loaded from storage or obtained from the CA. If loaded
// from storage, they are renewed if they are expiring or expired.
// It then caches the certificates in memory and is prepared to serve
// them up during TLS handshakes.
//
// Note that name whitelisting for on-demand management only takes
// effect if cfg.OnDemand.DecisionFunc is not set (is nil); it will
// not overwrite an existing DecisionFunc, nor will it overwrite
// its decision; i.e. the implicit whitelist is only used if no
// DecisionFunc is set.
//
// This method is synchronous, meaning that certificates for all
// domainNames must be successfully obtained (or renewed) before
// it returns. It returns immediately on the first error for any
// of the given domainNames. This behavior is recommended for
// interactive use (i.e. when an administrator is present) so
// that errors can be reported and fixed immediately.
func (cfg *Config) ManageSync(domainNames []string) error {
return cfg.manageAll(nil, domainNames, false)
}
// ManageAsync is the same as ManageSync, except that ACME
// operations are performed asynchronously (in the background).
// This method returns before certificates are ready. It is
// crucial that the administrator monitors the logs and is
// notified of any errors so that corrective action can be
// taken as soon as possible. Any errors returned from this
// method occurred before ACME transactions started.
//
// As long as logs are monitored, this method is typically
// recommended for non-interactive environments.
//
// If there are failures loading, obtaining, or renewing a
// certificate, it will be retried with exponential backoff
// for up to about 30 days, with a maximum interval of about
// 24 hours. Cancelling ctx will cancel retries and shut down
// any goroutines spawned by ManageAsync.
func (cfg *Config) ManageAsync(ctx context.Context, domainNames []string) error {
return cfg.manageAll(ctx, domainNames, true)
}
func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bool) error {
if ctx == nil {
ctx = context.Background()
}
for _, domainName := range domainNames {
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
if !cfg.OnDemand.whitelistContains(domainName) {
cfg.OnDemand.hostWhitelist = append(cfg.OnDemand.hostWhitelist, domainName)
}
continue
}
// otherwise, begin management immediately
err := cfg.manageOne(ctx, domainName, async)
if err != nil {
return err
}
}
return nil
}
func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) error {
// first try loading existing certificate from storage
cert, err := cfg.CacheManagedCertificate(domainName)
if err != nil {
if _, ok := err.(ErrNotExist); !ok {
return fmt.Errorf("%s: caching certificate: %v", domainName, err)
}
// if we don't have one in storage, obtain one
obtain := func() error {
err := cfg.ObtainCert(ctx, domainName, !async)
if err != nil {
return fmt.Errorf("%s: obtaining certificate: %w", domainName, err)
}
cert, err = cfg.CacheManagedCertificate(domainName)
if err != nil {
return fmt.Errorf("%s: caching certificate after obtaining it: %v", domainName, err)
}
return nil
}
if async {
// Leave the job name empty so as to allow duplicate 'obtain'
// jobs; this is because Caddy calls ManageAsync() before the
// previous config is stopped (and before its context is
// canceled), which means that if an obtain job is still
// running for the same domain, Submit() would not queue the
// new one because it is still running, even though it is
// (probably) about to be canceled (it might not if the new
// config fails to finish loading, however). In any case, we
// presume it is safe to enqueue a duplicate obtain job because
// either the old one (or sometimes the new one) is about to be
// canceled. This seems like reasonable logic for any consumer
// of this lib. See https://github.com/caddyserver/caddy/issues/3202
jm.Submit(cfg.Logger, "", obtain)
return nil
}
return obtain()
}
// for an existing certificate, make sure it is renewed
renew := func() error {
err := cfg.RenewCert(ctx, domainName, !async)
if err != nil {
return fmt.Errorf("%s: renewing certificate: %w", domainName, err)
}
// successful renewal, so update in-memory cache
err = cfg.reloadManagedCertificate(cert)
if err != nil {
return fmt.Errorf("%s: reloading renewed certificate into memory: %v", domainName, err)
}
return nil
}
if cert.NeedsRenewal(cfg) {
if async {
jm.Submit(cfg.Logger, "renew_"+domainName, renew)
return nil
}
return renew()
}
return nil
}
// ObtainCert obtains a certificate for name using cfg, as long
// as a certificate does not already exist in storage for that
// name. The name must qualify and cfg must be flagged as Managed.
// This function is a no-op if storage already has a certificate
// for name.
//
// It only obtains and stores certificates (and their keys),
// it does not load them into memory. If interactive is true,
// the user may be shown a prompt.
// TODO: consider moving interactive param into the Config struct,
// and maybe retry settings into the Config struct as well? (same for RenewCert)
func (cfg *Config) ObtainCert(ctx context.Context, name string, interactive bool) error {
if cfg.storageHasCertResources(name) {
return nil
}
issuer, err := cfg.getPrecheckedIssuer(ctx, []string{name}, interactive)
if err != nil {
return err
}
if issuer == nil {
return nil
}
return cfg.obtainWithIssuer(ctx, issuer, name, interactive)
}
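// Editor's illustrative sketch (not part of upstream certmagic): calling
// ObtainCert above directly for a single name, eagerly and interactively;
// the domain name is a placeholder.
//
//	cfg := certmagic.NewDefault()
//	err := cfg.ObtainCert(context.Background(), "example.com", true)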
func loggerNamed(l *zap.Logger, name string) *zap.Logger {
if l == nil {
return nil
}
return l.Named(name)
}
func (cfg *Config) obtainWithIssuer(ctx context.Context, issuer Issuer, name string, interactive bool) error {
log := loggerNamed(cfg.Logger, "obtain")
if log != nil {
log.Info("acquiring lock", zap.String("identifier", name))
}
// ensure idempotency of the obtain operation for this name
lockKey := cfg.lockKey("cert_acme", name)
err := acquireLock(ctx, cfg.Storage, lockKey)
if err != nil {
return err
}
defer func() {
if log != nil {
log.Info("releasing lock", zap.String("identifier", name))
}
if err := releaseLock(cfg.Storage, lockKey); err != nil {
if log != nil {
log.Error("unable to unlock",
zap.String("identifier", name),
zap.String("lock_key", lockKey),
zap.Error(err))
}
}
}()
if log != nil {
log.Info("lock acquired", zap.String("identifier", name))
}
f := func(ctx context.Context) error {
// check if obtain is still needed -- might have been obtained during lock
if cfg.storageHasCertResources(name) {
if log != nil {
log.Info("certificate already exists in storage", zap.String("identifier", name))
}
return nil
}
privateKey, err := cfg.KeySource.GenerateKey()
if err != nil {
return err
}
privKeyPEM, err := encodePrivateKey(privateKey)
if err != nil {
return err
}
csr, err := cfg.generateCSR(privateKey, []string{name})
if err != nil {
return err
}
issuedCert, err := issuer.Issue(ctx, csr)
if err != nil {
return fmt.Errorf("[%s] Obtain: %w", name, err)
}
// success - immediately save the certificate resource
certRes := CertificateResource{
SANs: namesFromCSR(csr),
CertificatePEM: issuedCert.Certificate,
PrivateKeyPEM: privKeyPEM,
IssuerData: issuedCert.Metadata,
}
err = cfg.saveCertResource(certRes)
if err != nil {
return fmt.Errorf("[%s] Obtain: saving assets: %v", name, err)
}
cfg.emit("cert_obtained", name)
if log != nil {
log.Info("certificate obtained successfully", zap.String("identifier", name))
}
return nil
}
if interactive {
err = f(ctx)
} else {
err = doWithRetry(ctx, log, f)
}
return err
}
// RenewCert renews the certificate for name using cfg. It stows the
// renewed certificate and its assets in storage if successful. It
// DOES NOT update the in-memory cache with the new certificate.
func (cfg *Config) RenewCert(ctx context.Context, name string, interactive bool) error {
issuer, err := cfg.getPrecheckedIssuer(ctx, []string{name}, interactive)
if err != nil {
return err
}
if issuer == nil {
return nil
}
return cfg.renewWithIssuer(ctx, issuer, name, interactive)
}
func (cfg *Config) renewWithIssuer(ctx context.Context, issuer Issuer, name string, interactive bool) error {
log := loggerNamed(cfg.Logger, "renew")
if log != nil {
log.Info("acquiring lock", zap.String("identifier", name))
}
// ensure idempotency of the renew operation for this name
lockKey := cfg.lockKey("cert_acme", name)
err := acquireLock(ctx, cfg.Storage, lockKey)
if err != nil {
return err
}
defer func() {
if log != nil {
log.Info("releasing lock", zap.String("identifier", name))
}
if err := releaseLock(cfg.Storage, lockKey); err != nil {
if log != nil {
log.Error("unable to unlock",
zap.String("identifier", name),
zap.String("lock_key", lockKey),
zap.Error(err))
}
}
}()
if log != nil {
log.Info("lock acquired", zap.String("identifier", name))
}
f := func(ctx context.Context) error {
// prepare for renewal (load PEM cert, key, and meta)
certRes, err := cfg.loadCertResource(name)
if err != nil {
return err
}
// check if renew is still needed - might have been renewed while waiting for lock
timeLeft, needsRenew := cfg.managedCertNeedsRenewal(certRes)
if !needsRenew {
if log != nil {
log.Info("certificate appears to have been renewed already",
zap.String("identifier", name),
zap.Duration("remaining", timeLeft))
}
return nil
}
if log != nil {
log.Info("renewing certificate",
zap.String("identifier", name),
zap.Duration("remaining", timeLeft))
}
privateKey, err := decodePrivateKey(certRes.PrivateKeyPEM)
if err != nil {
return err
}
csr, err := cfg.generateCSR(privateKey, []string{name})
if err != nil {
return err
}
issuedCert, err := issuer.Issue(ctx, csr)
if err != nil {
return fmt.Errorf("[%s] Renew: %w", name, err)
}
// success - immediately save the renewed certificate resource
newCertRes := CertificateResource{
SANs: namesFromCSR(csr),
CertificatePEM: issuedCert.Certificate,
PrivateKeyPEM: certRes.PrivateKeyPEM,
IssuerData: issuedCert.Metadata,
}
err = cfg.saveCertResource(newCertRes)
if err != nil {
return fmt.Errorf("[%s] Renew: saving assets: %v", name, err)
}
cfg.emit("cert_renewed", name)
if log != nil {
log.Info("certificate renewed successfully", zap.String("identifier", name))
}
return nil
}
if interactive {
err = f(ctx)
} else {
err = doWithRetry(ctx, log, f)
}
return err
}
func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string) (*x509.CertificateRequest, error) {
csrTemplate := new(x509.CertificateRequest)
for _, name := range sans {
if ip := net.ParseIP(name); ip != nil {
csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
} else if strings.Contains(name, "@") {
csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
} else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
csrTemplate.URIs = append(csrTemplate.URIs, u)
} else {
csrTemplate.DNSNames = append(csrTemplate.DNSNames, name)
}
}
if cfg.MustStaple {
csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension)
}
csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey)
if err != nil {
return nil, err
}
return x509.ParseCertificateRequest(csrDER)
}
// RevokeCert revokes the certificate for domain via ACME protocol. It requires
// that cfg.Issuer is properly configured with the same issuer that issued the
// certificate being revoked. See RFC 5280 §5.3.1 for reason codes.
func (cfg *Config) RevokeCert(ctx context.Context, domain string, reason int, interactive bool) error {
rev := cfg.Revoker
if rev == nil {
rev = Default.Revoker
}
certRes, err := cfg.loadCertResource(domain)
if err != nil {
return err
}
issuerKey := cfg.Issuer.IssuerKey()
if !cfg.Storage.Exists(StorageKeys.SitePrivateKey(issuerKey, domain)) {
return fmt.Errorf("private key not found for %s", certRes.SANs)
}
err = rev.Revoke(ctx, certRes, reason)
if err != nil {
return err
}
cfg.emit("cert_revoked", domain)
err = cfg.Storage.Delete(StorageKeys.SiteCert(issuerKey, domain))
if err != nil {
return fmt.Errorf("certificate revoked, but unable to delete certificate file: %v", err)
}
err = cfg.Storage.Delete(StorageKeys.SitePrivateKey(issuerKey, domain))
if err != nil {
return fmt.Errorf("certificate revoked, but unable to delete private key: %v", err)
}
err = cfg.Storage.Delete(StorageKeys.SiteMeta(issuerKey, domain))
if err != nil {
return fmt.Errorf("certificate revoked, but unable to delete certificate metadata: %v", err)
}
return nil
}
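// Editor's illustrative sketch (not part of upstream certmagic): revoking
// a certificate with RevokeCert above, using RFC 5280 reason code 4
// ("superseded"); the domain name is a placeholder.
//
//	err := cfg.RevokeCert(context.Background(), "example.com", 4, true)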
// TLSConfig is an opinionated method that returns a
// recommended, modern TLS configuration that can be
// used to configure TLS listeners, which also supports
// the TLS-ALPN challenge and serves up certificates
// managed by cfg.
//
// Unlike the package TLS() function, this method does
// not, by itself, enable certificate management for
// any domain names.
//
// Feel free to further customize the returned tls.Config,
// but do not mess with the GetCertificate or NextProtos
// fields unless you know what you're doing, as they're
// necessary to solve the TLS-ALPN challenge.
func (cfg *Config) TLSConfig() *tls.Config {
return &tls.Config{
// these two fields necessary for TLS-ALPN challenge
GetCertificate: cfg.GetCertificate,
NextProtos: []string{acmez.ACMETLS1Protocol},
// the rest recommended for modern TLS servers
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{
tls.X25519,
tls.CurveP256,
},
CipherSuites: preferredDefaultCipherSuites(),
PreferServerCipherSuites: true,
}
}
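// Editor's illustrative sketch (not part of upstream certmagic): further
// customizing the tls.Config returned by TLSConfig() above without
// disturbing the GetCertificate and NextProtos values it needs for the
// TLS-ALPN challenge; the appended ALPN protocols mirror what HTTPS()
// does in this package.
//
//	tlsCfg := cfg.TLSConfig()
//	tlsCfg.NextProtos = append([]string{"h2", "http/1.1"}, tlsCfg.NextProtos...)
//	ln, err := tls.Listen("tcp", ":443", tlsCfg)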
// getPrecheckedIssuer returns an Issuer with pre-checks
// completed, if it is also a PreChecker. It also checks
// that storage is functioning. If a nil Issuer is returned
// with a nil error, that means to skip this operation
// (not an error, just a no-op).
func (cfg *Config) getPrecheckedIssuer(ctx context.Context, names []string, interactive bool) (Issuer, error) {
// ensure storage is writeable and readable
// TODO: this is not necessary every time; should only
// perform check once every so often for each storage,
// which may require some global state...
err := cfg.checkStorage()
if err != nil {
return nil, fmt.Errorf("failed storage check: %v - storage is probably misconfigured", err)
}
if prechecker, ok := cfg.Issuer.(PreChecker); ok {
err := prechecker.PreCheck(ctx, names, interactive)
if err != nil {
return nil, err
}
}
return cfg.Issuer, nil
}
// checkStorage tests the storage by writing random bytes
// to a random key, and then loading those bytes and
// comparing the loaded value. If this fails, the provided
// cfg.Storage mechanism should not be used.
func (cfg *Config) checkStorage() error {
key := fmt.Sprintf("rw_test_%d", weakrand.Int())
contents := make([]byte, 1024*10) // size sufficient for one or two ACME resources
_, err := weakrand.Read(contents)
if err != nil {
return err
}
err = cfg.Storage.Store(key, contents)
if err != nil {
return err
}
defer func() {
deleteErr := cfg.Storage.Delete(key)
if deleteErr != nil {
if cfg.Logger != nil {
cfg.Logger.Error("deleting test key from storage",
zap.String("key", key), zap.Error(err))
}
}
// if there was no other error, make sure
// to return any error returned from Delete
if err == nil {
err = deleteErr
}
}()
loaded, err := cfg.Storage.Load(key)
if err != nil {
return err
}
if !bytes.Equal(contents, loaded) {
return fmt.Errorf("load yielded different value than was stored; expected %d bytes, got %d bytes of differing elements", len(contents), len(loaded))
}
return nil
}
// storageHasCertResources returns true if the storage
// associated with cfg's certificate cache has all the
// resources related to the certificate for domain: the
// certificate, the private key, and the metadata.
func (cfg *Config) storageHasCertResources(domain string) bool {
issuerKey := cfg.Issuer.IssuerKey()
certKey := StorageKeys.SiteCert(issuerKey, domain)
keyKey := StorageKeys.SitePrivateKey(issuerKey, domain)
metaKey := StorageKeys.SiteMeta(issuerKey, domain)
return cfg.Storage.Exists(certKey) &&
cfg.Storage.Exists(keyKey) &&
cfg.Storage.Exists(metaKey)
}
// lockKey returns a key for a lock that is specific to the operation
// named op being performed related to domainName and this config's CA.
func (cfg *Config) lockKey(op, domainName string) string {
return fmt.Sprintf("%s_%s_%s", op, domainName, cfg.Issuer.IssuerKey())
}
// managedCertNeedsRenewal returns the time left on the certificate
// and whether it needs to be renewed: true if certRes is expiring
// soon or already expired, or if checking the expiration failed.
func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource) (time.Duration, bool) {
cert, err := makeCertificate(certRes.CertificatePEM, certRes.PrivateKeyPEM)
if err != nil {
return 0, true
}
return time.Until(cert.Leaf.NotAfter), cert.NeedsRenewal(cfg)
}
func (cfg *Config) emit(eventName string, data interface{}) {
if cfg.OnEvent == nil {
return
}
cfg.OnEvent(eventName, data)
}
// CertificateSelector is a type which can select a certificate to use given multiple choices.
type CertificateSelector interface {
SelectCertificate(*tls.ClientHelloInfo, []Certificate) (Certificate, error)
}
// Constants for PKIX MustStaple extension.
var (
tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05}
mustStapleExtension = pkix.Extension{
Id: tlsFeatureExtensionOID,
Value: ocspMustStapleFeature,
}
)

@ -0,0 +1,324 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"hash/fnv"
"strings"
"github.com/klauspost/cpuid"
)
// encodePrivateKey marshals a EC or RSA private key into a PEM-encoded array of bytes.
func encodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
var pemType string
var keyBytes []byte
switch key := key.(type) {
case *ecdsa.PrivateKey:
var err error
pemType = "EC"
keyBytes, err = x509.MarshalECPrivateKey(key)
if err != nil {
return nil, err
}
case *rsa.PrivateKey:
pemType = "RSA"
keyBytes = x509.MarshalPKCS1PrivateKey(key)
case ed25519.PrivateKey:
var err error
pemType = "ED25519"
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unsupported key type: %T", key)
}
pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
return pem.EncodeToMemory(&pemKey), nil
}
// decodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes.
// Borrowed from Go standard library, to handle various private key and PEM block types.
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238)
func decodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) {
keyBlockDER, _ := pem.Decode(keyPEMBytes)
if keyBlockDER == nil {
return nil, fmt.Errorf("no PEM block found in key bytes")
}
if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
}
if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
return key, nil
}
if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
switch key := key.(type) {
case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
return key.(crypto.Signer), nil
default:
return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
}
}
if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
return key, nil
}
return nil, fmt.Errorf("unknown private key type")
}
// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns
// a slice of x509 certificates. This function will error if no certificates are found.
func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) {
var certificates []*x509.Certificate
var certDERBlock *pem.Block
for {
certDERBlock, bundle = pem.Decode(bundle)
if certDERBlock == nil {
break
}
if certDERBlock.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(certDERBlock.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
}
}
if len(certificates) == 0 {
return nil, fmt.Errorf("no certificates found in bundle")
}
return certificates, nil
}
// fastHash hashes input using a hashing algorithm that
// is fast, and returns the hash as a hex-encoded string.
// Do not use this for cryptographic purposes.
func fastHash(input []byte) string {
h := fnv.New32a()
h.Write(input)
return fmt.Sprintf("%x", h.Sum32())
}
// saveCertResource saves the certificate resource to disk. This
// includes the certificate file itself, the private key, and the
// metadata file.
func (cfg *Config) saveCertResource(cert CertificateResource) error {
metaBytes, err := json.MarshalIndent(cert, "", "\t")
if err != nil {
return fmt.Errorf("encoding certificate metadata: %v", err)
}
issuerKey := cfg.Issuer.IssuerKey()
certKey := cert.NamesKey()
all := []keyValue{
{
key: StorageKeys.SiteCert(issuerKey, certKey),
value: cert.CertificatePEM,
},
{
key: StorageKeys.SitePrivateKey(issuerKey, certKey),
value: cert.PrivateKeyPEM,
},
{
key: StorageKeys.SiteMeta(issuerKey, certKey),
value: metaBytes,
},
}
return storeTx(cfg.Storage, all)
}
func (cfg *Config) loadCertResource(certNamesKey string) (CertificateResource, error) {
var certRes CertificateResource
issuerKey := cfg.Issuer.IssuerKey()
certBytes, err := cfg.Storage.Load(StorageKeys.SiteCert(issuerKey, certNamesKey))
if err != nil {
return CertificateResource{}, err
}
certRes.CertificatePEM = certBytes
keyBytes, err := cfg.Storage.Load(StorageKeys.SitePrivateKey(issuerKey, certNamesKey))
if err != nil {
return CertificateResource{}, err
}
certRes.PrivateKeyPEM = keyBytes
metaBytes, err := cfg.Storage.Load(StorageKeys.SiteMeta(issuerKey, certNamesKey))
if err != nil {
return CertificateResource{}, err
}
err = json.Unmarshal(metaBytes, &certRes)
if err != nil {
return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err)
}
// TODO: July 2020 - transition to new ACME lib and cert resource structure;
// for a while, we will need to convert old cert resources to new structure
certRes, err = cfg.transitionCertMetaToACMEzJuly2020Format(certRes, metaBytes)
if err != nil {
return certRes, fmt.Errorf("one-time certificate resource transition: %v", err)
}
return certRes, nil
}
// TODO: this is a temporary transition helper starting July 2020.
// It can go away when we think enough time has passed that most active assets have transitioned.
func (cfg *Config) transitionCertMetaToACMEzJuly2020Format(certRes CertificateResource, metaBytes []byte) (CertificateResource, error) {
data, ok := certRes.IssuerData.(map[string]interface{})
if !ok {
return certRes, nil
}
if certURL, ok := data["url"].(string); ok && certURL != "" {
return certRes, nil
}
var oldCertRes struct {
SANs []string `json:"sans"`
IssuerData struct {
Domain string `json:"domain"`
CertURL string `json:"certUrl"`
CertStableURL string `json:"certStableUrl"`
} `json:"issuer_data"`
}
err := json.Unmarshal(metaBytes, &oldCertRes)
if err != nil {
return certRes, fmt.Errorf("decoding into old certificate resource type: %v", err)
}
data = map[string]interface{}{
"url": oldCertRes.IssuerData.CertURL,
}
certRes.IssuerData = data
err = cfg.saveCertResource(certRes)
if err != nil {
return certRes, fmt.Errorf("saving converted certificate resource: %v", err)
}
return certRes, nil
}
// hashCertificateChain computes the unique hash of certChain,
// which is the chain of DER-encoded bytes. It returns the
// hex encoding of the hash.
func hashCertificateChain(certChain [][]byte) string {
h := sha256.New()
for _, certInChain := range certChain {
h.Write(certInChain)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
func namesFromCSR(csr *x509.CertificateRequest) []string {
var nameSet []string
nameSet = append(nameSet, csr.DNSNames...)
nameSet = append(nameSet, csr.EmailAddresses...)
for _, v := range csr.IPAddresses {
nameSet = append(nameSet, v.String())
}
for _, v := range csr.URIs {
nameSet = append(nameSet, v.String())
}
return nameSet
}
// preferredDefaultCipherSuites returns an appropriate
// list of cipher suites to use depending on hardware
// support for AES-NI.
//
// See https://github.com/mholt/caddy/issues/1674
func preferredDefaultCipherSuites() []uint16 {
if cpuid.CPU.AesNi() {
return defaultCiphersPreferAES
}
return defaultCiphersPreferChaCha
}
var (
defaultCiphersPreferAES = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
}
defaultCiphersPreferChaCha = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
)
// StandardKeyGenerator is the standard, in-memory key source
// that uses crypto/rand.
type StandardKeyGenerator struct {
// The type of keys to generate.
KeyType KeyType
}
// GenerateKey generates a new private key according to kg.KeyType.
func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) {
switch kg.KeyType {
case ED25519:
_, priv, err := ed25519.GenerateKey(rand.Reader)
return priv, err
case "", P256:
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
case P384:
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
case RSA2048:
return rsa.GenerateKey(rand.Reader, 2048)
case RSA4096:
return rsa.GenerateKey(rand.Reader, 4096)
case RSA8192:
return rsa.GenerateKey(rand.Reader, 8192)
}
return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType)
}
// DefaultKeyGenerator is the default key source.
var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256}
// KeyType enumerates the known/supported key types.
type KeyType string
// Constants for all key types we support.
const (
ED25519 = KeyType("ed25519")
P256 = KeyType("p256")
P384 = KeyType("p384")
RSA2048 = KeyType("rsa2048")
RSA4096 = KeyType("rsa4096")
RSA8192 = KeyType("rsa8192")
)
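// Editor's illustrative sketch (not part of upstream certmagic): selecting
// a non-default key type for newly generated certificate keys, or
// generating a key directly; the choice of P384 is a placeholder.
//
//	cfg := certmagic.NewDefault()
//	cfg.KeySource = certmagic.StandardKeyGenerator{KeyType: certmagic.P384}
//
//	priv, err := certmagic.DefaultKeyGenerator.GenerateKey()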

@ -0,0 +1,339 @@
package certmagic
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/miekg/dns"
)
// Code in this file adapted from go-acme/lego, July 2020:
// https://github.com/go-acme/lego
// by Ludovic Fernandez and Dominik Menke
//
// It has been modified.
// findZoneByFQDN determines the zone apex for the given fqdn by recursing
// up the domain labels until the nameserver returns a SOA record in the
// answer section.
func findZoneByFQDN(fqdn string, nameservers []string) (string, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
soa, err := lookupSoaByFqdn(fqdn, nameservers)
if err != nil {
return "", err
}
return soa.zone, nil
}
func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
fqdnSOACacheMu.Lock()
defer fqdnSOACacheMu.Unlock()
// prefer cached version if fresh
if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() {
return ent, nil
}
ent, err := fetchSoaByFqdn(fqdn, nameservers)
if err != nil {
return nil, err
}
// save result to cache, but don't allow
// the cache to grow out of control
if len(fqdnSOACache) >= 1000 {
for key := range fqdnSOACache {
delete(fqdnSOACache, key)
break
}
}
fqdnSOACache[fqdn] = ent
return ent, nil
}
func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
var err error
var in *dns.Msg
labelIndexes := dns.Split(fqdn)
for _, index := range labelIndexes {
domain := fqdn[index:]
in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true)
if err != nil {
continue
}
if in == nil {
continue
}
switch in.Rcode {
case dns.RcodeSuccess:
// Check if we got a SOA RR in the answer section
if len(in.Answer) == 0 {
continue
}
// CNAME records cannot/should not exist at the root of a zone.
// So we skip a domain when a CNAME is found.
if dnsMsgContainsCNAME(in) {
continue
}
for _, ans := range in.Answer {
if soa, ok := ans.(*dns.SOA); ok {
return newSoaCacheEntry(soa), nil
}
}
case dns.RcodeNameError:
// NXDOMAIN
default:
// Any response code other than NOERROR and NXDOMAIN is treated as error
return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain)
}
}
return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err))
}
// dnsMsgContainsCNAME checks for a CNAME answer in msg
func dnsMsgContainsCNAME(msg *dns.Msg) bool {
for _, ans := range msg.Answer {
if _, ok := ans.(*dns.CNAME); ok {
return true
}
}
return false
}
func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) {
m := createDNSMsg(fqdn, rtype, recursive)
var in *dns.Msg
var err error
for _, ns := range nameservers {
in, err = sendDNSQuery(m, ns)
if err == nil && len(in.Answer) > 0 {
break
}
}
return in, err
}
func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg {
m := new(dns.Msg)
m.SetQuestion(fqdn, rtype)
m.SetEdns0(4096, false)
if !recursive {
m.RecursionDesired = false
}
return m
}
func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) {
udp := &dns.Client{Net: "udp", Timeout: dnsTimeout}
in, _, err := udp.Exchange(m, ns)
// two kinds of errors we can handle by retrying with TCP:
// truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639
truncated := in != nil && in.Truncated
timeoutErr := err != nil && strings.Contains(err.Error(), "timeout")
if truncated || timeoutErr {
tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout}
in, _, err = tcp.Exchange(m, ns)
}
return in, err
}
func formatDNSError(msg *dns.Msg, err error) string {
var parts []string
if msg != nil {
parts = append(parts, dns.RcodeToString[msg.Rcode])
}
if err != nil {
parts = append(parts, err.Error())
}
if len(parts) > 0 {
return ": " + strings.Join(parts, " ")
}
return ""
}
// soaCacheEntry holds a cached SOA record (only selected fields)
type soaCacheEntry struct {
zone string // zone apex (a domain name)
primaryNs string // primary nameserver for the zone apex
expires time.Time // time when this cache entry should be evicted
}
func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry {
return &soaCacheEntry{
zone: soa.Hdr.Name,
primaryNs: soa.Ns,
expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second),
}
}
// isExpired checks whether a cache entry should be considered expired.
func (cache *soaCacheEntry) isExpired() bool {
return time.Now().After(cache.expires)
}
// systemOrDefaultNameservers attempts to get system nameservers from the
// resolv.conf file given by path before falling back to hard-coded defaults.
func systemOrDefaultNameservers(path string, defaults []string) []string {
config, err := dns.ClientConfigFromFile(path)
if err != nil || len(config.Servers) == 0 {
return defaults
}
return config.Servers
}
// populateNameserverPorts ensures that all nameservers have a port number.
func populateNameserverPorts(servers []string) {
for i := range servers {
_, port, _ := net.SplitHostPort(servers[i])
if port == "" {
servers[i] = net.JoinHostPort(servers[i], "53")
}
}
}
// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
if !strings.HasSuffix(fqdn, ".") {
fqdn += "."
}
// Initial attempt to resolve at the recursive NS
r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true)
if err != nil {
return false, err
}
// TODO: make this configurable, maybe
// if !p.requireCompletePropagation {
// return true, nil
// }
if r.Rcode == dns.RcodeSuccess {
fqdn = updateDomainWithCName(r, fqdn)
}
authoritativeNss, err := lookupNameservers(fqdn, resolvers)
if err != nil {
return false, err
}
return checkAuthoritativeNss(fqdn, value, authoritativeNss)
}
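// Illustrative usage sketch (editor's addition, not part of the upstream source):
// tying the helpers in this file together to pre-check that an ACME DNS-01 TXT
// record is visible before asking the CA to validate. The record name and value
// are hypothetical placeholders.
func exampleCheckPropagation() error {
	fqdn := "_acme-challenge.example.com." // hypothetical challenge record
	value := "key-authorization-digest"    // hypothetical expected TXT value
	// query recursive resolvers first, then every authoritative nameserver
	ready, err := checkDNSPropagation(fqdn, value, recursiveNameservers(nil))
	if err != nil {
		return err
	}
	if !ready {
		return fmt.Errorf("TXT record for %s not yet visible on all authoritative nameservers", fqdn)
	}
	return nil
}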
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
for _, ns := range nameservers {
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
if err != nil {
return false, err
}
if r.Rcode != dns.RcodeSuccess {
if r.Rcode == dns.RcodeNameError {
// if Present() succeeded, then it must show up eventually, or else
// something is really broken in the DNS provider or their API;
// no need for error here, simply have the caller try again
return false, nil
}
return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
}
var found bool
for _, rr := range r.Answer {
if txt, ok := rr.(*dns.TXT); ok {
record := strings.Join(txt.Txt, "")
if record == value {
found = true
break
}
}
}
if !found {
return false, nil
}
}
return true, nil
}
// lookupNameservers returns the authoritative nameservers for the given fqdn.
func lookupNameservers(fqdn string, resolvers []string) ([]string, error) {
var authoritativeNss []string
zone, err := findZoneByFQDN(fqdn, resolvers)
if err != nil {
return nil, fmt.Errorf("could not determine the zone: %w", err)
}
r, err := dnsQuery(zone, dns.TypeNS, resolvers, true)
if err != nil {
return nil, err
}
for _, rr := range r.Answer {
if ns, ok := rr.(*dns.NS); ok {
authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
}
}
if len(authoritativeNss) > 0 {
return authoritativeNss, nil
}
return nil, errors.New("could not determine authoritative nameservers")
}
// Update FQDN with CNAME if any
func updateDomainWithCName(r *dns.Msg, fqdn string) string {
for _, rr := range r.Answer {
if cn, ok := rr.(*dns.CNAME); ok {
if cn.Hdr.Name == fqdn {
return cn.Target
}
}
}
return fqdn
}
// recursiveNameservers are used to pre-check DNS propagation. It
// prepends user-configured nameservers (custom) to the defaults
// obtained from resolv.conf and defaultNameservers and ensures
// that all server addresses have a port value.
func recursiveNameservers(custom []string) []string {
servers := append(custom, systemOrDefaultNameservers(defaultResolvConf, defaultNameservers)...)
populateNameserverPorts(servers)
return servers
}
var defaultNameservers = []string{
"8.8.8.8:53",
"8.8.4.4:53",
"1.1.1.1:53",
"1.0.0.1:53",
}
var dnsTimeout = 10 * time.Second
var (
fqdnSOACache = map[string]*soaCacheEntry{}
fqdnSOACacheMu sync.Mutex
)
const defaultResolvConf = "/etc/resolv.conf"

@ -0,0 +1,381 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"time"
)
// FileStorage facilitates forming file paths derived from a root
// directory. It is used to get file paths in a consistent,
// cross-platform way for persisting ACME assets on the file system.
type FileStorage struct {
Path string
}
// Exists returns true if key exists in fs.
func (fs *FileStorage) Exists(key string) bool {
_, err := os.Stat(fs.Filename(key))
return !os.IsNotExist(err)
}
// Store saves value at key.
func (fs *FileStorage) Store(key string, value []byte) error {
filename := fs.Filename(key)
err := os.MkdirAll(filepath.Dir(filename), 0700)
if err != nil {
return err
}
return ioutil.WriteFile(filename, value, 0600)
}
// Load retrieves the value at key.
func (fs *FileStorage) Load(key string) ([]byte, error) {
contents, err := ioutil.ReadFile(fs.Filename(key))
if os.IsNotExist(err) {
return nil, ErrNotExist(err)
}
return contents, nil
}
// Delete deletes the value at key.
func (fs *FileStorage) Delete(key string) error {
err := os.Remove(fs.Filename(key))
if os.IsNotExist(err) {
return ErrNotExist(err)
}
return err
}
// List returns all keys that match prefix.
func (fs *FileStorage) List(prefix string, recursive bool) ([]string, error) {
var keys []string
walkPrefix := fs.Filename(prefix)
err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info == nil {
return fmt.Errorf("%s: file info is nil", fpath)
}
if fpath == walkPrefix {
return nil
}
suffix, err := filepath.Rel(walkPrefix, fpath)
if err != nil {
return fmt.Errorf("%s: could not make path relative: %v", fpath, err)
}
keys = append(keys, path.Join(prefix, suffix))
if !recursive && info.IsDir() {
return filepath.SkipDir
}
return nil
})
return keys, err
}
// Stat returns information about key.
func (fs *FileStorage) Stat(key string) (KeyInfo, error) {
fi, err := os.Stat(fs.Filename(key))
if os.IsNotExist(err) {
return KeyInfo{}, ErrNotExist(err)
}
if err != nil {
return KeyInfo{}, err
}
return KeyInfo{
Key: key,
Modified: fi.ModTime(),
Size: fi.Size(),
IsTerminal: !fi.IsDir(),
}, nil
}
// Filename returns the key as a path on the file
// system prefixed by fs.Path.
func (fs *FileStorage) Filename(key string) string {
return filepath.Join(fs.Path, filepath.FromSlash(key))
}
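// Illustrative usage sketch (editor's addition, not part of the upstream source):
// FileStorage as a simple key/value store rooted at a directory. The path and
// key are hypothetical placeholders.
func exampleFileStorageBasics() error {
	fs := &FileStorage{Path: filepath.Join(dataDir(), "example")}
	key := "example/acme-account.json" // hypothetical key
	if err := fs.Store(key, []byte(`{"status":"valid"}`)); err != nil {
		return err
	}
	if !fs.Exists(key) {
		return fmt.Errorf("expected %s to exist after Store", key)
	}
	if _, err := fs.Load(key); err != nil {
		return err
	}
	return fs.Delete(key)
}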
// Lock obtains a lock named by the given key. It blocks
// until the lock can be obtained or an error is returned.
func (fs *FileStorage) Lock(ctx context.Context, key string) error {
filename := fs.lockFilename(key)
for {
err := createLockfile(filename)
if err == nil {
// got the lock, yay
return nil
}
if !os.IsExist(err) {
// unexpected error
return fmt.Errorf("creating lock file: %v", err)
}
// lock file already exists
var meta lockMeta
f, err := os.Open(filename)
if err == nil {
err2 := json.NewDecoder(f).Decode(&meta)
f.Close()
if err2 != nil {
return err2
}
}
switch {
case os.IsNotExist(err):
// must have just been removed; try again to create it
continue
case err != nil:
// unexpected error
return fmt.Errorf("accessing lock file: %v", err)
case fileLockIsStale(meta):
// lock file is stale - delete it and try again to create one
log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s",
fs, key, meta.Created, meta.Updated, filename)
removeLockfile(filename)
continue
default:
// lockfile exists and is not stale;
// just wait a moment and try again,
// or return if context cancelled
select {
case <-time.After(fileLockPollInterval):
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// Unlock releases the lock for name.
func (fs *FileStorage) Unlock(key string) error {
return removeLockfile(fs.lockFilename(key))
}
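// Illustrative usage sketch (editor's addition, not part of the upstream source):
// guarding a critical section with the lockfile mechanism above. The lock name
// is a hypothetical placeholder; Lock blocks until the lock is acquired, the
// context is done, or an error occurs.
func exampleWithLock(fs *FileStorage) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	const lockName = "issue_cert_example.com"
	if err := fs.Lock(ctx, lockName); err != nil {
		return err
	}
	defer func() {
		if err := fs.Unlock(lockName); err != nil {
			log.Printf("[ERROR] releasing lock %s: %v", lockName, err)
		}
	}()
	// ... do the work that must not run concurrently across instances ...
	return nil
}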
func (fs *FileStorage) String() string {
return "FileStorage:" + fs.Path
}
func (fs *FileStorage) lockFilename(key string) string {
return filepath.Join(fs.lockDir(), StorageKeys.Safe(key)+".lock")
}
func (fs *FileStorage) lockDir() string {
return filepath.Join(fs.Path, "locks")
}
func fileLockIsStale(meta lockMeta) bool {
ref := meta.Updated
if ref.IsZero() {
ref = meta.Created
}
// since updates are exactly every lockFreshnessInterval,
// add a grace period for the actual file read+write to
// take place
return time.Since(ref) > lockFreshnessInterval*2
}
// createLockfile atomically creates the lockfile
// identified by filename. A successfully created
// lockfile should be removed with removeLockfile.
func createLockfile(filename string) error {
err := atomicallyCreateFile(filename, true)
if err != nil {
return err
}
go keepLockfileFresh(filename)
// if the app crashes in removeLockfile(), there is a
// small chance the .unlock file is left behind; it's
// safe to simply remove it as it's a guard against
// double removal of the .lock file.
_ = os.Remove(filename + ".unlock")
return nil
}
// removeLockfile atomically removes filename,
// which must be a lockfile created by createLockfile.
// See discussion in PR #7 for more background:
// https://github.com/caddyserver/certmagic/pull/7
func removeLockfile(filename string) error {
unlockFilename := filename + ".unlock"
if err := atomicallyCreateFile(unlockFilename, false); err != nil {
if os.IsExist(err) {
// another process is handling the unlocking
return nil
}
return err
}
defer os.Remove(unlockFilename)
return os.Remove(filename)
}
// keepLockfileFresh continuously updates the lock file
// at filename with the current timestamp. It stops
// when the file disappears (happy path = lock released),
// or when there is an error at any point. Since it polls
// every lockFreshnessInterval, this function might
// not terminate until up to lockFreshnessInterval after
// the lock is released.
func keepLockfileFresh(filename string) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: active locking: %v\n%s", err, buf)
}
}()
for {
time.Sleep(lockFreshnessInterval)
done, err := updateLockfileFreshness(filename)
if err != nil {
log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename)
return
}
if done {
return
}
}
}
// updateLockfileFreshness updates the lock file at filename
// with the current timestamp. It returns true if the parent
// loop can terminate (i.e. no more need to update the lock).
func updateLockfileFreshness(filename string) (bool, error) {
f, err := os.OpenFile(filename, os.O_RDWR, 0644)
if os.IsNotExist(err) {
return true, nil // lock released
}
if err != nil {
return true, err
}
defer f.Close()
// read contents
metaBytes, err := ioutil.ReadAll(io.LimitReader(f, 2048))
if err != nil {
return true, err
}
var meta lockMeta
if err := json.Unmarshal(metaBytes, &meta); err != nil {
return true, err
}
// truncate file and reset I/O offset to beginning
if err := f.Truncate(0); err != nil {
return true, err
}
if _, err := f.Seek(0, 0); err != nil {
return true, err
}
// write updated timestamp
meta.Updated = time.Now()
return false, json.NewEncoder(f).Encode(meta)
}
// atomicallyCreateFile atomically creates the file
// identified by filename if it doesn't already exist.
func atomicallyCreateFile(filename string, writeLockInfo bool) error {
// no need to check this error, we only really care about the file creation error
_ = os.MkdirAll(filepath.Dir(filename), 0700)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
if err != nil {
return err
}
defer f.Close()
if writeLockInfo {
now := time.Now()
meta := lockMeta{
Created: now,
Updated: now,
}
err := json.NewEncoder(f).Encode(meta)
if err != nil {
return err
}
}
return nil
}
// homeDir returns the best guess of the current user's home
// directory from environment variables. If unknown, "." (the
// current directory) is returned instead.
func homeDir() string {
home := os.Getenv("HOME")
if home == "" && runtime.GOOS == "windows" {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home = drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
}
if home == "" {
home = "."
}
return home
}
func dataDir() string {
baseDir := filepath.Join(homeDir(), ".local", "share")
if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
baseDir = xdgData
}
return filepath.Join(baseDir, "certmagic")
}
// lockMeta is written into a lock file.
type lockMeta struct {
Created time.Time `json:"created,omitempty"`
Updated time.Time `json:"updated,omitempty"`
}
// lockFreshnessInterval is how often to update
// a lock's timestamp. Locks with a timestamp
// more than this duration in the past (plus a
// grace period for latency) can be considered
// stale.
const lockFreshnessInterval = 5 * time.Second
// fileLockPollInterval is how frequently
// to check the existence of a lock file
const fileLockPollInterval = 1 * time.Second
// Interface guard
var _ Storage = (*FileStorage)(nil)

@ -0,0 +1,12 @@
module github.com/caddyserver/certmagic
go 1.14
require (
github.com/klauspost/cpuid v1.2.5
github.com/libdns/libdns v0.1.0
github.com/mholt/acmez v0.1.1
github.com/miekg/dns v1.1.30
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
)

@ -0,0 +1,82 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v1.2.5 h1:VBd9MyVIiJHzzgnrLQG5Bcv75H4YaWrlKqWHjurxCGo=
github.com/klauspost/cpuid v1.2.5/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/libdns/libdns v0.1.0 h1:0ctCOrVJsVzj53mop1angHp/pE3hmAhP7KiHvR0HD04=
github.com/libdns/libdns v0.1.0/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
github.com/mholt/acmez v0.1.1 h1:KQODCqk+hBn3O7qfCRPj6L96uG65T5BSS95FKNEqtdA=
github.com/mholt/acmez v0.1.1/go.mod h1:8qnn8QA/Ewx8E3ZSsmscqsIjhhpxuy9vqdgbX2ceceM=
github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo=
github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

@ -0,0 +1,573 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// GetCertificate gets a certificate to satisfy clientHello. In getting
// the certificate, it abides the rules and settings defined in the
// Config that matches clientHello.ServerName. It first checks the in-
// memory cache, then, if the config enables "OnDemand", it accesses
// disk, then accesses the network if it must obtain a new certificate
// via ACME.
//
// This method is safe for use as a tls.Config.GetCertificate callback.
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
cfg.emit("tls_handshake_started", clientHello)
// special case: serve up the certificate for a TLS-ALPN ACME challenge
// (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05)
for _, proto := range clientHello.SupportedProtos {
if proto == acmez.ACMETLS1Protocol {
cfg.certCache.mu.RLock()
challengeCert, ok := cfg.certCache.cache[tlsALPNCertKeyName(clientHello.ServerName)]
cfg.certCache.mu.RUnlock()
if !ok {
// see if this challenge was started in a cluster; try distributed challenge solver
// (note that the tls.Config's ALPN settings must include the ACME TLS-ALPN challenge
// protocol string, otherwise a valid certificate will not solve the challenge; we
// should already have taken care of that when we made the tls.Config)
challengeCert, ok, err := cfg.tryDistributedChallengeSolver(clientHello)
if err != nil {
if cfg.Logger != nil {
cfg.Logger.Error("tls-alpn challenge",
zap.String("server_name", clientHello.ServerName),
zap.Error(err))
}
}
if ok {
if cfg.Logger != nil {
cfg.Logger.Info("served key authentication certificate",
zap.String("server_name", clientHello.ServerName),
zap.String("challenge", "tls-alpn-01"),
zap.String("remote", clientHello.Conn.RemoteAddr().String()),
zap.Bool("distributed", true))
}
return &challengeCert.Certificate, nil
}
return nil, fmt.Errorf("no certificate to complete TLS-ALPN challenge for SNI name: %s", clientHello.ServerName)
}
if cfg.Logger != nil {
cfg.Logger.Info("served key authentication certificate",
zap.String("server_name", clientHello.ServerName),
zap.String("challenge", "tls-alpn-01"),
zap.String("remote", clientHello.Conn.RemoteAddr().String()))
}
return &challengeCert.Certificate, nil
}
}
// get the certificate and serve it up
cert, err := cfg.getCertDuringHandshake(clientHello, true, true)
if err == nil {
cfg.emit("tls_handshake_completed", clientHello)
}
return &cert.Certificate, err
}
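// Illustrative usage sketch (editor's addition, not part of the upstream source):
// wiring GetCertificate into a tls.Config. Advertising the ACME TLS-ALPN
// protocol in NextProtos is what allows the special case above to solve
// tls-alpn-01 challenges during the handshake.
func exampleTLSConfig(cfg *Config) *tls.Config {
	return &tls.Config{
		GetCertificate: cfg.GetCertificate,
		NextProtos:     []string{"h2", "http/1.1", acmez.ACMETLS1Protocol},
		MinVersion:     tls.VersionTLS12,
	}
}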
// getCertificate gets a certificate that matches name from the in-memory
// cache, according to the lookup table associated with cfg. The lookup then
// points to a certificate in the Instance certificate cache.
//
// The name is expected to already be normalized (e.g. lowercased).
//
// If there is no exact match for name, it will be checked against names of
// the form '*.example.com' (wildcard certificates) according to RFC 6125.
// If a match is found, matched will be true. If no matches are found, matched
// will be false and a "default" certificate will be returned with defaulted
// set to true. If defaulted is false, then no certificates were available.
//
// The logic in this function is adapted from the Go standard library,
// which is by the Go Authors.
//
// This function is safe for concurrent use.
func (cfg *Config) getCertificate(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) {
name := NormalizedName(hello.ServerName)
if name == "" {
// if SNI is empty, prefer matching IP address
if hello.Conn != nil {
addr := hello.Conn.LocalAddr().String()
ip, _, err := net.SplitHostPort(addr)
if err == nil {
addr = ip
}
cert, matched = cfg.selectCert(hello, addr)
if matched {
return
}
}
// fall back to a "default" certificate, if specified
if cfg.DefaultServerName != "" {
normDefault := NormalizedName(cfg.DefaultServerName)
cert, defaulted = cfg.selectCert(hello, normDefault)
if defaulted {
return
}
}
} else {
// if SNI is specified, try an exact match first
cert, matched = cfg.selectCert(hello, name)
if matched {
return
}
// try replacing labels in the name with
// wildcards until we get a match
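// (e.g. for "sub.shop.example.com" the candidates tried are, in order:
// "*.shop.example.com", "*.*.example.com", "*.*.*.com", "*.*.*.*")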
labels := strings.Split(name, ".")
for i := range labels {
labels[i] = "*"
candidate := strings.Join(labels, ".")
cert, matched = cfg.selectCert(hello, candidate)
if matched {
return
}
}
// check the certCache directly to see if the SNI name is
// already the key of the certificate it wants; this implies
// that the SNI can contain the hash of a specific cert
// (chain) it wants and we will still be able to serve it up
// (this behavior, by the way, could be controversial as to
// whether it complies with RFC 6066 about SNI, but I think
// it does, soooo...)
// (this is how we solved the former ACME TLS-SNI challenge)
cfg.certCache.mu.RLock()
directCert, ok := cfg.certCache.cache[name]
cfg.certCache.mu.RUnlock()
if ok {
cert = directCert
matched = true
return
}
}
// otherwise, we're bingo on ammo; see issues
// caddyserver/caddy#2035 and caddyserver/caddy#1303 (any
// change to certificate matching behavior must
// account for hosts defined where the hostname
// is empty or a catch-all, like ":443" or
// "0.0.0.0:443")
return
}
// selectCert uses hello to select a certificate from the
// cache for name. If cfg.CertSelection is set, it will be
// used to make the decision. Otherwise, the first matching
// unexpired cert is returned. As a special case, if no
// certificates match name and cfg.CertSelection is set,
// then all certificates in the cache will be passed in
// for the cfg.CertSelection to make the final decision.
func (cfg *Config) selectCert(hello *tls.ClientHelloInfo, name string) (Certificate, bool) {
choices := cfg.certCache.getAllMatchingCerts(name)
if len(choices) == 0 {
if cfg.CertSelection == nil {
return Certificate{}, false
}
choices = cfg.certCache.getAllCerts()
}
if cfg.CertSelection == nil {
cert, err := DefaultCertificateSelector(hello, choices)
return cert, err == nil
}
cert, err := cfg.CertSelection.SelectCertificate(hello, choices)
return cert, err == nil
}
// DefaultCertificateSelector is the default certificate selection logic
// given a choice of certificates. If there is at least one certificate in
// choices, it always returns a certificate without error. It chooses the
// first non-expired certificate that the client supports if possible,
// otherwise it returns an expired certificate that the client supports,
// otherwise it just returns the first certificate in the list of choices.
func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) {
if len(choices) == 0 {
return Certificate{}, fmt.Errorf("no certificates available")
}
now := time.Now()
best := choices[0]
for _, choice := range choices {
if err := hello.SupportsCertificate(&choice.Certificate); err != nil {
continue
}
best = choice // at least the client supports it...
if now.After(choice.Leaf.NotBefore) && now.Before(choice.Leaf.NotAfter) {
return choice, nil // ...and unexpired, great! "Certificate, I choose you!"
}
}
return best, nil // all matching certs are expired or incompatible, oh well
}
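// Illustrative sketch (editor's addition, not part of the upstream source): a
// custom selection policy with the same method shape that cfg.CertSelection is
// invoked with above. It prefers the usable certificate covering the most
// names and otherwise defers to the default selector; the type name is
// hypothetical.
type preferMostNames struct{}
func (preferMostNames) SelectCertificate(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) {
	best, err := DefaultCertificateSelector(hello, choices)
	if err != nil {
		return best, err
	}
	for _, choice := range choices {
		if hello.SupportsCertificate(&choice.Certificate) == nil &&
			!choice.Expired() &&
			len(choice.Names) > len(best.Names) {
			best = choice
		}
	}
	return best, nil
}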
// getCertDuringHandshake will get a certificate for hello. It first tries
// the in-memory cache. If no certificate for hello is in the cache, the
// config most closely corresponding to hello will be loaded. If that config
// allows it (OnDemand==true) and if loadIfNecessary == true, it goes to disk
// to load it into the cache and serve it. If it's not on disk and if
// obtainIfNecessary == true, the certificate will be obtained from the CA,
// cached, and served. If obtainIfNecessary is true, then loadIfNecessary
// must also be set to true. An error will be returned if and only if no
// certificate is available.
//
// This function is safe for concurrent use.
func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
// First check our in-memory cache to see if we've already loaded it
cert, matched, defaulted := cfg.getCertificate(hello)
if matched {
if cert.managed && cfg.OnDemand != nil && obtainIfNecessary {
// It's been reported before that if the machine goes to sleep (or
// suspends the process) that certs which are already loaded into
// memory won't get renewed in the background, so we need to check
// expiry on each handshake too, sigh:
// https://caddy.community/t/local-certificates-not-renewing-on-demand/9482
return cfg.optionalMaintenance(log, cert, hello)
}
return cert, nil
}
name := cfg.getNameFromClientHello(hello)
// If OnDemand is enabled, then we might be able to load or
// obtain a needed certificate
if cfg.OnDemand != nil && loadIfNecessary {
// Then check to see if we have one on disk
loadedCert, err := cfg.CacheManagedCertificate(name)
if err == nil {
loadedCert, err = cfg.handshakeMaintenance(hello, loadedCert)
if err != nil {
if log != nil {
log.Error("maintaining newly-loaded certificate",
zap.String("server_name", name),
zap.Error(err))
}
}
return loadedCert, nil
}
if obtainIfNecessary {
// By this point, we need to ask the CA for a certificate
// Make sure the certificate should be obtained based on config
err := cfg.checkIfCertShouldBeObtained(name)
if err != nil {
return Certificate{}, err
}
// Obtain certificate from the CA
return cfg.obtainOnDemandCertificate(hello)
}
}
// Fall back to the default certificate if there is one
if defaulted {
return cert, nil
}
return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
}
// optionalMaintenance will perform maintenance on the certificate (if necessary) and
// will return the resulting certificate. This should only be done if the certificate
// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates.
func (cfg *Config) optionalMaintenance(log *zap.Logger, cert Certificate, hello *tls.ClientHelloInfo) (Certificate, error) {
newCert, err := cfg.handshakeMaintenance(hello, cert)
if err == nil {
return newCert, nil
}
if log != nil {
log.Error("renewing certificate on-demand failed",
zap.Strings("subjects", cert.Names),
zap.Time("not_after", cert.Leaf.NotAfter),
zap.Error(err))
}
if cert.Expired() {
return cert, err
}
// still has time remaining, so serve it anyway
return cert, nil
}
// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate
// should be obtained for a given domain based upon the config settings. If
// a non-nil error is returned, do not issue a new certificate for name.
func (cfg *Config) checkIfCertShouldBeObtained(name string) error {
if cfg.OnDemand == nil {
return fmt.Errorf("not configured for on-demand certificate issuance")
}
if !SubjectQualifiesForCert(name) {
return fmt.Errorf("subject name does not qualify for certificate: %s", name)
}
if cfg.OnDemand.DecisionFunc != nil {
return cfg.OnDemand.DecisionFunc(name)
}
if len(cfg.OnDemand.hostWhitelist) > 0 &&
!cfg.OnDemand.whitelistContains(name) {
return fmt.Errorf("certificate for '%s' is not managed", name)
}
return nil
}
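// Illustrative sketch (editor's addition, not part of the upstream source): a
// decision function of the shape consulted above, suitable for assigning to
// cfg.OnDemand.DecisionFunc. The allow-list is a hypothetical placeholder.
func exampleDecisionFunc(name string) error {
	allowed := map[string]bool{
		"example.com":     true,
		"www.example.com": true,
	}
	if !allowed[NormalizedName(name)] {
		return fmt.Errorf("on-demand certificate not allowed for %q", name)
	}
	return nil
}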
// obtainOnDemandCertificate obtains a certificate for hello.
// If another goroutine has already started obtaining a cert for
// hello, it will wait and use what the other goroutine obtained.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) obtainOnDemandCertificate(hello *tls.ClientHelloInfo) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
name := cfg.getNameFromClientHello(hello)
// We must protect this process from happening concurrently, so synchronize.
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already obtaining the certificate.
// wait for it to finish obtaining the cert and then we'll use it.
obtainCertWaitChansMu.Unlock()
<-wait
return cfg.getCertDuringHandshake(hello, true, false)
}
// looks like it's up to us to do all the work and obtain the cert.
// make a chan others can wait on if needed
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
// obtain the certificate
if log != nil {
log.Info("obtaining new certificate", zap.String("server_name", name))
}
// TODO: use a proper context; we use one with a timeout because retries are enabled (interactive is false)
ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second)
defer cancel()
err := cfg.ObtainCert(ctx, name, false)
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
if err != nil {
// shucks; failed to solve challenge on-demand
return Certificate{}, err
}
// success; certificate was just placed on disk, so
// we need only restart serving the certificate
return cfg.getCertDuringHandshake(hello, true, false)
}
// handshakeMaintenance performs a check on cert for expiration and OCSP validity.
// If necessary, it will renew the certificate and/or refresh the OCSP staple.
// OCSP stapling errors are not returned, only logged.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) handshakeMaintenance(hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
// Check cert expiration
timeLeft := cert.Leaf.NotAfter.Sub(time.Now().UTC())
if currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) {
if log != nil {
log.Info("certificate expires soon; attempting renewal",
zap.Strings("identifiers", cert.Names),
zap.Duration("remaining", timeLeft))
}
return cfg.renewDynamicCertificate(hello, cert)
}
// Check OCSP staple validity
if cert.ocsp != nil {
refreshTime := cert.ocsp.ThisUpdate.Add(cert.ocsp.NextUpdate.Sub(cert.ocsp.ThisUpdate) / 2)
if time.Now().After(refreshTime) {
_, err := stapleOCSP(cfg.Storage, &cert, nil)
if err != nil {
// An error with OCSP stapling is not the end of the world, and in fact, is
// quite common considering not all certs have issuer URLs that support it.
if log != nil {
log.Warn("stapling OCSP",
zap.String("server_name", hello.ServerName),
zap.Error(err))
}
}
cfg.certCache.mu.Lock()
cfg.certCache.cache[cert.hash] = cert
cfg.certCache.mu.Unlock()
}
}
return cert, nil
}
// renewDynamicCertificate renews the certificate for name using cfg. It returns the
// certificate to use and an error, if any. name should already be lower-cased before
// calling this function. name is the name obtained directly from the handshake's
// ClientHello.
//
// This function is safe for use by multiple concurrent goroutines.
func (cfg *Config) renewDynamicCertificate(hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) {
log := loggerNamed(cfg.Logger, "on_demand")
name := cfg.getNameFromClientHello(hello)
obtainCertWaitChansMu.Lock()
wait, ok := obtainCertWaitChans[name]
if ok {
// lucky us -- another goroutine is already renewing the certificate.
// wait for it to finish, then we'll use the new one.
obtainCertWaitChansMu.Unlock()
<-wait
return cfg.getCertDuringHandshake(hello, true, false)
}
// looks like it's up to us to do all the work and renew the cert
wait = make(chan struct{})
obtainCertWaitChans[name] = wait
obtainCertWaitChansMu.Unlock()
// Make sure a certificate for this name should be obtained on-demand
err := cfg.checkIfCertShouldBeObtained(name)
if err != nil {
// if not, remove from cache (it will be deleted from storage later)
cfg.certCache.mu.Lock()
cfg.certCache.removeCertificate(currentCert)
cfg.certCache.mu.Unlock()
return Certificate{}, err
}
// renew and reload the certificate
if log != nil {
log.Info("renewing certificate", zap.String("server_name", name))
}
// TODO: use a proper context; we use one with a timeout because retries are enabled (interactive is false)
ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second)
defer cancel()
err = cfg.RenewCert(ctx, name, false)
if err == nil {
// even though the recursive nature of the dynamic cert loading
// would just call this function anyway, we do it here to
// make the replacement as atomic as possible.
newCert, err := cfg.CacheManagedCertificate(name)
if err != nil {
if log != nil {
log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err))
}
} else {
// replace the old certificate with the new one
cfg.certCache.replaceCertificate(currentCert, newCert)
}
}
// immediately unblock anyone waiting for it; doing this in
// a defer would risk deadlock because of the recursive call
// to getCertDuringHandshake below when we return!
obtainCertWaitChansMu.Lock()
close(wait)
delete(obtainCertWaitChans, name)
obtainCertWaitChansMu.Unlock()
if err != nil {
return Certificate{}, err
}
return cfg.getCertDuringHandshake(hello, true, false)
}
// tryDistributedChallengeSolver is to be called when the clientHello pertains to
// a TLS-ALPN challenge and a certificate is required to solve it. This method
// checks the distributed store of challenge info files and, if a matching ServerName
// is present, it makes a certificate to solve this challenge and returns it. For
// this to succeed, it requires that cfg.Issuer is of type *ACMEManager.
// A boolean true is returned if a valid certificate is returned.
func (cfg *Config) tryDistributedChallengeSolver(clientHello *tls.ClientHelloInfo) (Certificate, bool, error) {
am, ok := cfg.Issuer.(*ACMEManager)
if !ok {
return Certificate{}, false, nil
}
tokenKey := distributedSolver{acmeManager: am, caURL: am.CA}.challengeTokensKey(clientHello.ServerName)
chalInfoBytes, err := cfg.Storage.Load(tokenKey)
if err != nil {
if _, ok := err.(ErrNotExist); ok {
return Certificate{}, false, nil
}
return Certificate{}, false, fmt.Errorf("opening distributed challenge token file %s: %v", tokenKey, err)
}
var chalInfo acme.Challenge
err = json.Unmarshal(chalInfoBytes, &chalInfo)
if err != nil {
return Certificate{}, false, fmt.Errorf("decoding challenge token file %s (corrupted?): %v", tokenKey, err)
}
cert, err := acmez.TLSALPN01ChallengeCert(chalInfo)
if err != nil {
return Certificate{}, false, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err)
}
if cert == nil {
return Certificate{}, false, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error")
}
return Certificate{Certificate: *cert}, true, nil
}
// getNameFromClientHello returns a normalized form of hello.ServerName.
// If hello.ServerName is empty (i.e. client did not use SNI), then the
// associated connection's local address is used to extract an IP address.
func (*Config) getNameFromClientHello(hello *tls.ClientHelloInfo) string {
name := NormalizedName(hello.ServerName)
if name != "" || hello.Conn == nil {
return name
}
// if no SNI, try using IP address on the connection
localAddr := hello.Conn.LocalAddr().String()
localAddrHost, _, err := net.SplitHostPort(localAddr)
if err == nil {
return localAddrHost
}
return localAddr
}
// NormalizedName returns a cleaned form of serverName that is
// used for consistency when referring to a SNI value.
func NormalizedName(serverName string) string {
return strings.ToLower(strings.TrimSpace(serverName))
}
// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
var obtainCertWaitChans = make(map[string]chan struct{})
var obtainCertWaitChansMu sync.Mutex

@ -0,0 +1,133 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"encoding/json"
"net/http"
"strings"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
)
// HTTPChallengeHandler wraps h in a handler that can solve the ACME
// HTTP challenge. cfg is required, and it must have a certificate
// cache backed by a functional storage facility, since that is where
// the challenge state is stored between initiation and solution.
//
// If a request is not an ACME HTTP challenge, h will be invoked.
func (am *ACMEManager) HTTPChallengeHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if am.HandleHTTPChallenge(w, r) {
return
}
h.ServeHTTP(w, r)
})
}
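// Illustrative usage sketch (editor's addition, not part of the upstream source):
// serving plain HTTP on port 80 with the HTTP-01 solver wrapped around an
// application handler. The mux and response body are hypothetical placeholders.
func exampleHTTPChallengeServer(am *ACMEManager) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})
	// requests under /.well-known/acme-challenge/ are answered by am;
	// everything else falls through to mux
	return http.ListenAndServe(":80", am.HTTPChallengeHandler(mux))
}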
// HandleHTTPChallenge uses am to solve challenge requests from an ACME
// server that were initiated by this instance or any other instance in
// this cluster (that is, any instance that uses the same storage am does).
//
// If the HTTP challenge is disabled, this function is a no-op.
//
// If am is nil or if am does not have a certificate cache backed by
// usable storage, solving the HTTP challenge will fail.
//
// It returns true if it handled the request; if so, the response has
// already been written. If false is returned, this call was a no-op and
// the request has not been handled.
func (am *ACMEManager) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
if am == nil {
return false
}
if am.DisableHTTPChallenge {
return false
}
if !LooksLikeHTTPChallenge(r) {
return false
}
return am.distributedHTTPChallengeSolver(w, r)
}
// distributedHTTPChallengeSolver checks to see if this challenge
// request was initiated by this or another instance which uses the
// same storage as am does, and attempts to complete the challenge for
// it. It returns true if the request was handled; false otherwise.
func (am *ACMEManager) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool {
if am == nil {
return false
}
host := hostOnly(r.Host)
tokenKey := distributedSolver{acmeManager: am, caURL: am.CA}.challengeTokensKey(host)
chalInfoBytes, err := am.config.Storage.Load(tokenKey)
if err != nil {
if _, ok := err.(ErrNotExist); !ok {
if am.Logger != nil {
am.Logger.Error("opening distributed HTTP challenge token file",
zap.String("host", host),
zap.Error(err))
}
}
return false
}
var challenge acme.Challenge
err = json.Unmarshal(chalInfoBytes, &challenge)
if err != nil {
if am.Logger != nil {
am.Logger.Error("decoding HTTP challenge token file (corrupted?)",
zap.String("host", host),
zap.String("token_key", tokenKey),
zap.Error(err))
}
return false
}
return am.answerHTTPChallenge(w, r, challenge)
}
// answerHTTPChallenge solves the challenge with chalInfo.
// Most of this code borrowed from xenolf's built-in HTTP-01
// challenge solver in March 2018.
func (am *ACMEManager) answerHTTPChallenge(w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool {
challengeReqPath := challenge.HTTP01ResourcePath()
if r.URL.Path == challengeReqPath &&
strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks
r.Method == "GET" {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte(challenge.KeyAuthorization))
r.Close = true
if am.Logger != nil {
am.Logger.Info("served key authentication",
zap.String("identifier", challenge.Identifier.Value),
zap.String("challenge", "http-01"),
zap.String("remote", r.RemoteAddr))
}
return true
}
return false
}
// LooksLikeHTTPChallenge returns true if r looks like an ACME
// HTTP challenge request from an ACME server.
func LooksLikeHTTPChallenge(r *http.Request) bool {
return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath)
}
const challengeBasePath = "/.well-known/acme-challenge"

@ -0,0 +1,581 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/x509"
"encoding/pem"
"fmt"
"log"
"path"
"runtime"
"strings"
"time"
"go.uber.org/zap"
"golang.org/x/crypto/ocsp"
)
// maintainAssets is a permanently-blocking function
// that loops indefinitely and, on a regular schedule, checks
// certificates for expiration and initiates a renewal of certs
// that are expiring soon. It also updates OCSP stapling. It
// should only be called once per cache. Panics are recovered,
// and if panicCount < 10, the function is called recursively,
// incrementing panicCount each time. Initial invocation should
// start panicCount at 0.
func (certCache *Cache) maintainAssets(panicCount int) {
log := loggerNamed(certCache.logger, "maintenance")
if log != nil {
log = log.With(zap.String("cache", fmt.Sprintf("%p", certCache)))
}
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
if log != nil {
log.Error("panic", zap.Any("error", err), zap.ByteString("stack", buf))
}
if panicCount < 10 {
certCache.maintainAssets(panicCount + 1)
}
}
}()
renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
if log != nil {
log.Info("started background certificate maintenance")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
for {
select {
case <-renewalTicker.C:
err := certCache.RenewManagedCertificates(ctx)
if err != nil && log != nil {
log.Error("renewing managed certificates", zap.Error(err))
}
case <-ocspTicker.C:
certCache.updateOCSPStaples(ctx)
case <-certCache.stopChan:
renewalTicker.Stop()
ocspTicker.Stop()
// TODO: stop any in-progress maintenance operations and clear locks we made (this might be done now with our use of context)
if log != nil {
log.Info("stopped background certificate maintenance")
}
close(certCache.doneChan)
return
}
}
}
// RenewManagedCertificates renews managed certificates,
// including ones loaded on-demand. Note that this is done
// automatically on a regular basis; normally you will not
// need to call this. This method assumes non-interactive
// mode (i.e. operating in the background).
func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
log := loggerNamed(certCache.logger, "maintenance")
// configs will hold a map of certificate name to the config
// to use when managing that certificate
configs := make(map[string]*Config)
// we use the queues for a very important reason: to do any and all
// operations that could require an exclusive write lock outside
// of the read lock! otherwise we get a deadlock, yikes. in other
// words, our first iteration through the certificate cache does NOT
// perform any operations--only queues them--so that more fine-grained
// write locks may be obtained during the actual operations.
var renewQueue, reloadQueue, deleteQueue []Certificate
certCache.mu.RLock()
for certKey, cert := range certCache.cache {
if !cert.managed {
continue
}
// the list of names on this cert should never be empty... programmer error?
if cert.Names == nil || len(cert.Names) == 0 {
if log != nil {
log.Warn("certificate has no names; removing from cache", zap.String("cert_key", certKey))
}
deleteQueue = append(deleteQueue, cert)
continue
}
// get the config associated with this certificate
cfg, err := certCache.getConfig(cert)
if err != nil {
if log != nil {
log.Error("unable to get configuration to manage certificate; unable to renew",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
continue
}
if cfg == nil {
// this is bad if this happens, probably a programmer error (oops)
if log != nil {
log.Error("no configuration associated with certificate; unable to manage",
zap.Strings("identifiers", cert.Names))
}
continue
}
// if time is up or expires soon, we need to try to renew it
if cert.NeedsRenewal(cfg) {
configs[cert.Names[0]] = cfg
// see if the certificate in storage has already been renewed, possibly by another
// instance that didn't coordinate with this one; if so, just load it (this
// might happen if another instance already renewed it - kinda sloppy but checking disk
// first is a simple way to possibly drastically reduce rate limit problems)
storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(cert)
if err != nil {
// hmm, weird, but not a big deal, maybe it was deleted or something
if log != nil {
log.Warn("error while checking if stored certificate is also expiring soon",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
} else if !storedCertExpiring {
// if the certificate is NOT expiring soon and there was no error, then we
// are good to just reload the certificate from storage instead of repeating
// a likely-unnecessary renewal procedure
reloadQueue = append(reloadQueue, cert)
continue
}
// the certificate in storage has not been renewed yet, so we will do it
// NOTE: It is super-important to note that the TLS-ALPN challenge requires
// a write lock on the cache in order to complete its challenge, so it is extra
// vital that this renew operation does not happen inside our read lock!
renewQueue = append(renewQueue, cert)
}
}
certCache.mu.RUnlock()
// Reload certificates that merely need to be updated in memory
for _, oldCert := range reloadQueue {
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("certificate expires soon, but is already renewed in storage; reloading stored certificate",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
cfg := configs[oldCert.Names[0]]
// crucially, this happens OUTSIDE a lock on the certCache
err := cfg.reloadManagedCertificate(oldCert)
if err != nil {
if log != nil {
log.Error("loading renewed certificate",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
continue
}
}
// Renewal queue
for _, oldCert := range renewQueue {
cfg := configs[oldCert.Names[0]]
err := certCache.queueRenewalTask(ctx, oldCert, cfg)
if err != nil {
if log != nil {
log.Error("queueing renewal task",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
continue
}
}
// Deletion queue
certCache.mu.Lock()
for _, cert := range deleteQueue {
certCache.removeCertificate(cert)
}
certCache.mu.Unlock()
return nil
}
func (certCache *Cache) queueRenewalTask(ctx context.Context, oldCert Certificate, cfg *Config) error {
log := loggerNamed(certCache.logger, "maintenance")
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("certificate expires soon; queuing for renewal",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
// Get the name which we should use to renew this certificate;
// we only support managing certificates with one name per cert,
// so this should be easy.
renewName := oldCert.Names[0]
// queue up this renewal job (is a no-op if already active or queued)
jm.Submit(cfg.Logger, "renew_"+renewName, func() error {
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
if log != nil {
log.Info("attempting certificate renewal",
zap.Strings("identifiers", oldCert.Names),
zap.Duration("remaining", timeLeft))
}
// perform renewal - crucially, this happens OUTSIDE a lock on certCache
err := cfg.RenewCert(ctx, renewName, false)
if err != nil {
if cfg.OnDemand != nil {
// loaded dynamically, remove dynamically
certCache.mu.Lock()
certCache.removeCertificate(oldCert)
certCache.mu.Unlock()
}
return fmt.Errorf("%v %v", oldCert.Names, err)
}
// successful renewal, so update in-memory cache by loading
// renewed certificate so it will be used with handshakes
err = cfg.reloadManagedCertificate(oldCert)
if err != nil {
return ErrNoRetry{fmt.Errorf("%v %v", oldCert.Names, err)}
}
return nil
})
return nil
}
// updateOCSPStaples updates the OCSP stapling in all
// eligible, cached certificates.
//
// OCSP maintenance strives to abide by the relevant points of
// Ryan Sleevi's recommendations for good OCSP support:
// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8
func (certCache *Cache) updateOCSPStaples(ctx context.Context) {
log := loggerNamed(certCache.logger, "maintenance")
// temporary structures to store updates or tasks
// so that we can keep our locks short-lived
type ocspUpdate struct {
rawBytes []byte
parsed *ocsp.Response
}
type updateQueueEntry struct {
cert Certificate
certHash string
lastNextUpdate time.Time
}
updated := make(map[string]ocspUpdate)
var updateQueue []updateQueueEntry
var renewQueue []Certificate
configs := make(map[string]*Config)
// obtain brief read lock during our scan to see which staples need updating
certCache.mu.RLock()
for certHash, cert := range certCache.cache {
// no point in updating OCSP for expired or "synthetic" certificates
if cert.Leaf == nil || cert.Expired() {
continue
}
var lastNextUpdate time.Time
if cert.ocsp != nil {
lastNextUpdate = cert.ocsp.NextUpdate
if freshOCSP(cert.ocsp) {
continue // no need to update staple if ours is still fresh
}
}
updateQueue = append(updateQueue, updateQueueEntry{cert, certHash, lastNextUpdate})
}
certCache.mu.RUnlock()
// perform updates outside of any lock on certCache
for _, qe := range updateQueue {
cert := qe.cert
certHash := qe.certHash
lastNextUpdate := qe.lastNextUpdate
cfg, err := certCache.getConfig(cert)
if err != nil {
if log != nil {
log.Error("unable to refresh OCSP staple because getting automation config for certificate failed",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
continue
}
if cfg == nil {
// this is bad if this happens, probably a programmer error (oops)
if log != nil {
log.Error("no configuration associated with certificate; unable to manage OCSP staples",
zap.Strings("identifiers", cert.Names))
}
continue
}
ocspResp, err := stapleOCSP(cfg.Storage, &cert, nil)
if err != nil {
if cert.ocsp != nil {
// if there was no staple before, that's fine; otherwise we should log the error
if log != nil {
log.Error("stapling OCSP",
zap.Strings("identifiers", cert.Names),
zap.Error(err))
}
}
continue
}
// By this point, we've obtained the latest OCSP response.
// If there was no staple before, or if the response is updated, make
// sure we apply the update to all names on the certificate.
if cert.ocsp != nil && (lastNextUpdate.IsZero() || lastNextUpdate != cert.ocsp.NextUpdate) {
if log != nil {
log.Info("advancing OCSP staple",
zap.Strings("identifiers", cert.Names),
zap.Time("from", lastNextUpdate),
zap.Time("to", cert.ocsp.NextUpdate))
}
updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.ocsp}
}
// If a managed certificate was revoked, we should attempt
// to replace it with a new one. If that fails, oh well.
if cert.managed && ocspResp.Status == ocsp.Revoked && len(cert.Names) > 0 {
renewQueue = append(renewQueue, cert)
configs[cert.Names[0]] = cfg
}
}
// These write locks should be brief since we have all the info we need now.
for certKey, update := range updated {
certCache.mu.Lock()
cert := certCache.cache[certKey]
cert.ocsp = update.parsed
cert.Certificate.OCSPStaple = update.rawBytes
certCache.cache[certKey] = cert
certCache.mu.Unlock()
}
// We attempt to replace any certificates that were revoked.
// Crucially, this happens OUTSIDE a lock on the certCache.
for _, oldCert := range renewQueue {
if log != nil {
log.Warn("OCSP status for managed certificate is REVOKED; attempting to replace with new certificate",
zap.Strings("identifiers", oldCert.Names),
zap.Time("expiration", oldCert.Leaf.NotAfter))
}
renewName := oldCert.Names[0]
cfg := configs[renewName]
// TODO: consider using a new key in this situation, but we don't know if key storage has been compromised...
err := cfg.RenewCert(ctx, renewName, false)
if err != nil {
// probably better to not serve a revoked certificate at all
if log != nil {
log.Error("unable to obtain new to certificate after OCSP status of REVOKED; removing from cache",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
certCache.mu.Lock()
certCache.removeCertificate(oldCert)
certCache.mu.Unlock()
continue
}
err = cfg.reloadManagedCertificate(oldCert)
if err != nil {
if log != nil {
log.Error("after obtaining new certificate due to OCSP status of REVOKED",
zap.Strings("identifiers", oldCert.Names),
zap.Error(err))
}
continue
}
}
}
// CleanStorageOptions specifies how to clean up a storage unit.
type CleanStorageOptions struct {
OCSPStaples bool
ExpiredCerts bool
ExpiredCertGracePeriod time.Duration
}
// CleanStorage removes assets which are no longer useful,
// according to opts.
func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions) {
if opts.OCSPStaples {
err := deleteOldOCSPStaples(ctx, storage)
if err != nil {
log.Printf("[ERROR] Deleting old OCSP staples: %v", err)
}
}
if opts.ExpiredCerts {
err := deleteExpiredCerts(ctx, storage, opts.ExpiredCertGracePeriod)
if err != nil {
log.Printf("[ERROR] Deleting expired certificates: %v", err)
}
}
// TODO: delete stale locks?
}
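// A minimal usage sketch of CleanStorage, assuming a file-system storage
// rooted at an example path; the path and grace period below are only
// illustrative values, not defaults:
//
//	storage := &FileStorage{Path: "/var/lib/certmagic"} // assumed path
//	CleanStorage(context.Background(), storage, CleanStorageOptions{
//		OCSPStaples:            true,
//		ExpiredCerts:           true,
//		ExpiredCertGracePeriod: 30 * 24 * time.Hour, // ~30 days of grace
//	})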
func deleteOldOCSPStaples(ctx context.Context, storage Storage) error {
ocspKeys, err := storage.List(prefixOCSP, false)
if err != nil {
// maybe just hasn't been created yet; no big deal
return nil
}
for _, key := range ocspKeys {
// if context was cancelled, quit early; otherwise proceed
select {
case <-ctx.Done():
return ctx.Err()
default:
}
ocspBytes, err := storage.Load(key)
if err != nil {
log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err)
continue
}
resp, err := ocsp.ParseResponse(ocspBytes, nil)
if err != nil {
// contents are invalid; delete it
err = storage.Delete(key)
if err != nil {
log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err)
}
continue
}
if time.Now().After(resp.NextUpdate) {
// response has expired; delete it
err = storage.Delete(key)
if err != nil {
log.Printf("[ERROR] Purging expired staple file %s: %v", key, err)
}
}
}
return nil
}
func deleteExpiredCerts(ctx context.Context, storage Storage, gracePeriod time.Duration) error {
issuerKeys, err := storage.List(prefixCerts, false)
if err != nil {
// maybe just hasn't been created yet; no big deal
return nil
}
for _, issuerKey := range issuerKeys {
siteKeys, err := storage.List(issuerKey, false)
if err != nil {
log.Printf("[ERROR] Listing contents of %s: %v", issuerKey, err)
continue
}
for _, siteKey := range siteKeys {
// if context was cancelled, quit early; otherwise proceed
select {
case <-ctx.Done():
return ctx.Err()
default:
}
siteAssets, err := storage.List(siteKey, false)
if err != nil {
log.Printf("[ERROR] Listing contents of %s: %v", siteKey, err)
continue
}
for _, assetKey := range siteAssets {
if path.Ext(assetKey) != ".crt" {
continue
}
certFile, err := storage.Load(assetKey)
if err != nil {
return fmt.Errorf("loading certificate file %s: %v", assetKey, err)
}
block, _ := pem.Decode(certFile)
if block == nil || block.Type != "CERTIFICATE" {
return fmt.Errorf("certificate file %s does not contain PEM-encoded certificate", assetKey)
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("certificate file %s is malformed; error parsing PEM: %v", assetKey, err)
}
if expiredTime := time.Since(cert.NotAfter); expiredTime >= gracePeriod {
log.Printf("[INFO] Certificate %s expired %s ago; cleaning up", assetKey, expiredTime)
baseName := strings.TrimSuffix(assetKey, ".crt")
for _, relatedAsset := range []string{
assetKey,
baseName + ".key",
baseName + ".json",
} {
log.Printf("[INFO] Deleting %s because resource expired", relatedAsset)
err := storage.Delete(relatedAsset)
if err != nil {
log.Printf("[ERROR] Cleaning up asset related to expired certificate for %s: %s: %v",
baseName, relatedAsset, err)
}
}
}
}
// update listing; if folder is empty, delete it
siteAssets, err = storage.List(siteKey, false)
if err != nil {
continue
}
if len(siteAssets) == 0 {
log.Printf("[INFO] Deleting %s because key is empty", siteKey)
err := storage.Delete(siteKey)
if err != nil {
return fmt.Errorf("deleting empty site folder %s: %v", siteKey, err)
}
}
}
}
return nil
}
const (
// DefaultRenewCheckInterval is how often to check certificates for expiration.
// Scans are very lightweight, so this can be semi-frequent. This default should
// be smaller than <Minimum Cert Lifetime>*DefaultRenewalWindowRatio/3, which
// gives certificates plenty of chance to be renewed on time.
DefaultRenewCheckInterval = 10 * time.Minute
// DefaultRenewalWindowRatio is how much of a certificate's lifetime becomes the
// renewal window. The renewal window is the span of time at the end of the
// certificate's validity period in which it should be renewed. A default value
// of ~1/3 is pretty safe and recommended for most certificates.
DefaultRenewalWindowRatio = 1.0 / 3.0
// DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating.
DefaultOCSPCheckInterval = 1 * time.Hour
)
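// As a worked example of the guidance above: a 90-day certificate with
// DefaultRenewalWindowRatio of 1/3 has a renewal window covering its final
// 30 days, and <Minimum Cert Lifetime>*DefaultRenewalWindowRatio/3 works out
// to 10 days, so the 10-minute DefaultRenewCheckInterval sits comfortably
// within that bound.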

@ -0,0 +1,212 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"bytes"
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"time"
"golang.org/x/crypto/ocsp"
)
// stapleOCSP staples OCSP information to cert for hostname name.
// If you have it handy, you should pass in the PEM-encoded certificate
// bundle; otherwise the DER-encoded cert will have to be PEM-encoded.
// If you don't have the PEM blocks already, just pass in nil.
//
// Errors here are not necessarily fatal; it could just be that the
// certificate doesn't have an issuer URL.
//
// If a status was received, it returns that status. Note that the
// returned status is not always stapled to the certificate.
func stapleOCSP(storage Storage, cert *Certificate, pemBundle []byte) (*ocsp.Response, error) {
if pemBundle == nil {
// we need a PEM encoding only for some function calls below
bundle := new(bytes.Buffer)
for _, derBytes := range cert.Certificate.Certificate {
pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
}
pemBundle = bundle.Bytes()
}
var ocspBytes []byte
var ocspResp *ocsp.Response
var ocspErr error
var gotNewOCSP bool
// First try to load OCSP staple from storage and see if
// we can still use it.
ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle)
cachedOCSP, err := storage.Load(ocspStapleKey)
if err == nil {
resp, err := ocsp.ParseResponse(cachedOCSP, nil)
if err == nil {
if freshOCSP(resp) {
// staple is still fresh; use it
ocspBytes = cachedOCSP
ocspResp = resp
}
} else {
// invalid contents; delete the file
// (we do this independently of the maintenance routine because
// in this case we know for sure this should be a staple file
// because we loaded it by name, whereas the maintenance routine
// just iterates the list of files, even if somehow a non-staple
// file gets in the folder. in this case we are sure it is corrupt.)
err := storage.Delete(ocspStapleKey)
if err != nil {
log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err)
}
}
}
// If we couldn't get a fresh staple by reading the cache,
// then we need to request it from the OCSP responder
if ocspResp == nil || len(ocspBytes) == 0 {
ocspBytes, ocspResp, ocspErr = getOCSPForCert(pemBundle)
if ocspErr != nil {
// An error here is not a problem because a certificate may simply
// not contain a link to an OCSP server. But we should log it anyway.
// There's nothing else we can do to get OCSP for this certificate,
// so we can return here with the error.
return nil, fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr)
}
gotNewOCSP = true
}
// By now, we should have a response. If good, staple it to
// the certificate. If the OCSP response was not loaded from
// storage, we persist it for next time.
if ocspResp.Status == ocsp.Good {
if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) {
// uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus.
// it was the reason a lot of Symantec-validated sites (not Caddy) went down
// in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961
return ocspResp, fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)",
cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate))
}
cert.Certificate.OCSPStaple = ocspBytes
cert.ocsp = ocspResp
if gotNewOCSP {
err := storage.Store(ocspStapleKey, ocspBytes)
if err != nil {
return ocspResp, fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err)
}
}
}
return ocspResp, nil
}
// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
// the parsed response, and an error, if any. The returned []byte can be passed directly
// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
// issued certificate, this function will try to get the issuer certificate from the
// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
// values are nil, the OCSP status may be assumed OCSPUnknown.
//
// Borrowed from xenolf.
func getOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) {
// TODO: Perhaps this should be synchronized too, with a Locker?
certificates, err := parseCertsFromPEMBundle(bundle)
if err != nil {
return nil, nil, err
}
// We expect the certificate slice to be ordered down the chain:
// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
// which should always be the first two certificates. If there's no
// OCSP server listed in the leaf cert, there's nothing to do. And if
// we have only one certificate so far, we need to get the issuer cert.
issuedCert := certificates[0]
if len(issuedCert.OCSPServer) == 0 {
return nil, nil, fmt.Errorf("no OCSP server specified in certificate")
}
if len(certificates) == 1 {
if len(issuedCert.IssuingCertificateURL) == 0 {
return nil, nil, fmt.Errorf("no URL to issuing certificate")
}
resp, err := http.Get(issuedCert.IssuingCertificateURL[0])
if err != nil {
return nil, nil, fmt.Errorf("getting issuer certificate: %v", err)
}
defer resp.Body.Close()
issuerBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*1024))
if err != nil {
return nil, nil, fmt.Errorf("reading issuer certificate: %v", err)
}
issuerCert, err := x509.ParseCertificate(issuerBytes)
if err != nil {
return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err)
}
// append the issuer cert to the chain; since we only had the leaf,
// this keeps the ordering SRV CRT -> CA
certificates = append(certificates, issuerCert)
}
issuerCert := certificates[1]
ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
if err != nil {
return nil, nil, fmt.Errorf("creating OCSP request: %v", err)
}
reader := bytes.NewReader(ocspReq)
req, err := http.Post(issuedCert.OCSPServer[0], "application/ocsp-request", reader)
if err != nil {
return nil, nil, fmt.Errorf("making OCSP request: %v", err)
}
defer req.Body.Close()
ocspResBytes, err := ioutil.ReadAll(io.LimitReader(req.Body, 1024*1024))
if err != nil {
return nil, nil, fmt.Errorf("reading OCSP response: %v", err)
}
ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
if err != nil {
return nil, nil, fmt.Errorf("parsing OCSP response: %v", err)
}
return ocspResBytes, ocspRes, nil
}
// freshOCSP returns true if resp is still fresh,
// meaning that it is not expedient to get an
// updated response from the OCSP server.
func freshOCSP(resp *ocsp.Response) bool {
nextUpdate := resp.NextUpdate
// If there is an OCSP responder certificate, and it expires before the
// OCSP response, use its expiration date as the end of the OCSP
// response's validity period.
if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {
nextUpdate = resp.Certificate.NotAfter
}
// start checking OCSP staple about halfway through validity period for good measure
refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)
return time.Now().Before(refreshTime)
}
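// A small worked sketch of the halfway rule above, with assumed timestamps:
// for a response whose ThisUpdate is T and NextUpdate is T+96h, refreshTime
// is T+48h, so freshOCSP reports true before T+48h and false afterwards
// (which is what prompts updateOCSPStaples to fetch a new staple).
//
//	thisUpdate := time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC)
//	nextUpdate := thisUpdate.Add(96 * time.Hour)
//	refreshTime := thisUpdate.Add(nextUpdate.Sub(thisUpdate) / 2) // Jan 3, 00:00 UTC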

@ -0,0 +1,243 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"log"
"runtime"
"sync"
"time"
)
// NewRateLimiter returns a rate limiter that allows up to maxEvents
// in a sliding window of size window. If maxEvents and window are
// both 0, or if maxEvents is non-zero and window is 0, rate limiting
// is disabled. This function panics if maxEvents is less than 0 or
// if maxEvents is 0 and window is non-zero, which is considered to be
// an invalid configuration, as it would never allow events.
func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter {
if maxEvents < 0 {
panic("maxEvents cannot be less than zero")
}
if maxEvents == 0 && window != 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
rbrl := &RingBufferRateLimiter{
window: window,
ring: make([]time.Time, maxEvents),
started: make(chan struct{}),
stopped: make(chan struct{}),
ticket: make(chan struct{}),
}
go rbrl.loop()
<-rbrl.started // make sure loop is ready to receive before we return
return rbrl
}
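// A minimal usage sketch, assuming we want to allow at most 10 events per
// minute and block until a slot is available; the loop body stands in for
// whatever work is being throttled:
//
//	rl := NewRateLimiter(10, time.Minute)
//	defer rl.Stop()
//	for i := 0; i < 25; i++ {
//		if err := rl.Wait(context.Background()); err != nil {
//			break // context was cancelled
//		}
//		// ... do the rate-limited work ...
//	}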
// RingBufferRateLimiter uses a ring to enforce rate limits
// consisting of a maximum number of events within a single
// sliding window of a given duration. An empty value is
// not valid; use NewRateLimiter to get one.
type RingBufferRateLimiter struct {
window time.Duration
ring []time.Time // maxEvents == len(ring)
cursor int // always points to the oldest timestamp
mu sync.Mutex // protects ring, cursor, and window
started chan struct{}
stopped chan struct{}
ticket chan struct{}
}
// Stop cleans up r's scheduling goroutine.
func (r *RingBufferRateLimiter) Stop() {
close(r.stopped)
}
func (r *RingBufferRateLimiter) loop() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf)
}
}()
for {
// if we've been stopped, return
select {
case <-r.stopped:
return
default:
}
if len(r.ring) == 0 {
if r.window == 0 {
// rate limiting is disabled; always allow immediately
r.permit()
continue
}
panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events")
}
// wait until next slot is available or until we've been stopped
r.mu.Lock()
then := r.ring[r.cursor].Add(r.window)
r.mu.Unlock()
waitDuration := time.Until(then)
waitTimer := time.NewTimer(waitDuration)
select {
case <-waitTimer.C:
r.permit()
case <-r.stopped:
waitTimer.Stop()
return
}
}
}
// Allow returns true if the event is allowed to
// happen right now. It does not wait. If the event
// is allowed, a ticket is claimed.
func (r *RingBufferRateLimiter) Allow() bool {
select {
case <-r.ticket:
return true
default:
return false
}
}
// Wait blocks until the event is allowed to occur. It returns an
// error if the context is cancelled.
func (r *RingBufferRateLimiter) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
return context.Canceled
case <-r.ticket:
return nil
}
}
// MaxEvents returns the maximum number of events that
// are allowed within the sliding window.
func (r *RingBufferRateLimiter) MaxEvents() int {
r.mu.Lock()
defer r.mu.Unlock()
return len(r.ring)
}
// SetMaxEvents changes the maximum number of events that are
// allowed in the sliding window. If the new limit is lower,
// the oldest events will be forgotten. If the new limit is
// higher, the window will suddenly have capacity for new
// reservations. It panics if maxEvents is 0 and window size
// is not zero.
func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
newRing := make([]time.Time, maxEvents)
r.mu.Lock()
defer r.mu.Unlock()
if r.window != 0 && maxEvents == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
// only make the change if the new limit is different
if maxEvents == len(r.ring) {
return
}
// the new ring may be smaller; fast-forward to the
// oldest timestamp that will be kept in the new
// ring so the oldest ones are forgotten and the
// newest ones will be remembered
sizeDiff := len(r.ring) - maxEvents
for i := 0; i < sizeDiff; i++ {
r.advance()
}
if len(r.ring) > 0 {
// copy timestamps into the new ring until we
// have either copied all of them or have reached
// the capacity of the new ring
startCursor := r.cursor
for i := 0; i < len(newRing); i++ {
newRing[i] = r.ring[r.cursor]
r.advance()
if r.cursor == startCursor {
// new ring is larger than old one;
// "we've come full circle"
break
}
}
}
r.ring = newRing
r.cursor = 0
}
// Window returns the size of the sliding window.
func (r *RingBufferRateLimiter) Window() time.Duration {
r.mu.Lock()
defer r.mu.Unlock()
return r.window
}
// SetWindow changes r's sliding window duration to window.
// Goroutines that are already blocked on a call to Wait()
// will not be affected. It panics if window is non-zero
// but the max event limit is 0.
func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
r.mu.Lock()
defer r.mu.Unlock()
if window != 0 && len(r.ring) == 0 {
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
}
r.window = window
}
// permit allows one event through the throttle. This method
// blocks until a goroutine is waiting for a ticket or until
// the rate limiter is stopped.
func (r *RingBufferRateLimiter) permit() {
for {
select {
case r.started <- struct{}{}:
// notify parent goroutine that we've started; should
// only happen once, before constructor returns
continue
case <-r.stopped:
return
case r.ticket <- struct{}{}:
r.mu.Lock()
defer r.mu.Unlock()
if len(r.ring) > 0 {
r.ring[r.cursor] = time.Now()
r.advance()
}
return
}
}
}
// advance moves the cursor to the next position.
// It is NOT safe for concurrent use, so it must
// be called inside a lock on r.mu.
func (r *RingBufferRateLimiter) advance() {
r.cursor++
if r.cursor >= len(r.ring) {
r.cursor = 0
}
}

@ -0,0 +1,623 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"path"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/libdns/libdns"
"github.com/mholt/acmez"
"github.com/mholt/acmez/acme"
)
// httpSolver solves the HTTP challenge. It must be
// associated with a config and an address to use
// for solving the challenge. If multiple httpSolvers
// are initialized concurrently, the first one to
// begin will start the server, and the last one to
// finish will stop the server. This solver must be
// wrapped by a distributedSolver to work properly,
// because the only way the HTTP challenge handler
// can access the keyAuth material is by loading it
// from storage, which is done by distributedSolver.
type httpSolver struct {
closed int32 // accessed atomically
acmeManager *ACMEManager
address string
}
// Present starts an HTTP server if none is already listening on s.address.
func (s *httpSolver) Present(ctx context.Context, _ acme.Challenge) error {
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count++
if si.listener != nil {
return nil // already being served by us
}
// notice the unusual error handling here; we
// only continue to start a challenge server if
// we got a listener; in all other cases return
ln, err := robustTryListen(s.address)
if ln == nil {
return err
}
// successfully bound socket, so save listener and start key auth HTTP server
si.listener = ln
go s.serve(si)
return nil
}
// serve is an HTTP server that serves only HTTP challenge responses.
func (s *httpSolver) serve(si *solverInfo) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: http solver server: %v\n%s", err, buf)
}
}()
defer close(si.done)
httpServer := &http.Server{Handler: s.acmeManager.HTTPChallengeHandler(http.NewServeMux())}
httpServer.SetKeepAlivesEnabled(false)
err := httpServer.Serve(si.listener)
if err != nil && atomic.LoadInt32(&s.closed) != 1 {
log.Printf("[ERROR] key auth HTTP server: %v", err)
}
}
// CleanUp cleans up the HTTP server if it is the last one to finish.
func (s *httpSolver) CleanUp(ctx context.Context, _ acme.Challenge) error {
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count--
if si.count == 0 {
// last one out turns off the lights
atomic.StoreInt32(&s.closed, 1)
if si.listener != nil {
si.listener.Close()
<-si.done
}
delete(solvers, s.address)
}
return nil
}
// tlsALPNSolver is a type that can solve TLS-ALPN challenges.
// It must have an associated config and address on which to
// serve the challenge.
type tlsALPNSolver struct {
config *Config
address string
}
// Present adds the certificate to the certificate cache and, if
// needed, starts a TLS server for answering TLS-ALPN challenges.
func (s *tlsALPNSolver) Present(ctx context.Context, chal acme.Challenge) error {
// load the certificate into the cache; this isn't strictly necessary
// if we're using the distributed solver since our GetCertificate
// function will check storage for the keyAuth anyway, but it seems
// like loading it into the cache is the right thing to do
cert, err := acmez.TLSALPN01ChallengeCert(chal)
if err != nil {
return err
}
certHash := hashCertificateChain(cert.Certificate)
s.config.certCache.mu.Lock()
s.config.certCache.cache[tlsALPNCertKeyName(chal.Identifier.Value)] = Certificate{
Certificate: *cert,
Names: []string{chal.Identifier.Value},
hash: certHash, // perhaps not necessary
}
s.config.certCache.mu.Unlock()
// the rest of this function increments the
// challenge count for the solver at this
// listener address, and if necessary, starts
// a simple TLS server
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count++
if si.listener != nil {
return nil // already being served by us
}
// notice the unusual error handling here; we
// only continue to start a challenge server if
// we got a listener; in all other cases return
ln, err := robustTryListen(s.address)
if ln == nil {
return err
}
// we were able to bind the socket, so make it into a TLS
// listener, store it with the solverInfo, and start the
// challenge server
si.listener = tls.NewListener(ln, s.config.TLSConfig())
go func() {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: tls-alpn solver server: %v\n%s", err, buf)
}
}()
defer close(si.done)
for {
conn, err := si.listener.Accept()
if err != nil {
if atomic.LoadInt32(&si.closed) == 1 {
return
}
log.Printf("[ERROR] TLS-ALPN challenge server: accept: %v", err)
continue
}
go s.handleConn(conn)
}
}()
return nil
}
// handleConn completes the TLS handshake and then closes conn.
func (*tlsALPNSolver) handleConn(conn net.Conn) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, stackTraceBufferSize)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("panic: tls-alpn solver handler: %v\n%s", err, buf)
}
}()
defer conn.Close()
tlsConn, ok := conn.(*tls.Conn)
if !ok {
log.Printf("[ERROR] TLS-ALPN challenge server: expected tls.Conn but got %T: %#v", conn, conn)
return
}
err := tlsConn.Handshake()
if err != nil {
log.Printf("[ERROR] TLS-ALPN challenge server: handshake: %v", err)
return
}
}
// CleanUp removes the challenge certificate from the cache, and if
// it is the last one to finish, stops the TLS server.
func (s *tlsALPNSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
s.config.certCache.mu.Lock()
delete(s.config.certCache.cache, tlsALPNCertKeyName(chal.Identifier.Value))
s.config.certCache.mu.Unlock()
solversMu.Lock()
defer solversMu.Unlock()
si := getSolverInfo(s.address)
si.count--
if si.count == 0 {
// last one out turns off the lights
atomic.StoreInt32(&si.closed, 1)
if si.listener != nil {
si.listener.Close()
<-si.done
}
delete(solvers, s.address)
}
return nil
}
// tlsALPNCertKeyName returns the key to use when caching a cert
// for use with the TLS-ALPN ACME challenge. It is simply to help
// avoid conflicts (although at the time of writing there shouldn't
// be any, since the cert cache is keyed by hash of certificate chain).
func tlsALPNCertKeyName(sniName string) string {
return sniName + ":acme-tls-alpn"
}
// DNS01Solver is a type that makes libdns providers usable
// as ACME dns-01 challenge solvers.
// See https://github.com/libdns/libdns
type DNS01Solver struct {
// The implementation that interacts with the DNS
// provider to set or delete records. (REQUIRED)
DNSProvider ACMEDNSProvider
// The TTL for the temporary challenge records.
TTL time.Duration
// Maximum time to wait for temporary record to appear.
PropagationTimeout time.Duration
// Preferred DNS resolver(s) to use when doing DNS lookups.
Resolvers []string
txtRecords map[string]dnsPresentMemory // keyed by domain name
txtRecordsMu sync.Mutex
}
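// A configuration sketch, assuming "provider" is any libdns-based value that
// implements ACMEDNSProvider (for example one of the packages under
// github.com/libdns); the TTL, timeout, and resolver values are illustrative:
//
//	solver := &DNS01Solver{
//		DNSProvider:        provider,
//		TTL:                2 * time.Minute,
//		PropagationTimeout: 5 * time.Minute,
//		Resolvers:          []string{"8.8.8.8:53"},
//	}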
// Present creates the DNS TXT record for the given ACME challenge.
func (s *DNS01Solver) Present(ctx context.Context, challenge acme.Challenge) error {
dnsName := challenge.DNS01TXTRecordName()
keyAuth := challenge.DNS01KeyAuthorization()
rec := libdns.Record{
Type: "TXT",
Name: dnsName,
Value: keyAuth,
TTL: s.TTL,
}
// multiple identifiers can have the same ACME challenge
// domain (e.g. example.com and *.example.com) so we need
// to ensure that we don't solve those concurrently and
// step on each challenge's metaphorical toes; see
// https://github.com/caddyserver/caddy/issues/3474
activeDNSChallenges.Lock(dnsName)
zone, err := findZoneByFQDN(dnsName, recursiveNameservers(s.Resolvers))
if err != nil {
return fmt.Errorf("could not determine zone for domain %q: %v", dnsName, err)
}
results, err := s.DNSProvider.AppendRecords(ctx, zone, []libdns.Record{rec})
if err != nil {
return fmt.Errorf("adding temporary record for zone %s: %w", zone, err)
}
if len(results) != 1 {
return fmt.Errorf("expected one record, got %d: %v", len(results), results)
}
// remember the record and zone we got so we can clean up more efficiently
s.txtRecordsMu.Lock()
if s.txtRecords == nil {
s.txtRecords = make(map[string]dnsPresentMemory)
}
s.txtRecords[dnsName] = dnsPresentMemory{dnsZone: zone, rec: results[0]}
s.txtRecordsMu.Unlock()
return nil
}
// Wait blocks until the TXT record created in Present() appears in
// authoritative lookups, i.e. until it has propagated, or until
// timeout, whichever is first.
func (s *DNS01Solver) Wait(ctx context.Context, challenge acme.Challenge) error {
dnsName := challenge.DNS01TXTRecordName()
keyAuth := challenge.DNS01KeyAuthorization()
timeout := s.PropagationTimeout
if timeout == 0 {
timeout = 2 * time.Minute
}
const interval = 2 * time.Second
resolvers := recursiveNameservers(s.Resolvers)
var err error
start := time.Now()
for time.Since(start) < timeout {
select {
case <-time.After(interval):
case <-ctx.Done():
return ctx.Err()
}
var ready bool
ready, err = checkDNSPropagation(dnsName, keyAuth, resolvers)
if err != nil {
return fmt.Errorf("checking DNS propagation of %s: %w", dnsName, err)
}
if ready {
return nil
}
}
return fmt.Errorf("timed out waiting for record to fully propagate; verify DNS provider configuration is correct - last error: %v", err)
}
// CleanUp deletes the DNS TXT record created in Present().
func (s *DNS01Solver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
dnsName := challenge.DNS01TXTRecordName()
defer func() {
// always forget about it so we don't leak memory
s.txtRecordsMu.Lock()
delete(s.txtRecords, dnsName)
s.txtRecordsMu.Unlock()
// always do this last - but always do it!
activeDNSChallenges.Unlock(dnsName)
}()
// recall the record we created and zone we looked up
s.txtRecordsMu.Lock()
memory, ok := s.txtRecords[dnsName]
if !ok {
s.txtRecordsMu.Unlock()
return fmt.Errorf("no memory of presenting a DNS record for %s (probably OK if presenting failed)", challenge.Identifier.Value)
}
s.txtRecordsMu.Unlock()
// clean up the record
_, err := s.DNSProvider.DeleteRecords(ctx, memory.dnsZone, []libdns.Record{memory.rec})
if err != nil {
return fmt.Errorf("deleting temporary record for zone %s: %w", memory.dnsZone, err)
}
return nil
}
type dnsPresentMemory struct {
dnsZone string
rec libdns.Record
}
// ACMEDNSProvider defines the set of operations required for
// ACME challenges. A DNS provider must be able to append and
// delete records in order to solve ACME challenges. Find one
// you can use at https://github.com/libdns. If your provider
// isn't implemented yet, feel free to contribute!
type ACMEDNSProvider interface {
libdns.RecordAppender
libdns.RecordDeleter
}
// activeDNSChallenges synchronizes DNS challenges for
// names to ensure that challenges for the same ACME
// DNS name do not overlap; for example, the TXT record
// to make for both example.com and *.example.com are
// the same; thus we cannot solve them concurrently.
var activeDNSChallenges = newMapMutex()
// mapMutex implements named mutexes.
type mapMutex struct {
cond *sync.Cond
set map[interface{}]struct{}
}
func newMapMutex() *mapMutex {
return &mapMutex{
cond: sync.NewCond(new(sync.Mutex)),
set: make(map[interface{}]struct{}),
}
}
func (mmu *mapMutex) Lock(key interface{}) {
mmu.cond.L.Lock()
defer mmu.cond.L.Unlock()
for mmu.locked(key) {
mmu.cond.Wait()
}
mmu.set[key] = struct{}{}
return
}
func (mmu *mapMutex) Unlock(key interface{}) {
mmu.cond.L.Lock()
defer mmu.cond.L.Unlock()
delete(mmu.set, key)
mmu.cond.Broadcast()
}
func (mmu *mapMutex) locked(key interface{}) (ok bool) {
_, ok = mmu.set[key]
return
}
// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges
// to be solved by an instance other than the one which initiated it.
// This is useful behind load balancers or in other cluster/fleet
// configurations. The only requirement is that the instance which
// initiates the challenge shares the same storage and locker with
// the others in the cluster. The storage backing the certificate
// cache in distributedSolver.config is crucial.
//
// Obviously, the instance which completes the challenge must be
// serving on the HTTPChallengePort for the HTTP-01 challenge or the
// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all
// the packets port-forwarded) to receive and handle the request. The
// server which receives the challenge must handle it by checking to
// see if the challenge token exists in storage, and if so, decode it
// and use it to serve up the correct response. HTTPChallengeHandler
// in this package as well as the GetCertificate method implemented
// by a Config support and even require this behavior.
//
// In short: the only two requirements for cluster operation are
// sharing sync and storage, and using the facilities provided by
// this package for solving the challenges.
type distributedSolver struct {
// The config with a certificate cache
// with a reference to the storage to
// use which is shared among all the
// instances in the cluster - REQUIRED.
acmeManager *ACMEManager
// Since the distributedSolver is only a
// wrapper over an actual solver, place
// the actual solver here.
solver acmez.Solver
// The CA endpoint URL associated with
// this solver.
caURL string
}
// Present invokes the underlying solver's Present method
// and also stores domain, token, and keyAuth to the storage
// backing the certificate cache of dhs.acmeManager.
func (dhs distributedSolver) Present(ctx context.Context, chal acme.Challenge) error {
infoBytes, err := json.Marshal(chal)
if err != nil {
return err
}
err = dhs.acmeManager.config.Storage.Store(dhs.challengeTokensKey(chal.Identifier.Value), infoBytes)
if err != nil {
return err
}
err = dhs.solver.Present(ctx, chal)
if err != nil {
return fmt.Errorf("presenting with embedded solver: %v", err)
}
return nil
}
// CleanUp invokes the underlying solver's CleanUp method
// and also cleans up any assets saved to storage.
func (dhs distributedSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
err := dhs.acmeManager.config.Storage.Delete(dhs.challengeTokensKey(chal.Identifier.Value))
if err != nil {
return err
}
err = dhs.solver.CleanUp(ctx, chal)
if err != nil {
return fmt.Errorf("cleaning up embedded provider: %v", err)
}
return nil
}
// challengeTokensPrefix returns the key prefix for challenge info.
func (dhs distributedSolver) challengeTokensPrefix() string {
return path.Join(dhs.acmeManager.storageKeyCAPrefix(dhs.caURL), "challenge_tokens")
}
// challengeTokensKey returns the key to use to store and access
// challenge info for domain.
func (dhs distributedSolver) challengeTokensKey(domain string) string {
return path.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json")
}
// solverInfo associates a listener with the
// number of challenges currently using it.
type solverInfo struct {
closed int32 // accessed atomically
count int
listener net.Listener
done chan struct{} // used to signal when our own solver server is done
}
// getSolverInfo gets a valid solverInfo struct for address.
func getSolverInfo(address string) *solverInfo {
si, ok := solvers[address]
if !ok {
si = &solverInfo{done: make(chan struct{})}
solvers[address] = si
}
return si
}
// robustTryListen calls net.Listen for a TCP socket at addr.
// This function may return both a nil listener and a nil error!
// If it was able to bind the socket, it returns the listener
// and no error. If it wasn't able to bind the socket because
// the socket is already in use, then it returns a nil listener
// and nil error. If it had any other error, it returns the
// error. The intended error handling logic for this function
// is to proceed if the returned listener is not nil; otherwise
// return err (which may also be nil). In other words, this
// function ignores errors if the socket is already in use,
// which is useful for our challenge servers, where we assume
// that whatever is already listening can solve the challenges.
func robustTryListen(addr string) (net.Listener, error) {
var listenErr error
for i := 0; i < 2; i++ {
// doesn't hurt to sleep briefly before the second
// attempt in case the OS has timing issues
if i > 0 {
time.Sleep(100 * time.Millisecond)
}
// if we can bind the socket right away, great!
var ln net.Listener
ln, listenErr = net.Listen("tcp", addr)
if listenErr == nil {
return ln, nil
}
// if it failed just because the socket is already in use, we
// have no choice but to assume that whatever is using the socket
// can answer the challenge already, so we ignore the error
connectErr := dialTCPSocket(addr)
if connectErr == nil {
return nil, nil
}
// hmm, we couldn't connect to the socket, so something else must
// be wrong, right? wrong!! we've had reports across multiple OSes
// now that sometimes connections fail even though the OS told us
// that the address was already in use; either the listener is
// fluctuating between open and closed very, very quickly, or the
// OS is inconsistent and contradicting itself; I have been unable
// to reproduce this, so I'm now resorting to hard-coding substring
// matching in error messages as a really hacky and unreliable
// safeguard against this, until we can identify exactly what was
// happening; see the following threads for more info:
// https://caddy.community/t/caddy-retry-error/7317
// https://caddy.community/t/v2-upgrade-to-caddy2-failing-with-errors/7423
if strings.Contains(listenErr.Error(), "address already in use") ||
strings.Contains(listenErr.Error(), "one usage of each socket address") {
log.Printf("[WARNING] OS reports a contradiction: %v - but we cannot connect to it, with this error: %v; continuing anyway 🤞 (I don't know what causes this... if you do, please help?)", listenErr, connectErr)
return nil, nil
}
}
return nil, fmt.Errorf("could not start listener for challenge server at %s: %v", addr, listenErr)
}
// dialTCPSocket connects to a TCP address just for the sake of
// seeing if it is open. It returns a nil error if a TCP connection
// can successfully be made to addr within a short timeout.
func dialTCPSocket(addr string) error {
conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
if err == nil {
conn.Close()
}
return err
}
// The active challenge solvers, keyed by listener address,
// and protected by a mutex. Note that the creation of
// solver listeners and the incrementing of their counts
// are atomic operations guarded by this mutex.
var (
solvers = make(map[string]*solverInfo)
solversMu sync.Mutex
)
// Interface guards
var (
_ acmez.Solver = (*DNS01Solver)(nil)
_ acmez.Waiter = (*DNS01Solver)(nil)
)

@ -0,0 +1,281 @@
// Copyright 2015 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certmagic
import (
"context"
"log"
"path"
"regexp"
"strings"
"sync"
"time"
)
// Storage is a type that implements a key-value store.
// Keys are prefix-based, with forward slash '/' as separators
// and without a leading slash.
//
// Processes running in a cluster will wish to use the
// same Storage value (its implementation and configuration)
// in order to share certificates and other TLS resources
// with the cluster.
//
// The Load, Delete, List, and Stat methods should return
// ErrNotExist if the key does not exist.
//
// Implementations of Storage must be safe for concurrent use.
type Storage interface {
// Locker provides atomic synchronization
// operations, making Storage safe to share.
Locker
// Store puts value at key.
Store(key string, value []byte) error
// Load retrieves the value at key.
Load(key string) ([]byte, error)
// Delete deletes key. An error should be
// returned only if the key still exists
// when the method returns.
Delete(key string) error
// Exists returns true if the key exists
// and there was no error checking.
Exists(key string) bool
// List returns all keys that match prefix.
// If recursive is true, non-terminal keys
// will be enumerated (i.e. "directories"
// should be walked); otherwise, only keys
// prefixed exactly by prefix will be listed.
List(prefix string, recursive bool) ([]string, error)
// Stat returns information about key.
Stat(key string) (KeyInfo, error)
}
// Locker facilitates synchronization of certificate tasks across
// machines and networks.
type Locker interface {
// Lock acquires the lock for key, blocking until the lock
// can be obtained or an error is returned. Note that, even
// after acquiring a lock, an idempotent operation may have
// already been performed by another process that acquired
// the lock before - so always check to make sure idempotent
// operations still need to be performed after acquiring the
// lock.
//
// The actual implementation of obtaining of a lock must be
// an atomic operation so that multiple Lock calls at the
// same time always results in only one caller receiving the
// lock at any given time.
//
// To prevent deadlocks, all implementations (where this concern
// is relevant) should put a reasonable expiration on the lock in
// case Unlock is unable to be called due to some sort of network
// failure or system crash. Additionally, implementations should
// honor context cancellation as much as possible (in case the
// caller wishes to give up and free resources before the lock
// can be obtained).
Lock(ctx context.Context, key string) error
// Unlock releases the lock for key. This method must ONLY be
// called after a successful call to Lock, and only after the
// critical section is finished, even if it errored or timed
// out. Unlock cleans up any resources allocated during Lock.
Unlock(key string) error
}
// KeyInfo holds information about a key in storage.
// Key and IsTerminal are required; Modified and Size
// are optional if the storage implementation is not
// able to get that information. Setting them will
// make certain operations more consistent or
// predictable, but it is not crucial to basic
// functionality.
type KeyInfo struct {
Key string
Modified time.Time
Size int64
IsTerminal bool // false for keys that only contain other keys (like directories)
}
// storeTx stores all the values or none at all.
func storeTx(s Storage, all []keyValue) error {
for i, kv := range all {
err := s.Store(kv.key, kv.value)
if err != nil {
for j := i - 1; j >= 0; j-- {
s.Delete(all[j].key)
}
return err
}
}
return nil
}
// keyValue pairs a key and a value.
type keyValue struct {
key string
value []byte
}
// KeyBuilder provides a namespace for methods that
// build keys and key prefixes, for addressing items
// in a Storage implementation.
type KeyBuilder struct{}
// CertsPrefix returns the storage key prefix for
// the given certificate issuer.
func (keys KeyBuilder) CertsPrefix(issuerKey string) string {
return path.Join(prefixCerts, keys.Safe(issuerKey))
}
// CertsSitePrefix returns a key prefix for items associated with
// the site given by domain using the given issuer key.
func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string {
return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain))
}
// SiteCert returns the path to the certificate file for domain
// that is associated with the issuer with the given issuerKey.
func (keys KeyBuilder) SiteCert(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt")
}
// SitePrivateKey returns the path to the private key file for domain
// that is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key")
}
// SiteMeta returns the path to the metadata file for domain that
// is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string {
safeDomain := keys.Safe(domain)
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json")
}
// OCSPStaple returns a key for the OCSP staple associated
// with the given certificate. If you have the PEM bundle
// handy, pass that in to save an extra encoding step.
func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string {
var ocspFileName string
if len(cert.Names) > 0 {
firstName := keys.Safe(cert.Names[0])
ocspFileName = firstName + "-"
}
ocspFileName += fastHash(pemBundle)
return path.Join(prefixOCSP, ocspFileName)
}
// Safe standardizes and sanitizes str for use as
// a single component of a storage key. This method
// is idempotent.
func (keys KeyBuilder) Safe(str string) string {
str = strings.ToLower(str)
str = strings.TrimSpace(str)
// replace a few specific characters
repl := strings.NewReplacer(
" ", "_",
"+", "_plus_",
"*", "wildcard_",
":", "-",
"..", "", // prevent directory traversal (regex allows single dots)
)
str = repl.Replace(str)
// finally remove all non-word characters
return safeKeyRE.ReplaceAllLiteralString(str, "")
}
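// Rough examples of the sanitization above (assuming the current replacer
// and safeKeyRE):
//
//	StorageKeys.Safe("*.Example.COM")  // "wildcard_.example.com"
//	StorageKeys.Safe("host:8443 cert") // "host-8443_cert"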
// CleanUpOwnLocks immediately cleans up all
// current locks obtained by this process. Since
// this does not cancel the operations that
// the locks are synchronizing, this should be
// called only immediately before process exit.
func CleanUpOwnLocks() {
locksMu.Lock()
defer locksMu.Unlock()
for lockKey, storage := range locks {
err := storage.Unlock(lockKey)
if err == nil {
delete(locks, lockKey)
} else {
log.Printf("[ERROR] Unable to clean up lock: %v (lock=%s storage=%s)",
err, lockKey, storage)
}
}
}
func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
err := storage.Lock(ctx, lockKey)
if err == nil {
locksMu.Lock()
locks[lockKey] = storage
locksMu.Unlock()
}
return err
}
func releaseLock(storage Storage, lockKey string) error {
err := storage.Unlock(lockKey)
if err == nil {
locksMu.Lock()
delete(locks, lockKey)
locksMu.Unlock()
}
return err
}
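// A usage sketch of the idempotency pattern described by Locker above,
// assuming ctx, storage, and issuerKey are in scope and
// "issue_cert_example.com" is an example lock key:
//
//	if err := acquireLock(ctx, storage, "issue_cert_example.com"); err != nil {
//		return err
//	}
//	defer releaseLock(storage, "issue_cert_example.com")
//	if storage.Exists(StorageKeys.SiteCert(issuerKey, "example.com")) {
//		return nil // another instance finished the work while we waited
//	}
//	// ... perform the operation guarded by the lock ...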
// locks stores a reference to all the current
// locks obtained by this process.
var locks = make(map[string]Storage)
var locksMu sync.Mutex
// StorageKeys provides methods for accessing
// keys and key prefixes for items in a Storage.
// Typically, you will not need to use this
// because accessing storage is abstracted away
// for most cases. Only use this if you need to
// directly access TLS assets in your application.
var StorageKeys KeyBuilder
const (
prefixCerts = "certificates"
prefixOCSP = "ocsp"
)
// safeKeyRE matches any undesirable characters in storage keys.
// Note that this allows dots, so you'll have to strip ".." manually.
var safeKeyRE = regexp.MustCompile(`[^\w@.-]`)
// ErrNotExist is returned by Storage implementations when
// a resource is not found. It is similar to os.IsNotExist
// except this is a type, not a variable.
type ErrNotExist interface {
error
}
// defaultFileStorage is a convenient, default storage
// implementation using the local file system.
var defaultFileStorage = &FileStorage{Path: dataDir()}

@ -0,0 +1 @@
_gitignore/

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Matthew Holt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -0,0 +1,48 @@
libdns - Universal DNS provider APIs for Go
===========================================
<a href="https://pkg.go.dev/github.com/libdns/libdns"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
`libdns` is a collection of free-range DNS provider client implementations written in Go! With libdns packages, your Go program can manage DNS records across any supported providers.
**⚠️ Work-in-progress. Exported APIs are subject to change. More documentation is coming soon.**
This repository defines the core interfaces that providers should implement. They are small and idiomatic Go interfaces with well-defined semantics.
The interfaces include:
- `RecordGetter` to list records.
- `RecordAppender` to append new records.
- `RecordSetter` to set (create or change existing) records.
- `RecordDeleter` to delete records.
## Implementing new providers
Providers are 100% written and maintained by the community! We all maintain just the packages for providers we use.
**[Instructions for adding new providers](https://github.com/libdns/libdns/wiki/Implementing-providers)** are on this repo's wiki. Please feel free to contribute.
## Similar projects
**[OctoDNS](https://github.com/github/octodns)** is a suite of tools written in Python for managing DNS. However, its approach is a bit heavy-handed when all you need are small, incremental changes to a zone:
> WARNING: OctoDNS assumes ownership of any domain you point it to. When you tell it to act it will do whatever is necessary to try and match up states including deleting any unexpected records. Be careful when playing around with OctoDNS.
This is incredibly useful when you are maintaining your own zone file, but risky when you just need incremental changes.
**[StackExchange/dnscontrol](https://github.com/StackExchange/dnscontrol)** is written in Go, but is similar to OctoDNS in that it tends to obliterate your entire zone and replace it with your input. Again, this is very useful if you are maintaining your own master list of records, but doesn't do well for simply adding or removing records.
**[go-acme/lego](https://github.com/go-acme/lego)** has support for a huge number of DNS providers (75+!), but their APIs are only capable of setting and deleting TXT records for ACME challenges.
**`libdns`** takes inspiration from the above projects but aims for a more generally-useful set of APIs that homogenize pretty well across providers. In contrast to the above projects, libdns can add, set, delete, and get arbitrary records from a zone without obliterating it (although syncing up an entire zone is also possible!). Its APIs also include context so long-running calls can be cancelled early, for example to accommodate on-line config changes downstream. libdns interfaces are also smaller and more composable. Additionally, libdns can grow to support a nearly infinite number of DNS providers without added bloat, because each provider implementation is a separate Go module, which keeps your builds lean and fast.
In summary, the goal is that libdns providers can do what the above libraries/tools can do, but with more flexibility: they can create and delete TXT records for ACME challenges, they can replace entire zones, but they can also do incremental changes or simply read records.
## Record abstraction
How records are represented across providers varies widely, and each kind of record has different fields and semantics. In time, our goal is for the `libdns.Record` type to be able to represent most of them as concisely and simply as possible, with the interface methods able to deliver on most of the possible zone operations.
Realistically, libdns should enable most common record manipulations, but may not be able to fit absolutely 100% of all possibilities with DNS in a provider-agnostic way. That is probably OK; and given the wide varieties in DNS record types and provider APIs, it would be unreasonable to expect otherwise. We are not aiming for 100% fulfillment of 100% of users' requirements; more like 100% fulfillment of ~90% of users' requirements.
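As a rough sketch of how the pieces fit together (here `p` stands in for any provider client that implements `RecordAppender`, and the zone and record values are placeholders), appending a TXT record looks roughly like this:

```go
ctx := context.Background()
recs, err := p.AppendRecords(ctx, "example.com.", []libdns.Record{{
	Type:  "TXT",
	Name:  "_acme-challenge",
	Value: "key-authorization-digest",
	TTL:   5 * time.Minute,
}})
if err != nil {
	log.Fatal(err)
}
log.Printf("created %d record(s)", len(recs))
```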

@ -0,0 +1,3 @@
module github.com/libdns/libdns
go 1.14

@ -0,0 +1,85 @@
// Package libdns defines the core interfaces that should be implemented
// by DNS provider clients. They are small and idiomatic Go interfaces with
// well-defined semantics.
//
// All interface implementations must be safe for concurrent/parallel use.
// For example, if AppendRecords() is called at the same time and two API
// requests are made to the provider at the same time, the result of both
// requests must be visible after they both complete; if the provider does
// not synchronize the writing of the zone file and one request overwrites
// the other, then the client implementation must take care to synchronize
// on behalf of the incompetent provider. This synchronization need not be
// global, for example: the scope of synchronization might only need to be
// within the same zone, allowing multiple requests at once as long as all
// of them are for different zones. (Exact logic depends on the provider.)
package libdns
import (
"context"
"time"
)
// RecordGetter can get records from a DNS zone.
type RecordGetter interface {
// GetRecords returns all the records in the DNS zone.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
GetRecords(ctx context.Context, zone string) ([]Record, error)
}
// RecordAppender can non-destructively add new records to a DNS zone.
type RecordAppender interface {
// AppendRecords creates the requested records in the given zone
// and returns the populated records that were created. It never
// changes existing records.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
AppendRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// RecordSetter can set new or update existing records in a DNS zone.
type RecordSetter interface {
// SetRecords updates the zone so that the records described in the
// input are reflected in the output. It may create or overwrite
// records or -- depending on the record type -- delete records to
// maintain parity with the input. No other records are affected.
// It returns the records which were set.
//
// Records that have an ID associating them with a particular resource
// on the provider will be directly replaced. If no ID is given, this
// method may use what information is given to do lookups and will
// ensure that only necessary changes are made to the zone.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
SetRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// RecordDeleter can delete records from a DNS zone.
type RecordDeleter interface {
// DeleteRecords deletes the given records from the zone if they exist.
// It returns the records that were deleted.
//
// Records that have an ID to associate them with a particular resource on
// the provider will be directly deleted. If no ID is given, this method
// may use what information is given to do lookups and delete only
// matching records.
//
// Implementations must honor context cancellation and be safe for
// concurrent use.
DeleteRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
}
// Record is a generalized representation of a DNS record.
type Record struct {
// provider-specific metadata
ID string
// general record fields
Type string
Name string
Value string
TTL time.Duration
}

@ -0,0 +1 @@
_gitignore/

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,59 @@
acmez - ACME client library for Go
==================================
[![godoc](https://pkg.go.dev/badge/github.com/mholt/acmez)](https://pkg.go.dev/github.com/mholt/acmez)
ACMEz ("ack-measy" or "acme-zee", whichever you prefer) is a fully-compliant [RFC 8555](https://tools.ietf.org/html/rfc8555) (ACME) implementation in pure Go. It is lightweight, has an elegant Go API, and its retry logic is highly robust against external errors. ACMEz is suitable for large-scale enterprise deployments.
**NOTE:** This module is for _getting_ certificates, not _managing_ certificates. Most users probably want certificate _management_ (keeping certificates renewed) rather than to interface directly with ACME. Developers who want to use certificates in their long-running Go programs should use [CertMagic](https://github.com/caddyserver/certmagic) instead; or, if their program is not written in Go, [Caddy](https://caddyserver.com/) can be used to manage certificates (even without running an HTTP or TLS server).
This module has two primary packages:
- **`acmez`** is a high-level wrapper for getting certificates. It implements the ACME order flow described in RFC 8555 including challenge solving using pluggable solvers.
- **`acme`** is a low-level RFC 8555 implementation that provides the fundamental ACME operations, mainly useful if you have advanced or niche requirements.
In other words, the `acmez` package is **porcelain** while the `acme` package is **plumbing** (to use git's terminology).
## Features
- Simple, elegant Go API
- Thoroughly documented with spec citations
- Robust to external errors
- Structured error values ("problems" as defined in RFC 7807)
- Smart retries (resilient against network and server hiccups)
- Challenge plasticity (randomized challenges, and will retry others if one fails)
- Context cancellation (suitable for high-frequency config changes or reloads)
- Highly flexible and customizable
- External Account Binding (EAB) support
- Tested with multiple ACME CAs (more than just Let's Encrypt)
- Supports niche aspects of RFC 8555 (such as alt cert chains and account key rollover)
- Efficient solving of large SAN lists (e.g. for slow DNS record propagation)
- Utility functions for solving challenges
- Helpers for RFC 8737 (tls-alpn-01 challenge)
The `acmez` package is "bring-your-own-solver." It provides helper utilities for http-01, dns-01, and tls-alpn-01 challenges, but does not actually solve them for you. You must write an implementation of `acmez.Solver` in order to get certificates. How this is done depends on the environment in which you're using this code.
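For illustration only, a minimal solver might look like the sketch below. It assumes the `acmez.Solver` interface exposes `Present`/`CleanUp` methods that receive an `acme.Challenge`, and that the module paths are `github.com/mholt/acmez` and `github.com/mholt/acmez/acme`; this is an editor's sketch, not code from this repository.

```go
package solver

import (
	"context"
	"sync"

	"github.com/mholt/acmez/acme"
)

// http01Solver remembers token -> key-authorization pairs so that a separate
// HTTP handler (not shown) can serve them at chal.HTTP01ResourcePath().
type http01Solver struct {
	mu     sync.Mutex
	tokens map[string]string
}

// Present is called before the CA is asked to validate the challenge.
func (s *http01Solver) Present(ctx context.Context, chal acme.Challenge) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.tokens == nil {
		s.tokens = make(map[string]string)
	}
	s.tokens[chal.Token] = chal.KeyAuthorization
	return nil
}

// CleanUp is called after validation, whether it succeeded or failed.
func (s *http01Solver) CleanUp(ctx context.Context, chal acme.Challenge) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.tokens, chal.Token)
	return nil
}
```

Such a solver is then registered with the `acmez.Client` for the `http-01` challenge type before ordering a certificate; the `examples` folder mentioned below shows the full wiring.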
This is not a command line utility either. The goal is to not add more external tooling to already-complex infrastructure: ACME and TLS should be built-in to servers rather than tacked on as an afterthought.
## Examples
See the `examples` folder for tutorials on how to use either package.
## History
In 2014, the ISRG was finishing the development of its automated CA infrastructure: the first of its kind to become publicly-trusted, under the name Let's Encrypt, which used a young protocol called ACME to automate domain validation and certificate issuance.
Meanwhile, a project called [Caddy](https://caddyserver.com) was being developed, which would be the first and only web server to use HTTPS _automatically and by default_. To make that possible, another project called lego was commissioned by the Caddy project to become one of the first-ever ACME client libraries, and the first client written in Go. It was made by Sebastian Erhart (xenolf), and on day 1 of Let's Encrypt's public beta, Caddy used lego to obtain its first certificate automatically at startup, making Caddy and lego the first-ever integrated ACME client.
Since then, Caddy has seen use in production longer than any other ACME client integration, and is well-known for being one of the most robust and reliable HTTPS implementations available today.
A few years later, Caddy's novel auto-HTTPS logic was extracted into a library called [CertMagic](https://github.com/caddyserver/certmagic) to be usable by any Go program. Caddy would continue to use CertMagic, which implemented the certificate _automation and management_ logic on top of the low-level certificate _obtain_ logic that lego provided.
Soon thereafter, the lego project shifted maintainership and the goals and vision of the project diverged from those of Caddy's use case of managing tens of thousands of certificates per instance. Eventually, [the original Caddy author announced work on a new ACME client library in Go](https://github.com/caddyserver/certmagic/issues/71) that exceeded Caddy's harsh requirements for large-scale enterprise deployments, lean builds, and simple API. This work finally came to fruition in 2020 as ACMEz.
---
(c) 2020 Matthew Holt

@ -0,0 +1,37 @@
This document contains Third Party Software Notices and/or Additional
Terms and Conditions for licensed third party software components
included within this product.
==
https://github.com/golang/crypto/blob/master/acme/jws.go
https://github.com/golang/crypto/blob/master/acme/jws_test.go
(with modifications)
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,249 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"crypto"
"encoding/base64"
"encoding/json"
"fmt"
)
// Account represents a set of metadata associated with an account
// as defined by the ACME spec §7.1.2:
// https://tools.ietf.org/html/rfc8555#section-7.1.2
type Account struct {
// status (required, string): The status of this account. Possible
// values are "valid", "deactivated", and "revoked". The value
// "deactivated" should be used to indicate client-initiated
// deactivation whereas "revoked" should be used to indicate server-
// initiated deactivation. See Section 7.1.6.
Status string `json:"status"`
// contact (optional, array of string): An array of URLs that the
// server can use to contact the client for issues related to this
// account. For example, the server may wish to notify the client
// about server-initiated revocation or certificate expiration. For
// information on supported URL schemes, see Section 7.3.
Contact []string `json:"contact,omitempty"`
// termsOfServiceAgreed (optional, boolean): Including this field in a
// newAccount request, with a value of true, indicates the client's
// agreement with the terms of service. This field cannot be updated
// by the client.
TermsOfServiceAgreed bool `json:"termsOfServiceAgreed,omitempty"`
// externalAccountBinding (optional, object): Including this field in a
// newAccount request indicates approval by the holder of an existing
// non-ACME account to bind that account to this ACME account. This
// field is not updateable by the client (see Section 7.3.4).
//
// Use SetExternalAccountBinding() to set this field's value properly.
ExternalAccountBinding json.RawMessage `json:"externalAccountBinding,omitempty"`
// orders (required, string): A URL from which a list of orders
// submitted by this account can be fetched via a POST-as-GET
// request, as described in Section 7.1.2.1.
Orders string `json:"orders"`
// In response to new-account, "the server returns this account
// object in a 201 (Created) response, with the account URL
// in a Location header field." §7.3
//
// We transfer the value from the header to this field for
// storage and recall purposes.
Location string `json:"location,omitempty"`
// The private key to the account. Because it is secret, it is
// not serialized as JSON and must be stored separately (usually
// a PEM-encoded file).
PrivateKey crypto.Signer `json:"-"`
}
// SetExternalAccountBinding sets the ExternalAccountBinding field of the account.
// It only sets the field value; it does not register the account with the CA. (The
// client parameter is necessary because the EAB encoding depends on the directory.)
func (a *Account) SetExternalAccountBinding(ctx context.Context, client *Client, eab EAB) error {
if err := client.provision(ctx); err != nil {
return err
}
macKey, err := base64.RawURLEncoding.DecodeString(eab.MACKey)
if err != nil {
return fmt.Errorf("base64-decoding MAC key: %w", err)
}
eabJWS, err := jwsEncodeEAB(a.PrivateKey.Public(), macKey, keyID(eab.KeyID), client.dir.NewAccount)
if err != nil {
return fmt.Errorf("signing EAB content: %w", err)
}
a.ExternalAccountBinding = eabJWS
return nil
}
// NewAccount creates a new account on the ACME server.
//
// "A client creates a new account with the server by sending a POST
// request to the server's newAccount URL." §7.3
func (c *Client) NewAccount(ctx context.Context, account Account) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
return c.postAccount(ctx, c.dir.NewAccount, accountObject{Account: account})
}
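// Editor's sketch (not part of the vendored file): registering an account with
// the flow above. It assumes imports of context, crypto/ecdsa, crypto/elliptic,
// and crypto/rand; the staging directory URL and contact address are placeholders.
//
//    func registerAccount(ctx context.Context) (Account, error) {
//        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//        if err != nil {
//            return Account{}, err
//        }
//        client := &Client{Directory: "https://acme-staging-v02.api.letsencrypt.org/directory"}
//        acct := Account{
//            Contact:              []string{"mailto:admin@example.com"},
//            TermsOfServiceAgreed: true, // only set once the operator has accepted the CA's TOS
//            PrivateKey:           key,
//        }
//        return client.NewAccount(ctx, acct)
//    }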
// GetAccount looks up an account on the ACME server.
//
// "If a client wishes to find the URL for an existing account and does
// not want an account to be created if one does not already exist, then
// it SHOULD do so by sending a POST request to the newAccount URL with
// a JWS whose payload has an 'onlyReturnExisting' field set to 'true'."
// §7.3.1
func (c *Client) GetAccount(ctx context.Context, account Account) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
return c.postAccount(ctx, c.dir.NewAccount, accountObject{
Account: account,
OnlyReturnExisting: true,
})
}
// UpdateAccount updates account information on the ACME server.
//
// "If the client wishes to update this information in the future, it
// sends a POST request with updated information to the account URL.
// The server MUST ignore any updates to the 'orders' field,
// 'termsOfServiceAgreed' field (see Section 7.3.3), the 'status' field
// (except as allowed by Section 7.3.6), or any other fields it does not
// recognize." §7.3.2
//
// This method uses the account.Location value as the account URL.
func (c *Client) UpdateAccount(ctx context.Context, account Account) (Account, error) {
return c.postAccount(ctx, account.Location, accountObject{Account: account})
}
type keyChangeRequest struct {
Account string `json:"account"`
OldKey json.RawMessage `json:"oldKey"`
}
// AccountKeyRollover changes an account's associated key.
//
// "To change the key associated with an account, the client sends a
// request to the server containing signatures by both the old and new
// keys." §7.3.5
func (c *Client) AccountKeyRollover(ctx context.Context, account Account, newPrivateKey crypto.Signer) (Account, error) {
if err := c.provision(ctx); err != nil {
return account, err
}
oldPublicKeyJWK, err := jwkEncode(account.PrivateKey.Public())
if err != nil {
return account, fmt.Errorf("encoding old private key: %v", err)
}
keyChangeReq := keyChangeRequest{
Account: account.Location,
OldKey: []byte(oldPublicKeyJWK),
}
innerJWS, err := jwsEncodeJSON(keyChangeReq, newPrivateKey, "", "", c.dir.KeyChange)
if err != nil {
return account, fmt.Errorf("encoding inner JWS: %v", err)
}
_, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.KeyChange, json.RawMessage(innerJWS), nil)
if err != nil {
return account, fmt.Errorf("rolling key on server: %w", err)
}
account.PrivateKey = newPrivateKey
return account, nil
}
func (c *Client) postAccount(ctx context.Context, endpoint string, account accountObject) (Account, error) {
// Normally, the account URL is the key ID ("kid")... except when the user
// is trying to get the correct account URL. In that case, we must ignore
// any existing URL we may have and not set the kid field on the request.
// Arguably, this is a user error (spec says "If client wishes to find the
// URL for an existing account", so why would the URL already be filled
// out?) but it's easy enough to infer their intent and make it work.
kid := account.Location
if account.OnlyReturnExisting {
kid = ""
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, kid, endpoint, account, &account.Account)
if err != nil {
return account.Account, err
}
account.Location = resp.Header.Get("Location")
return account.Account, nil
}
type accountObject struct {
Account
// If true, newAccount will be read-only, and Account.Location
// (which holds the account URL) must be empty.
OnlyReturnExisting bool `json:"onlyReturnExisting,omitempty"`
}
// EAB (External Account Binding) contains information
// necessary to bind or map an ACME account to some
// other account known by the CA.
//
// External account bindings are "used to associate an
// ACME account with an existing account in a non-ACME
// system, such as a CA customer database."
//
// "To enable ACME account binding, the CA operating the
// ACME server needs to provide the ACME client with a
// MAC key and a key identifier, using some mechanism
// outside of ACME." §7.3.4
type EAB struct {
// "The key identifier MUST be an ASCII string." §7.3.4
KeyID string `json:"key_id"`
// "The MAC key SHOULD be provided in base64url-encoded
// form, to maximize compatibility between non-ACME
// provisioning systems and ACME clients." §7.3.4
MACKey string `json:"mac_key"`
}
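// Editor's sketch (not part of the vendored file): supplying an External
// Account Binding before registration, continuing the registration sketch
// earlier in this file. The key ID and MAC key are placeholders a CA would
// provide out of band; client, acct, and ctx come from that earlier sketch.
//
//    eab := EAB{
//        KeyID:  "example-key-id",
//        MACKey: "bWFjLWtleS1wcm92aWRlZC1ieS10aGUtQ0E", // base64url, no padding
//    }
//    if err := acct.SetExternalAccountBinding(ctx, client, eab); err != nil {
//        return Account{}, err
//    }
//    acct, err := client.NewAccount(ctx, acct) // the EAB travels in the newAccount request
//    if err != nil {
//        return Account{}, err
//    }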
// Possible status values. From several spec sections:
// - Account §7.1.2 (valid, deactivated, revoked)
// - Order §7.1.3 (pending, ready, processing, valid, invalid)
// - Authorization §7.1.4 (pending, valid, invalid, deactivated, expired, revoked)
// - Challenge §7.1.5 (pending, processing, valid, invalid)
// - Status changes §7.1.6
const (
StatusPending = "pending"
StatusProcessing = "processing"
StatusValid = "valid"
StatusInvalid = "invalid"
StatusDeactivated = "deactivated"
StatusExpired = "expired"
StatusRevoked = "revoked"
StatusReady = "ready"
)

@ -0,0 +1,283 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"fmt"
"time"
)
// Authorization "represents a server's authorization for
// an account to represent an identifier. In addition to the
// identifier, an authorization includes several metadata fields, such
// as the status of the authorization (e.g., 'pending', 'valid', or
// 'revoked') and which challenges were used to validate possession of
// the identifier." §7.1.4
type Authorization struct {
// identifier (required, object): The identifier that the account is
// authorized to represent.
Identifier Identifier `json:"identifier"`
// status (required, string): The status of this authorization.
// Possible values are "pending", "valid", "invalid", "deactivated",
// "expired", and "revoked". See Section 7.1.6.
Status string `json:"status"`
// expires (optional, string): The timestamp after which the server
// will consider this authorization invalid, encoded in the format
// specified in [RFC3339]. This field is REQUIRED for objects with
// "valid" in the "status" field.
Expires time.Time `json:"expires,omitempty"`
// challenges (required, array of objects): For pending authorizations,
// the challenges that the client can fulfill in order to prove
// possession of the identifier. For valid authorizations, the
// challenge that was validated. For invalid authorizations, the
// challenge that was attempted and failed. Each array entry is an
// object with parameters required to validate the challenge. A
// client should attempt to fulfill one of these challenges, and a
// server should consider any one of the challenges sufficient to
// make the authorization valid.
Challenges []Challenge `json:"challenges"`
// wildcard (optional, boolean): This field MUST be present and true
// for authorizations created as a result of a newOrder request
// containing a DNS identifier with a value that was a wildcard
// domain name. For other authorizations, it MUST be absent.
// Wildcard domain names are described in Section 7.1.3.
Wildcard bool `json:"wildcard,omitempty"`
// "The server allocates a new URL for this authorization and returns a
// 201 (Created) response with the authorization URL in the Location
// header field" §7.4.1
//
// We transfer the value from the header to this field for storage and
// recall purposes.
Location string `json:"-"`
}
// IdentifierValue returns the Identifier.Value field, adjusted
// according to the Wildcard field.
func (authz Authorization) IdentifierValue() string {
if authz.Wildcard {
return "*." + authz.Identifier.Value
}
return authz.Identifier.Value
}
// fillChallengeFields populates extra fields in the challenge structs so that
// challenges can be solved without needing a bunch of unnecessary extra state.
func (authz *Authorization) fillChallengeFields(account Account) error {
accountThumbprint, err := jwkThumbprint(account.PrivateKey.Public())
if err != nil {
return fmt.Errorf("computing account JWK thumbprint: %v", err)
}
for i := 0; i < len(authz.Challenges); i++ {
authz.Challenges[i].Identifier = authz.Identifier
if authz.Challenges[i].KeyAuthorization == "" {
authz.Challenges[i].KeyAuthorization = authz.Challenges[i].Token + "." + accountThumbprint
}
}
return nil
}
// NewAuthorization creates a new authorization for an identifier using
// the newAuthz endpoint of the directory, if available. This function
// creates authzs out of the regular order flow.
//
// "Note that because the identifier in a pre-authorization request is
// the exact identifier to be included in the authorization object, pre-
// authorization cannot be used to authorize issuance of certificates
// containing wildcard domain names." §7.4.1
func (c *Client) NewAuthorization(ctx context.Context, account Account, id Identifier) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
if c.dir.NewAuthz == "" {
return Authorization{}, fmt.Errorf("server does not support newAuthz endpoint")
}
var authz Authorization
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewAuthz, id, &authz)
if err != nil {
return authz, err
}
authz.Location = resp.Header.Get("Location")
err = authz.fillChallengeFields(account)
if err != nil {
return authz, err
}
return authz, nil
}
// GetAuthorization fetches an authorization object from the server.
//
// "Authorization resources are created by the server in response to
// newOrder or newAuthz requests submitted by an account key holder;
// their URLs are provided to the client in the responses to these
// requests."
//
// "When a client receives an order from the server in reply to a
// newOrder request, it downloads the authorization resources by sending
// POST-as-GET requests to the indicated URLs. If the client initiates
// authorization using a request to the newAuthz resource, it will have
// already received the pending authorization object in the response to
// that request." §7.5
func (c *Client) GetAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
var authz Authorization
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, nil, &authz)
if err != nil {
return authz, err
}
authz.Location = authzURL
err = authz.fillChallengeFields(account)
if err != nil {
return authz, err
}
return authz, nil
}
// PollAuthorization polls the authorization resource endpoint until the authorization is
// considered "finalized" which means that it either succeeded, failed, or was abandoned.
// It blocks until that happens or until the configured timeout.
//
// "Usually, the validation process will take some time, so the client
// will need to poll the authorization resource to see when it is
// finalized."
//
// "For challenges where the client can tell when the server
// has validated the challenge (e.g., by seeing an HTTP or DNS request
// from the server), the client SHOULD NOT begin polling until it has
// seen the validation request from the server." §7.5.1
func (c *Client) PollAuthorization(ctx context.Context, account Account, authz Authorization) (Authorization, error) {
start, interval, maxDuration := time.Now(), c.pollInterval(), c.pollTimeout()
if authz.Status != "" {
if finalized, err := authzIsFinalized(authz); finalized {
return authz, err
}
}
for time.Since(start) < maxDuration {
select {
case <-time.After(interval):
case <-ctx.Done():
return authz, ctx.Err()
}
// get the latest authz object
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authz.Location, nil, &authz)
if err != nil {
return authz, fmt.Errorf("checking authorization status: %w", err)
}
if finalized, err := authzIsFinalized(authz); finalized {
return authz, err
}
// "The server MUST provide information about its retry state to the
// client via the 'error' field in the challenge and the Retry-After
// HTTP header field in response to requests to the challenge resource."
// §8.2
interval, err = retryAfter(resp, interval)
if err != nil {
return authz, err
}
}
return authz, fmt.Errorf("authorization took too long")
}
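// Editor's sketch (not part of the vendored file): driving a single pending
// authorization to completion. client, acct, ctx, and authzURL are assumed
// from earlier steps; ChallengeTypeHTTP01 and InitiateChallenge are defined
// in challenge.go of this package.
//
//    authz, err := client.GetAuthorization(ctx, acct, authzURL)
//    if err != nil {
//        return err
//    }
//    for _, chal := range authz.Challenges {
//        if chal.Type != ChallengeTypeHTTP01 {
//            continue
//        }
//        // make chal.KeyAuthorization available at chal.HTTP01ResourcePath(), then:
//        if _, err := client.InitiateChallenge(ctx, acct, chal); err != nil {
//            return err
//        }
//        break
//    }
//    authz, err = client.PollAuthorization(ctx, acct, authz)
//    if err != nil {
//        return err // wraps a Problem if the authorization failed
//    }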
// DeactivateAuthorization deactivates an authorization on the server, which is
// a good idea if the authorization is not going to be utilized by the client.
//
// "If a client wishes to relinquish its authorization to issue
// certificates for an identifier, then it may request that the server
// deactivate each authorization associated with it by sending POST
// requests with the static object {"status": "deactivated"} to each
// authorization URL." §7.5.2
func (c *Client) DeactivateAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
if err := c.provision(ctx); err != nil {
return Authorization{}, err
}
if authzURL == "" {
return Authorization{}, fmt.Errorf("empty authz url")
}
deactivate := struct {
Status string `json:"status"`
}{Status: "deactivated"}
var authz Authorization
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, deactivate, &authz)
authz.Location = authzURL
return authz, err
}
// authzIsFinalized returns true if the authorization is finished,
// whether successfully or not. If not, an error will be returned.
// Post-valid statuses that make an authz unusable are treated as
// errors.
func authzIsFinalized(authz Authorization) (bool, error) {
switch authz.Status {
case StatusPending:
// "Authorization objects are created in the 'pending' state." §7.1.6
return false, nil
case StatusValid:
// "If one of the challenges listed in the authorization transitions
// to the 'valid' state, then the authorization also changes to the
// 'valid' state." §7.1.6
return true, nil
case StatusInvalid:
// "If the client attempts to fulfill a challenge and fails, or if
// there is an error while the authorization is still pending, then
// the authorization transitions to the 'invalid' state." §7.1.6
var firstProblem Problem
for _, chal := range authz.Challenges {
if chal.Error != nil {
firstProblem = *chal.Error
break
}
}
firstProblem.Resource = authz
return true, fmt.Errorf("authorization failed: %w", firstProblem)
case StatusExpired, StatusDeactivated, StatusRevoked:
// "Once the authorization is in the 'valid' state, it can expire
// ('expired'), be deactivated by the client ('deactivated', see
// Section 7.5.2), or revoked by the server ('revoked')." §7.1.6
return true, fmt.Errorf("authorization %s", authz.Status)
case "":
return false, fmt.Errorf("status unknown")
default:
return true, fmt.Errorf("server set unrecognized authorization status: %s", authz.Status)
}
}

@ -0,0 +1,165 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"encoding/base64"
"fmt"
"net/http"
)
// Certificate represents a certificate chain, which we usually refer
// to as "a certificate" because in practice an end-entity certificate
// is seldom useful/practical without a chain.
type Certificate struct {
// The certificate resource URL as provisioned by
// the ACME server. Some ACME servers may split
// the chain into multiple URLs that are Linked
// together, in which case this URL represents the
// starting point.
URL string `json:"url"`
// The PEM-encoded certificate chain, end-entity first.
ChainPEM []byte `json:"-"`
}
// GetCertificateChain downloads all available certificate chains originating from
// the given certURL. This is to be done after an order is finalized.
//
// "To download the issued certificate, the client simply sends a POST-
// as-GET request to the certificate URL."
//
// "The server MAY provide one or more link relation header fields
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
// an alternative certificate chain starting with the same end-entity
// certificate. This can be used to express paths to various trust
// anchors. Clients can fetch these alternates and use their own
// heuristics to decide which is optimal." §7.4.2
func (c *Client) GetCertificateChain(ctx context.Context, account Account, certURL string) ([]Certificate, error) {
if err := c.provision(ctx); err != nil {
return nil, err
}
var chains []Certificate
addChain := func(certURL string) (*http.Response, error) {
// can't pool this buffer; bytes escape scope
buf := new(bytes.Buffer)
// TODO: set the Accept header? ("application/pem-certificate-chain") See end of §7.4.2
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, certURL, nil, buf)
if err != nil {
return resp, err
}
contentType := parseMediaType(resp)
switch contentType {
case "application/pem-certificate-chain":
chains = append(chains, Certificate{
URL: certURL,
ChainPEM: buf.Bytes(),
})
default:
return resp, fmt.Errorf("unrecognized Content-Type from server: %s", contentType)
}
// "For formats that can only express a single certificate, the server SHOULD
// provide one or more "Link: rel="up"" header fields pointing to an
// issuer or issuers so that ACME clients can build a certificate chain
// as defined in TLS (see Section 4.4.2 of [RFC8446])." (end of §7.4.2)
allUp := extractLinks(resp, "up")
for _, upURL := range allUp {
upCerts, err := c.GetCertificateChain(ctx, account, upURL)
if err != nil {
return resp, fmt.Errorf("retrieving next certificate in chain: %s: %w", upURL, err)
}
for _, upCert := range upCerts {
chains[len(chains)-1].ChainPEM = append(chains[len(chains)-1].ChainPEM, upCert.ChainPEM...)
}
}
return resp, nil
}
// always add preferred/first certificate chain
resp, err := addChain(certURL)
if err != nil {
return chains, err
}
// "The server MAY provide one or more link relation header fields
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
// an alternative certificate chain starting with the same end-entity
// certificate. This can be used to express paths to various trust
// anchors. Clients can fetch these alternates and use their own
// heuristics to decide which is optimal." §7.4.2
alternates := extractLinks(resp, "alternate")
for _, altURL := range alternates {
resp, err = addChain(altURL)
if err != nil {
return nil, fmt.Errorf("retrieving alternate certificate chain at %s: %w", altURL, err)
}
}
return chains, nil
}
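// Editor's sketch (not part of the vendored file): downloading the issued
// chain once the order reports a certificate URL. client, acct, ctx, and
// certURL are assumed from earlier steps; io/ioutil is assumed imported.
//
//    chains, err := client.GetCertificateChain(ctx, acct, certURL)
//    if err != nil {
//        return err
//    }
//    // chains[0] is the server's preferred chain; alternates (if any) follow.
//    if err := ioutil.WriteFile("cert.pem", chains[0].ChainPEM, 0600); err != nil {
//        return err
//    }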
// RevokeCertificate revokes the given certificate. If the certificate key is not
// provided, then the account key is used instead. See §7.6.
func (c *Client) RevokeCertificate(ctx context.Context, account Account, cert *x509.Certificate, certKey crypto.Signer, reason int) error {
if err := c.provision(ctx); err != nil {
return err
}
body := struct {
Certificate string `json:"certificate"`
Reason int `json:"reason"`
}{
Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw),
Reason: reason,
}
// "Revocation requests are different from other ACME requests in that
// they can be signed with either an account key pair or the key pair in
// the certificate." §7.6
kid := ""
if certKey == account.PrivateKey {
kid = account.Location
}
_, err := c.httpPostJWS(ctx, certKey, kid, c.dir.RevokeCert, body, nil)
return err
}
// Reasons for revoking a certificate, as defined
// by RFC 5280 §5.3.1.
// https://tools.ietf.org/html/rfc5280#section-5.3.1
const (
ReasonUnspecified = iota // 0
ReasonKeyCompromise // 1
ReasonCACompromise // 2
ReasonAffiliationChanged // 3
ReasonSuperseded // 4
ReasonCessationOfOperation // 5
ReasonCertificateHold // 6
_ // 7 (unused)
ReasonRemoveFromCRL // 8
ReasonPrivilegeWithdrawn // 9
ReasonAACompromise // 10
)
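// Editor's sketch (not part of the vendored file): revoking a certificate that
// is being replaced, signing with the account key so no separate certificate
// key is needed. cert is an *x509.Certificate parsed from the issued chain;
// client, acct, and ctx are assumed from earlier steps.
//
//    if err := client.RevokeCertificate(ctx, acct, cert, acct.PrivateKey, ReasonSuperseded); err != nil {
//        return fmt.Errorf("revoking certificate: %w", err)
//    }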

@ -0,0 +1,133 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"crypto/sha256"
"encoding/base64"
)
// Challenge holds information about an ACME challenge.
//
// "An ACME challenge object represents a server's offer to validate a
// client's possession of an identifier in a specific way. Unlike the
// other objects listed above, there is not a single standard structure
// for a challenge object. The contents of a challenge object depend on
// the validation method being used. The general structure of challenge
// objects and an initial set of validation methods are described in
// Section 8." §7.1.5
type Challenge struct {
// "Challenge objects all contain the following basic fields..." §8
// type (required, string): The type of challenge encoded in the
// object.
Type string `json:"type"`
// url (required, string): The URL to which a response can be posted.
URL string `json:"url"`
// status (required, string): The status of this challenge. Possible
// values are "pending", "processing", "valid", and "invalid" (see
// Section 7.1.6).
Status string `json:"status"`
// validated (optional, string): The time at which the server validated
// this challenge, encoded in the format specified in [RFC3339].
// This field is REQUIRED if the "status" field is "valid".
Validated string `json:"validated,omitempty"`
// error (optional, object): Error that occurred while the server was
// validating the challenge, if any, structured as a problem document
// [RFC7807]. Multiple errors can be indicated by using subproblems
// Section 6.7.1. A challenge object with an error MUST have status
// equal to "invalid".
Error *Problem `json:"error,omitempty"`
// "All additional fields are specified by the challenge type." §8
// (We also add our own for convenience.)
// "The token for a challenge is a string comprised entirely of
// characters in the URL-safe base64 alphabet." §8.1
//
// Used by the http-01, tls-alpn-01, and dns-01 challenges.
Token string `json:"token,omitempty"`
// A key authorization is a string that concatenates the token for the
// challenge with a key fingerprint, separated by a "." character (§8.1):
//
// keyAuthorization = token || '.' || base64url(Thumbprint(accountKey))
//
// This client package automatically assembles and sets this value for you.
KeyAuthorization string `json:"keyAuthorization,omitempty"`
// We attach the identifier that this challenge is associated with, which
// may be useful information for solving a challenge. It is not part of the
// structure as defined by the spec but is added by us to provide enough
// information to solve the DNS-01 challenge.
Identifier Identifier `json:"identifier,omitempty"`
}
// HTTP01ResourcePath returns the URI path for solving the http-01 challenge.
//
// "The path at which the resource is provisioned is comprised of the
// fixed prefix '/.well-known/acme-challenge/', followed by the 'token'
// value in the challenge." §8.3
func (c Challenge) HTTP01ResourcePath() string {
return "/.well-known/acme-challenge/" + c.Token
}
// DNS01TXTRecordName returns the name of the TXT record to create for
// solving the dns-01 challenge.
//
// "The client constructs the validation domain name by prepending the
// label '_acme-challenge' to the domain name being validated, then
// provisions a TXT record with the digest value under that name." §8.4
func (c Challenge) DNS01TXTRecordName() string {
return "_acme-challenge." + c.Identifier.Value
}
// DNS01KeyAuthorization encodes a key authorization value to be used
// in a TXT record for the _acme-challenge DNS record.
//
// "A client fulfills this challenge by constructing a key authorization
// from the 'token' value provided in the challenge and the client's
// account key. The client then computes the SHA-256 digest [FIPS180-4]
// of the key authorization.
//
// The record provisioned to the DNS contains the base64url encoding of
// this digest." §8.4
func (c Challenge) DNS01KeyAuthorization() string {
h := sha256.Sum256([]byte(c.KeyAuthorization))
return base64.RawURLEncoding.EncodeToString(h[:])
}
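// Editor's sketch (not part of the vendored file): the two helpers above give
// a DNS provider everything it needs to publish the dns-01 record. chal is a
// Challenge taken from an authorization, with KeyAuthorization already filled
// in by this package.
//
//    name := chal.DNS01TXTRecordName()     // e.g. "_acme-challenge.example.com"
//    value := chal.DNS01KeyAuthorization() // base64url(SHA-256(key authorization))
//    // publish the TXT record name -> value, wait for propagation, then call
//    // InitiateChallenge and poll the authorization as sketched in authorization.go.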
// InitiateChallenge "indicates to the server that it is ready for the challenge
// validation by sending an empty JSON body ('{}') carried in a POST request to
// the challenge URL (not the authorization URL)." §7.5.1
func (c *Client) InitiateChallenge(ctx context.Context, account Account, challenge Challenge) (Challenge, error) {
if err := c.provision(ctx); err != nil {
return Challenge{}, err
}
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, challenge.URL, struct{}{}, &challenge)
return challenge, err
}
// The standard or well-known ACME challenge types.
const (
ChallengeTypeHTTP01 = "http-01" // RFC 8555 §8.3
ChallengeTypeDNS01 = "dns-01" // RFC 8555 §8.4
ChallengeTypeTLSALPN01 = "tls-alpn-01" // RFC 8737 §3
)

@ -0,0 +1,240 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package acme fully implements the ACME protocol specification as
// described in RFC 8555: https://tools.ietf.org/html/rfc8555.
//
// It is designed to work smoothly in large-scale deployments with
// high resilience to errors and intermittent network or server issues,
// with retries built-in at every layer of the HTTP request stack.
//
// NOTE: This is a low-level API. Most users will want the mholt/acmez
// package which is more concerned with configuring challenges and
// implementing the order flow. However, using this package directly
// is recommended for advanced use cases having niche requirements.
// See the examples in the examples/plumbing folder for a tutorial.
package acme
import (
"context"
"fmt"
"net/http"
"sync"
"time"
"go.uber.org/zap"
)
// Client facilitates ACME client operations as defined by the spec.
//
// Because the client is synchronized for concurrent use, it should
// not be copied.
//
// Many errors that are returned by a Client are likely to be of type
// Problem as long as the ACME server returns a structured error
// response. This package wraps errors that may be of type Problem,
// so you can access the details using the conventional Go pattern:
//
// var problem Problem
// if errors.As(err, &problem) {
// log.Printf("Houston, we have a problem: %+v", problem)
// }
//
// All Problem errors originate from the ACME server.
type Client struct {
// The ACME server's directory endpoint.
Directory string
// Custom HTTP client.
HTTPClient *http.Client
// Augmentation of the User-Agent header. Please set
// this so that CAs can troubleshoot bugs more easily.
UserAgent string
// Delay between poll attempts. Only used if server
// does not supply a Retry-After header. Default: 250ms
PollInterval time.Duration
// Maximum duration for polling. Default: 5m
PollTimeout time.Duration
// An optional logger. Default: no logs
Logger *zap.Logger
mu sync.Mutex // protects all unexported fields
dir Directory
nonces *stack
}
// GetDirectory retrieves the directory configured at c.Directory. It is
// NOT necessary to call this to provision the client. It is only useful
// if you want to access a copy of the directory yourself.
func (c *Client) GetDirectory(ctx context.Context) (Directory, error) {
if err := c.provision(ctx); err != nil {
return Directory{}, err
}
return c.dir, nil
}
func (c *Client) provision(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.nonces == nil {
c.nonces = new(stack)
}
err := c.provisionDirectory(ctx)
if err != nil {
return fmt.Errorf("provisioning client: %w", err)
}
return nil
}
func (c *Client) provisionDirectory(ctx context.Context) error {
// don't get directory again if we already have it;
// checking any one of the required fields will do
if c.dir.NewNonce != "" {
return nil
}
if c.Directory == "" {
return fmt.Errorf("missing directory URL")
}
// prefer cached version if it's recent enough
directoriesMu.Lock()
defer directoriesMu.Unlock()
if dir, ok := directories[c.Directory]; ok {
if time.Since(dir.retrieved) < 12*time.Hour {
c.dir = dir.Directory
return nil
}
}
_, err := c.httpReq(ctx, http.MethodGet, c.Directory, nil, &c.dir)
if err != nil {
return err
}
directories[c.Directory] = cachedDirectory{c.dir, time.Now()}
return nil
}
func (c *Client) nonce(ctx context.Context) (string, error) {
nonce := c.nonces.pop()
if nonce != "" {
return nonce, nil
}
if c.dir.NewNonce == "" {
return "", fmt.Errorf("directory missing newNonce endpoint")
}
resp, err := c.httpReq(ctx, http.MethodHead, c.dir.NewNonce, nil, nil)
if err != nil {
return "", fmt.Errorf("fetching new nonce from server: %w", err)
}
return resp.Header.Get(replayNonce), nil
}
func (c *Client) pollInterval() time.Duration {
if c.PollInterval == 0 {
return defaultPollInterval
}
return c.PollInterval
}
func (c *Client) pollTimeout() time.Duration {
if c.PollTimeout == 0 {
return defaultPollTimeout
}
return c.PollTimeout
}
// Directory acts as an index for the ACME server as
// specified in the spec: "In order to help clients
// configure themselves with the right URLs for each
// ACME operation, ACME servers provide a directory
// object." §7.1.1
type Directory struct {
NewNonce string `json:"newNonce"`
NewAccount string `json:"newAccount"`
NewOrder string `json:"newOrder"`
NewAuthz string `json:"newAuthz,omitempty"`
RevokeCert string `json:"revokeCert"`
KeyChange string `json:"keyChange"`
Meta *DirectoryMeta `json:"meta,omitempty"`
}
// DirectoryMeta is optional extra data that may be
// included in an ACME server directory. §7.1.1
type DirectoryMeta struct {
TermsOfService string `json:"termsOfService,omitempty"`
Website string `json:"website,omitempty"`
CAAIdentities []string `json:"caaIdentities,omitempty"`
ExternalAccountRequired bool `json:"externalAccountRequired,omitempty"`
}
// stack is a simple thread-safe stack.
type stack struct {
stack []string
stackMu sync.Mutex
}
func (s *stack) push(v string) {
if v == "" {
return
}
s.stackMu.Lock()
defer s.stackMu.Unlock()
if len(s.stack) >= 64 {
return
}
s.stack = append(s.stack, v)
}
func (s *stack) pop() string {
s.stackMu.Lock()
defer s.stackMu.Unlock()
n := len(s.stack)
if n == 0 {
return ""
}
v := s.stack[n-1]
s.stack = s.stack[:n-1]
return v
}
// Directories seldom (if ever) change in practice, and
// client structs are often ephemeral, so we can cache
// directories to speed things up a bit for the user.
// Keyed by directory URL.
var (
directories = make(map[string]cachedDirectory)
directoriesMu sync.Mutex
)
type cachedDirectory struct {
Directory
retrieved time.Time
}
// replayNonce is the header field that contains a new
// anti-replay nonce from the server.
const replayNonce = "Replay-Nonce"
const (
defaultPollInterval = 250 * time.Millisecond
defaultPollTimeout = 5 * time.Minute
)

@ -0,0 +1,394 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"bytes"
"context"
"crypto"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// httpPostJWS performs robust HTTP requests by JWS-encoding the JSON of input.
// If output is specified, the response body is written into it: if the response
// Content-Type is JSON, it will be JSON-decoded into output (which must be a
// pointer); otherwise, if output is an io.Writer, the response body will be
// written to it uninterpreted. In all cases, the returned response value's
// body will have been drained and closed, so there is no need to close it again.
// It automatically retries in the case of network, I/O, or badNonce errors.
func (c *Client) httpPostJWS(ctx context.Context, privateKey crypto.Signer,
kid, endpoint string, input, output interface{}) (*http.Response, error) {
if err := c.provision(ctx); err != nil {
return nil, err
}
var resp *http.Response
var err error
// we can retry on internal server errors just in case it was a hiccup,
// but we probably don't need to retry so many times in that case
internalServerErrors, maxInternalServerErrors := 0, 3
// set a hard cap on the number of retries for any other reason
const maxAttempts = 10
var attempts int
for attempts = 1; attempts <= maxAttempts; attempts++ {
if attempts > 1 {
select {
case <-time.After(250 * time.Millisecond):
case <-ctx.Done():
return nil, ctx.Err()
}
}
var nonce string // avoid shadowing err
nonce, err = c.nonce(ctx)
if err != nil {
return nil, err
}
var encodedPayload []byte // avoid shadowing err
encodedPayload, err = jwsEncodeJSON(input, privateKey, keyID(kid), nonce, endpoint)
if err != nil {
return nil, fmt.Errorf("encoding payload: %v", err)
}
resp, err = c.httpReq(ctx, http.MethodPost, endpoint, encodedPayload, output)
if err == nil {
return resp, nil
}
// "When a server rejects a request because its nonce value was
// unacceptable (or not present), it MUST provide HTTP status code 400
// (Bad Request), and indicate the ACME error type
// 'urn:ietf:params:acme:error:badNonce'. An error response with the
// 'badNonce' error type MUST include a Replay-Nonce header field with a
// fresh nonce that the server will accept in a retry of the original
// query (and possibly in other requests, according to the server's
// nonce scoping policy). On receiving such a response, a client SHOULD
// retry the request using the new nonce." §6.5
var problem Problem
if errors.As(err, &problem) {
if problem.Type == ProblemTypeBadNonce {
if c.Logger != nil {
c.Logger.Debug("server rejected our nonce; retrying",
zap.String("detail", problem.Detail),
zap.Error(err))
}
continue
}
}
// internal server errors *could* just be a hiccup and it may be worth
// trying again, but not nearly so many times as for other reasons
if resp != nil && resp.StatusCode >= 500 {
internalServerErrors++
if internalServerErrors < maxInternalServerErrors {
continue
}
}
// for any other error, there's not much we can do automatically
break
}
return resp, fmt.Errorf("request to %s failed after %d attempts: %v",
endpoint, attempts, err)
}
// httpReq robustly performs an HTTP request using the given method to the given endpoint, honoring
// the given context's cancellation. The joseJSONPayload is optional; if not nil, it is expected to
// be a JOSE+JSON encoding. The output is also optional; if not nil, the response body will be read
// into output. If the response Content-Type is JSON, it will be JSON-decoded into output, which
// must be a pointer type. If the response is any other Content-Type and if output is an io.Writer,
// it will be written (without interpretation or decoding) to output. In all cases, the returned
// response value will have the body drained and closed, so there is no need to close it again.
//
// If there are any network or I/O errors, the request will be retried as safely and resiliently as
// possible.
func (c *Client) httpReq(ctx context.Context, method, endpoint string, joseJSONPayload []byte, output interface{}) (*http.Response, error) {
// even if the caller doesn't specify an output, we still use a
// buffer to store possible error response (we reset it later)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
var resp *http.Response
var err error
// potentially retry the request if there's network, I/O, or server internal errors
const maxAttempts = 3
for attempt := 0; attempt < maxAttempts; attempt++ {
if attempt > 0 {
// traffic calming ahead
select {
case <-time.After(250 * time.Millisecond):
case <-ctx.Done():
return nil, ctx.Err()
}
}
var body io.Reader
if joseJSONPayload != nil {
body = bytes.NewReader(joseJSONPayload)
}
var req *http.Request
req, err = http.NewRequestWithContext(ctx, method, endpoint, body)
if err != nil {
return nil, fmt.Errorf("creating request: %w", err)
}
if len(joseJSONPayload) > 0 {
req.Header.Set("Content-Type", "application/jose+json")
}
// on first attempt, we need to reset buf since it
// came from the pool; after first attempt, we should
// still reset it because we might be retrying after
// a partial download
buf.Reset()
var retry bool
resp, retry, err = c.doHTTPRequest(req, buf)
if err != nil {
if retry {
if c.Logger != nil {
c.Logger.Warn("HTTP request failed; retrying",
zap.String("url", req.URL.String()),
zap.Error(err))
}
continue
}
break
}
// check for HTTP errors
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 300: // OK
case resp.StatusCode >= 400 && resp.StatusCode < 600: // error
if parseMediaType(resp) == "application/problem+json" {
// "When the server responds with an error status, it SHOULD provide
// additional information using a problem document [RFC7807]." (§6.7)
var problem Problem
err = json.Unmarshal(buf.Bytes(), &problem)
if err != nil {
return resp, fmt.Errorf("HTTP %d: JSON-decoding problem details: %w (raw='%s')",
resp.StatusCode, err, buf.String())
}
if resp.StatusCode >= 500 && joseJSONPayload == nil {
// a 5xx status is probably safe to retry on even after a
// request that had no I/O errors; it could be that the
// server just had a hiccup... so try again, but only if
// there is no request body, because we can't replay a
// request that has an anti-replay nonce, obviously
err = problem
continue
}
return resp, problem
}
return resp, fmt.Errorf("HTTP %d: %s", resp.StatusCode, buf.String())
default: // what even is this
return resp, fmt.Errorf("unexpected status code: HTTP %d", resp.StatusCode)
}
// do not retry if we got this far (success)
break
}
if err != nil {
return resp, err
}
// if expecting a body, finally decode it
if output != nil {
contentType := parseMediaType(resp)
switch contentType {
case "application/json":
// unmarshal JSON
err = json.Unmarshal(buf.Bytes(), output)
if err != nil {
return resp, fmt.Errorf("JSON-decoding response body: %w", err)
}
default:
// don't interpret anything else here; just hope
// it's a Writer and copy the bytes
w, ok := output.(io.Writer)
if !ok {
return resp, fmt.Errorf("response Content-Type is %s but target container is not io.Writer: %T", contentType, output)
}
_, err = io.Copy(w, buf)
if err != nil {
return resp, err
}
}
}
return resp, nil
}
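// Illustrative sketch (not part of the upstream file): a minimal internal use
// of httpReq, assuming the client's Directory URL is set. The ACME directory
// is fetched with a plain GET (no JWS payload) and JSON-decoded into a map.
func exampleFetchDirectory(ctx context.Context, c *Client) (map[string]interface{}, error) {
    var dir map[string]interface{}
    // nil payload means no request body; &dir receives the JSON response
    _, err := c.httpReq(ctx, http.MethodGet, c.Directory, nil, &dir)
    return dir, err
}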
// doHTTPRequest performs an HTTP request at most one time. It returns the response
// (with drained and closed body), having drained any request body into buf. If
// retry == true is returned, then the request should be safe to retry in the case
// of an error. However, in some cases a retry may be recommended even if part of
// the response body has been read and written into buf. Thus, the buffer may have
// been partially written to and should be reset before being reused.
//
// This method remembers any nonce returned by the server.
func (c *Client) doHTTPRequest(req *http.Request, buf *bytes.Buffer) (resp *http.Response, retry bool, err error) {
req.Header.Set("User-Agent", c.userAgent())
resp, err = c.httpClient().Do(req)
if err != nil {
return resp, true, fmt.Errorf("performing request: %w", err)
}
defer resp.Body.Close()
if c.Logger != nil {
c.Logger.Debug("http request",
zap.String("method", req.Method),
zap.String("url", req.URL.String()),
zap.Reflect("headers", req.Header),
zap.Int("status_code", resp.StatusCode),
zap.Reflect("response_headers", resp.Header))
}
// "The server MUST include a Replay-Nonce header field
// in every successful response to a POST request and
// SHOULD provide it in error responses as well." §6.5
//
// "Before sending a POST request to the server, an ACME
// client needs to have a fresh anti-replay nonce to put
// in the 'nonce' header of the JWS. In most cases, the
// client will have gotten a nonce from a previous
// request." §7.2
//
// So basically, we need to remember the nonces we get
// and use them at the next opportunity.
c.nonces.push(resp.Header.Get(replayNonce))
// drain the response body, even if we aren't keeping it
// (this allows us to reuse the connection and also read
// any error information)
_, err = io.Copy(buf, resp.Body)
if err != nil {
// this is likely a network or I/O error, but is it worth retrying?
// technically the request has already completed, it was just our
// download of the response that failed; so we probably should not
// retry if the request succeeded... however, if there was an HTTP
// error, it likely didn't count against any server-enforced rate
// limits, and we DO want to know the error information, so it should
// be safe to retry the request in those cases AS LONG AS there is
// no request body, which in the context of ACME likely includes an
// anti-replay nonce, which obviously we can't reuse
retry = resp.StatusCode >= 400 && req.Body == nil
return resp, retry, fmt.Errorf("reading response body: %w", err)
}
return resp, false, nil
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient == nil {
return http.DefaultClient
}
return c.HTTPClient
}
func (c *Client) userAgent() string {
ua := fmt.Sprintf("acmez (%s; %s)", runtime.GOOS, runtime.GOARCH)
if c.UserAgent != "" {
ua = c.UserAgent + " " + ua
}
return ua
}
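// Illustrative sketch (not part of the upstream file): HTTPClient and
// UserAgent are optional overrides; leaving them zero-valued falls back to
// http.DefaultClient and the default acmez UA string built by userAgent().
// The directory URL below is Let's Encrypt's staging endpoint, used purely
// as an example value.
func exampleCustomizedClient() *Client {
    return &Client{
        Directory:  "https://acme-staging-v02.api.letsencrypt.org/directory",
        UserAgent:  "my-app/1.0", // prepended to the default UA by userAgent()
        HTTPClient: &http.Client{Timeout: 30 * time.Second},
    }
}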
// extractLinks extracts the URLs from the response's Link headers with the
// designated relation rel. It may return more than one value
// if there are multiple matching Link values.
//
// Originally by Isaac: https://github.com/eggsampler/acme
// and has been modified to support multiple matching Links.
func extractLinks(resp *http.Response, rel string) []string {
if resp == nil {
return nil
}
var links []string
for _, l := range resp.Header["Link"] {
matches := linkRegex.FindAllStringSubmatch(l, -1)
for _, m := range matches {
if len(m) != 3 {
continue
}
if m[2] == rel {
links = append(links, m[1])
}
}
}
return links
}
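// Illustrative sketch (not part of the upstream file): given a response
// carrying
//
//	Link: <https://ca.example/acme/cert/123/1>;rel="alternate"
//
// extractLinks(resp, "alternate") returns the URL between the angle brackets.
func exampleExtractAlternates() []string {
    resp := &http.Response{Header: http.Header{}}
    resp.Header.Add("Link", `<https://ca.example/acme/cert/123/1>;rel="alternate"`)
    return extractLinks(resp, "alternate") // -> ["https://ca.example/acme/cert/123/1"]
}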
// parseMediaType returns only the media type from the
// Content-Type header of resp.
func parseMediaType(resp *http.Response) string {
if resp == nil {
return ""
}
ct := resp.Header.Get("Content-Type")
sep := strings.Index(ct, ";")
if sep < 0 {
return ct
}
return strings.TrimSpace(ct[:sep])
}
// retryAfter returns a duration from the response's Retry-After
// header field, if it exists. It can return an error if the
// header contains an invalid value. If there is no error but
// there is no Retry-After header provided, then the fallback
// duration is returned instead.
func retryAfter(resp *http.Response, fallback time.Duration) (time.Duration, error) {
if resp == nil {
return fallback, nil
}
raSeconds := resp.Header.Get("Retry-After")
if raSeconds == "" {
return fallback, nil
}
ra, err := strconv.Atoi(raSeconds)
if err != nil || ra < 0 {
return 0, fmt.Errorf("response had invalid Retry-After header: %s", raSeconds)
}
return time.Duration(ra) * time.Second, nil
}
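// Illustrative sketch (not part of the upstream file): honoring the server's
// polling preference with a caller-chosen fallback (the 5s value here is an
// arbitrary example, not a library default).
func examplePollDelay(resp *http.Response) time.Duration {
    interval, err := retryAfter(resp, 5*time.Second)
    if err != nil {
        // invalid Retry-After value; fall back rather than abort polling
        return 5 * time.Second
    }
    return interval
}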
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
var linkRegex = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`)

@ -1,6 +1,25 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// --- ORIGINAL LICENSE ---
//
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// license that can be found in the THIRD-PARTY file.
//
// (This file has been modified from its original contents.)
// (And it has dragons. Don't wake the dragons.)
package acme
@ -11,26 +30,14 @@ import (
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
_ "crypto/sha512" // need for EC keys
"encoding/asn1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"hash"
"math/big"
)
// MACAlgorithm represents a JWS MAC signature algorithm.
// See https://tools.ietf.org/html/rfc7518#section-3.1 for more details.
type MACAlgorithm string
const (
MACAlgorithmHS256 = MACAlgorithm("HS256")
MACAlgorithmHS384 = MACAlgorithm("HS384")
MACAlgorithmHS512 = MACAlgorithm("HS512")
)
var errUnsupportedKey = fmt.Errorf("unknown key type; only RSA and ECDSA are supported")
// keyID is the account identity provided by a CA during registration.
type keyID string
@ -39,18 +46,36 @@ type keyID string
// See jwsEncodeJSON for details.
const noKeyID = keyID("")
// noPayload indicates jwsEncodeJSON will encode zero-length octet string
// in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
// authenticated GET requests via POSTing with an empty payload.
// See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
const noPayload = ""
// jsonWebSignature can be easily serialized into a JWS following
// https://tools.ietf.org/html/rfc7515#section-3.2.
type jsonWebSignature struct {
Protected string `json:"protected"`
Payload string `json:"payload"`
Sig string `json:"signature"`
// // noPayload indicates jwsEncodeJSON will encode zero-length octet string
// // in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
// // authenticated GET requests via POSTing with an empty payload.
// // See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
// const noPayload = ""
// jwsEncodeEAB creates a JWS payload for External Account Binding according to RFC 8555 §7.3.4.
func jwsEncodeEAB(accountKey crypto.PublicKey, hmacKey []byte, kid keyID, url string) ([]byte, error) {
// §7.3.4: "The 'alg' field MUST indicate a MAC-based algorithm"
alg, sha := "HS256", crypto.SHA256
// §7.3.4: "The 'nonce' field MUST NOT be present"
phead, err := jwsHead(alg, "", url, kid, nil)
if err != nil {
return nil, err
}
encodedKey, err := jwkEncode(accountKey)
if err != nil {
return nil, err
}
payload := base64.RawURLEncoding.EncodeToString([]byte(encodedKey))
payloadToSign := []byte(phead + "." + payload)
h := hmac.New(sha256.New, hmacKey)
h.Write(payloadToSign)
sig := h.Sum(nil)
return jwsFinal(sha, sig, phead, payload)
}
// jwsEncodeJSON signs claimset using provided key and a nonce.
@ -62,69 +87,39 @@ type jsonWebSignature struct {
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
//
// See https://tools.ietf.org/html/rfc7515#section-7.
//
// If nonce is empty, it will not be encoded into the header.
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) {
alg, sha := jwsHasher(key.Public())
if alg == "" || !sha.Available() {
return nil, ErrUnsupportedKey
return nil, errUnsupportedKey
}
var phead string
switch kid {
case noKeyID:
jwk, err := jwkEncode(key.Public())
if err != nil {
return nil, err
}
phead = fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q,"url":%q}`, alg, jwk, nonce, url)
default:
phead = fmt.Sprintf(`{"alg":%q,"kid":%q,"nonce":%q,"url":%q}`, alg, kid, nonce, url)
phead, err := jwsHead(alg, nonce, url, kid, key)
if err != nil {
return nil, err
}
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
var payload string
if claimset != noPayload {
if claimset != nil {
cs, err := json.Marshal(claimset)
if err != nil {
return nil, err
}
payload = base64.RawURLEncoding.EncodeToString(cs)
}
hash := sha.New()
hash.Write([]byte(phead + "." + payload))
sig, err := jwsSign(key, sha, hash.Sum(nil))
if err != nil {
return nil, err
}
enc := jsonWebSignature{
Protected: phead,
Payload: payload,
Sig: base64.RawURLEncoding.EncodeToString(sig),
}
return json.Marshal(&enc)
}
// jwsWithMAC creates and signs a JWS using the given key and algorithm.
// "rawProtected" and "rawPayload" should not be base64-URL-encoded.
func jwsWithMAC(key []byte, alg MACAlgorithm, rawProtected, rawPayload []byte) (*jsonWebSignature, error) {
if len(key) == 0 {
return nil, errors.New("acme: cannot sign JWS with an empty MAC key")
}
protected := base64.RawURLEncoding.EncodeToString(rawProtected)
payload := base64.RawURLEncoding.EncodeToString(rawPayload)
payloadToSign := []byte(phead + "." + payload)
hash := sha.New()
_, _ = hash.Write(payloadToSign)
digest := hash.Sum(nil)
// Only HMACs are currently supported.
hmac, err := newHMAC(key, alg)
sig, err := jwsSign(key, sha, digest)
if err != nil {
return nil, err
}
if _, err := hmac.Write([]byte(protected + "." + payload)); err != nil {
return nil, err
}
mac := hmac.Sum(nil)
return &jsonWebSignature{
Protected: protected,
Payload: payload,
Sig: base64.RawURLEncoding.EncodeToString(mac),
}, nil
return jwsFinal(sha, sig, phead, payload)
}
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
@ -165,28 +160,66 @@ func jwkEncode(pub crypto.PublicKey) (string, error) {
base64.RawURLEncoding.EncodeToString(y),
), nil
}
return "", ErrUnsupportedKey
return "", errUnsupportedKey
}
// jwsHead constructs the protected JWS header for the given fields.
// Since jwk and kid are mutually-exclusive, the jwk will be encoded
// only if kid is empty. If nonce is empty, it will not be encoded.
func jwsHead(alg, nonce, url string, kid keyID, key crypto.Signer) (string, error) {
phead := fmt.Sprintf(`{"alg":%q`, alg)
if kid == noKeyID {
jwk, err := jwkEncode(key.Public())
if err != nil {
return "", err
}
phead += fmt.Sprintf(`,"jwk":%s`, jwk)
} else {
phead += fmt.Sprintf(`,"kid":%q`, kid)
}
if nonce != "" {
phead += fmt.Sprintf(`,"nonce":%q`, nonce)
}
phead += fmt.Sprintf(`,"url":%q}`, url)
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
return phead, nil
}
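// For illustration only (not part of the upstream file): for a request bound
// to an existing account, jwsHead produces a protected header like the
// following, shown before base64url encoding (all values are placeholders):
//
//	{"alg":"ES256","kid":"https://ca.example/acme/acct/123","nonce":"example-nonce","url":"https://ca.example/acme/new-order"}
//
// When there is no account URL yet (kid == noKeyID), the "kid" member is
// replaced by the full "jwk" of the account's public key, and when nonce is
// empty (as for EAB) the "nonce" member is omitted entirely.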
// jwsFinal constructs the final JWS object.
func jwsFinal(sha crypto.Hash, sig []byte, phead, payload string) ([]byte, error) {
enc := struct {
Protected string `json:"protected"`
Payload string `json:"payload"`
Sig string `json:"signature"`
}{
Protected: phead,
Payload: payload,
Sig: base64.RawURLEncoding.EncodeToString(sig),
}
result, err := json.Marshal(&enc)
if err != nil {
return nil, err
}
return result, nil
}
// jwsSign signs the digest using the given key.
// The hash is unused for ECDSA keys.
//
// Note: non-stdlib crypto.Signer implementations are expected to return
// the signature in the format as specified in RFC7518.
// See https://tools.ietf.org/html/rfc7518 for more details.
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
switch pub := key.Public().(type) {
case *rsa.PublicKey:
return key.Sign(rand.Reader, digest, hash)
case *ecdsa.PublicKey:
sigASN1, err := key.Sign(rand.Reader, digest, hash)
if key, ok := key.(*ecdsa.PrivateKey); ok {
// The key.Sign method of ecdsa returns ASN1-encoded signature.
// So, we use the package Sign function instead
// to get R and S values directly and format the result accordingly.
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
if err != nil {
return nil, err
}
var rs struct{ R, S *big.Int }
if _, err := asn1.Unmarshal(sigASN1, &rs); err != nil {
return nil, err
}
rb, sb := rs.R.Bytes(), rs.S.Bytes()
size := pub.Params().BitSize / 8
rb, sb := r.Bytes(), s.Bytes()
size := key.Params().BitSize / 8
if size%8 > 0 {
size++
}
@ -195,7 +228,7 @@ func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error)
copy(sig[size*2-len(sb):], sb)
return sig, nil
}
return nil, ErrUnsupportedKey
return key.Sign(rand.Reader, digest, hash)
}
// jwsHasher indicates suitable JWS algorithm name and a hash function
@ -218,23 +251,9 @@ func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
return "", 0
}
// newHMAC returns an appropriate HMAC for the given MACAlgorithm.
func newHMAC(key []byte, alg MACAlgorithm) (hash.Hash, error) {
switch alg {
case MACAlgorithmHS256:
return hmac.New(sha256.New, key), nil
case MACAlgorithmHS384:
return hmac.New(sha512.New384, key), nil
case MACAlgorithmHS512:
return hmac.New(sha512.New, key), nil
default:
return nil, fmt.Errorf("acme: unsupported MAC algorithm: %v", alg)
}
}
// JWKThumbprint creates a JWK thumbprint out of pub
// jwkThumbprint creates a JWK thumbprint out of pub
// as specified in https://tools.ietf.org/html/rfc7638.
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
func jwkThumbprint(pub crypto.PublicKey) (string, error) {
jwk, err := jwkEncode(pub)
if err != nil {
return "", err

@ -0,0 +1,247 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import (
"context"
"encoding/base64"
"errors"
"fmt"
"time"
)
// Order is an object that "represents a client's request for a certificate
// and is used to track the progress of that order through to issuance.
// Thus, the object contains information about the requested
// certificate, the authorizations that the server requires the client
// to complete, and any certificates that have resulted from this order."
// §7.1.3
type Order struct {
// status (required, string): The status of this order. Possible
// values are "pending", "ready", "processing", "valid", and
// "invalid". See Section 7.1.6.
Status string `json:"status"`
// expires (optional, string): The timestamp after which the server
// will consider this order invalid, encoded in the format specified
// in [RFC3339]. This field is REQUIRED for objects with "pending"
// or "valid" in the status field.
Expires time.Time `json:"expires,omitempty"`
// identifiers (required, array of object): An array of identifier
// objects that the order pertains to.
Identifiers []Identifier `json:"identifiers"`
// notBefore (optional, string): The requested value of the notBefore
// field in the certificate, in the date format defined in [RFC3339].
NotBefore *time.Time `json:"notBefore,omitempty"`
// notAfter (optional, string): The requested value of the notAfter
// field in the certificate, in the date format defined in [RFC3339].
NotAfter *time.Time `json:"notAfter,omitempty"`
// error (optional, object): The error that occurred while processing
// the order, if any. This field is structured as a problem document
// [RFC7807].
Error *Problem `json:"error,omitempty"`
// authorizations (required, array of string): For pending orders, the
// authorizations that the client needs to complete before the
// requested certificate can be issued (see Section 7.5), including
// unexpired authorizations that the client has completed in the past
// for identifiers specified in the order. The authorizations
// required are dictated by server policy; there may not be a 1:1
// relationship between the order identifiers and the authorizations
// required. For final orders (in the "valid" or "invalid" state),
// the authorizations that were completed. Each entry is a URL from
// which an authorization can be fetched with a POST-as-GET request.
Authorizations []string `json:"authorizations"`
// finalize (required, string): A URL that a CSR must be POSTed to once
// all of the order's authorizations are satisfied to finalize the
// order. The result of a successful finalization will be the
// population of the certificate URL for the order.
Finalize string `json:"finalize"`
// certificate (optional, string): A URL for the certificate that has
// been issued in response to this order.
Certificate string `json:"certificate"`
// Similar to new-account, the server returns a 201 response with
// the URL to the order object in the Location header.
//
// We transfer the value from the header to this field for
// storage and recall purposes.
Location string `json:"-"`
}
// Identifier is used in order and authorization (authz) objects.
type Identifier struct {
// type (required, string): The type of identifier. This document
// defines the "dns" identifier type. See the registry defined in
// Section 9.7.7 for any others.
Type string `json:"type"`
// value (required, string): The identifier itself.
Value string `json:"value"`
}
// NewOrder creates a new order with the server.
//
// "The client begins the certificate issuance process by sending a POST
// request to the server's newOrder resource." §7.4
func (c *Client) NewOrder(ctx context.Context, account Account, order Order) (Order, error) {
if err := c.provision(ctx); err != nil {
return order, err
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewOrder, order, &order)
if err != nil {
return order, err
}
order.Location = resp.Header.Get("Location")
return order, nil
}
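// Illustrative sketch (not part of the upstream file): creating an order for a
// single DNS identifier, assuming "acct" was previously registered with the CA.
func exampleNewOrder(ctx context.Context, c *Client, acct Account) (Order, error) {
    return c.NewOrder(ctx, acct, Order{
        Identifiers: []Identifier{{Type: "dns", Value: "example.com"}},
    })
}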
// FinalizeOrder finalizes the order with the server and polls until the server has
// updated the order status. The CSR must be in ASN.1 DER-encoded format. If this
// succeeds, the certificate is ready to download once this returns.
//
// "Once the client believes it has fulfilled the server's requirements,
// it should send a POST request to the order resource's finalize URL." §7.4
func (c *Client) FinalizeOrder(ctx context.Context, account Account, order Order, csrASN1DER []byte) (Order, error) {
if err := c.provision(ctx); err != nil {
return order, err
}
body := struct {
// csr (required, string): A CSR encoding the parameters for the
// certificate being requested [RFC2986]. The CSR is sent in the
// base64url-encoded version of the DER format. (Note: Because this
// field uses base64url, and does not include headers, it is
// different from PEM.) §7.4
CSR string `json:"csr"`
}{
CSR: base64.RawURLEncoding.EncodeToString(csrASN1DER),
}
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Finalize, body, &order)
if err != nil {
// "A request to finalize an order will result in error if the order is
// not in the 'ready' state. In such cases, the server MUST return a
// 403 (Forbidden) error with a problem document of type
// 'orderNotReady'. The client should then send a POST-as-GET request
// to the order resource to obtain its current state. The status of the
// order will indicate what action the client should take (see below)."
// §7.4
var problem Problem
if errors.As(err, &problem) {
if problem.Type != ProblemTypeOrderNotReady {
return order, err
}
} else {
return order, err
}
}
// unlike with accounts and authorizations, the spec isn't clear on whether
// the server MUST set this on finalizing the order, but their example shows a
// Location header, so I guess if it's set in the response, we should keep it
if newLocation := resp.Header.Get("Location"); newLocation != "" {
order.Location = newLocation
}
if finished, err := orderIsFinished(order); finished {
return order, err
}
// TODO: "The elements of the "authorizations" and "identifiers" arrays are
// immutable once set. If a client observes a change
// in the contents of either array, then it SHOULD consider the order
// invalid."
maxDuration := c.pollTimeout()
start := time.Now()
for time.Since(start) < maxDuration {
// querying an order is expensive on the server-side, so we
// shouldn't do it too frequently; honor server preference
interval, err := retryAfter(resp, c.pollInterval())
if err != nil {
return order, err
}
select {
case <-time.After(interval):
case <-ctx.Done():
return order, ctx.Err()
}
resp, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Location, nil, &order)
if err != nil {
return order, fmt.Errorf("polling order status: %w", err)
}
// (same reasoning as above)
if newLocation := resp.Header.Get("Location"); newLocation != "" {
order.Location = newLocation
}
if finished, err := orderIsFinished(order); finished {
return order, err
}
}
return order, fmt.Errorf("order took too long")
}
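// Illustrative sketch (not part of the upstream file): the finalize-then-download
// step, assuming the order's authorizations are already valid and csrDER is the
// ASN.1 DER encoding of the CSR. GetCertificateChain is defined elsewhere in
// this package.
func exampleFinalizeAndDownload(ctx context.Context, c *Client, acct Account, order Order, csrDER []byte) ([]Certificate, error) {
    order, err := c.FinalizeOrder(ctx, acct, order, csrDER)
    if err != nil {
        return nil, err
    }
    return c.GetCertificateChain(ctx, acct, order.Certificate)
}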
// orderIsFinished returns true if the order processing is complete,
// regardless of success or failure. If this function returns true,
// polling an order status should stop. If there is an error with the
// order, an error will be returned. This function should be called
// only after a request to finalize an order. See §7.4.
func orderIsFinished(order Order) (bool, error) {
switch order.Status {
case StatusInvalid:
// "invalid": The certificate will not be issued. Consider this
// order process abandoned.
return true, fmt.Errorf("final order is invalid: %w", order.Error)
case StatusPending:
// "pending": The server does not believe that the client has
// fulfilled the requirements. Check the "authorizations" array for
// entries that are still pending.
return true, fmt.Errorf("order pending, authorizations remaining: %v", order.Authorizations)
case StatusReady:
// "ready": The server agrees that the requirements have been
// fulfilled, and is awaiting finalization. Submit a finalization
// request.
// (we did just submit a finalization request, so this is an error)
return true, fmt.Errorf("unexpected state: %s - order already finalized", order.Status)
case StatusProcessing:
// "processing": The certificate is being issued. Send a GET request
// after the time given in the "Retry-After" header field of the
// response, if any.
return false, nil
case StatusValid:
// "valid": The server has issued the certificate and provisioned its
// URL to the "certificate" field of the order. Download the
// certificate.
return true, nil
default:
return true, fmt.Errorf("unrecognized order status: %s", order.Status)
}
}

@ -0,0 +1,136 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acme
import "fmt"
// Problem carries the details of an error from HTTP APIs as
// defined in RFC 7807: https://tools.ietf.org/html/rfc7807
// and as extended by RFC 8555 §6.7:
// https://tools.ietf.org/html/rfc8555#section-6.7
type Problem struct {
// "type" (string) - A URI reference [RFC3986] that identifies the
// problem type. This specification encourages that, when
// dereferenced, it provide human-readable documentation for the
// problem type (e.g., using HTML [W3C.REC-html5-20141028]). When
// this member is not present, its value is assumed to be
// "about:blank". §3.1
Type string `json:"type"`
// "title" (string) - A short, human-readable summary of the problem
// type. It SHOULD NOT change from occurrence to occurrence of the
// problem, except for purposes of localization (e.g., using
// proactive content negotiation; see [RFC7231], Section 3.4). §3.1
Title string `json:"title,omitempty"`
// "status" (number) - The HTTP status code ([RFC7231], Section 6)
// generated by the origin server for this occurrence of the problem.
// §3.1
Status int `json:"status,omitempty"`
// "detail" (string) - A human-readable explanation specific to this
// occurrence of the problem. §3.1
//
// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all
// errors."
Detail string `json:"detail,omitempty"`
// "instance" (string) - A URI reference that identifies the specific
// occurrence of the problem. It may or may not yield further
// information if dereferenced. §3.1
Instance string `json:"instance,omitempty"`
// "Sometimes a CA may need to return multiple errors in response to a
// request. Additionally, the CA may need to attribute errors to
// specific identifiers. For instance, a newOrder request may contain
// multiple identifiers for which the CA cannot issue certificates. In
// this situation, an ACME problem document MAY contain the
// 'subproblems' field, containing a JSON array of problem documents."
// RFC 8555 §6.7.1
Subproblems []Subproblem `json:"subproblems,omitempty"`
// For convenience, we've added this field to associate with a value
// that is related to or caused the problem. It is not part of the
// spec, but, if a challenge fails for example, we can associate the
// error with the problematic authz object by setting this field.
// Challenge failures will have this set to an Authorization type.
Resource interface{} `json:"-"`
}
func (p Problem) Error() string {
// TODO: 7.3.3: Handle changes to Terms of Service (notice it uses the Instance field and Link header)
// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all errors."
s := fmt.Sprintf("HTTP %d %s - %s", p.Status, p.Type, p.Detail)
if len(p.Subproblems) > 0 {
for _, v := range p.Subproblems {
s += fmt.Sprintf(", problem %q: %s", v.Type, v.Detail)
}
}
if p.Instance != "" {
s += ", url: " + p.Instance
}
return s
}
// Subproblem describes a more specific error in a problem according to
// RFC 8555 §6.7.1: "An ACME problem document MAY contain the
// 'subproblems' field, containing a JSON array of problem documents,
// each of which MAY contain an 'identifier' field."
type Subproblem struct {
Problem
// "If present, the 'identifier' field MUST contain an ACME
// identifier (Section 9.7.7)." §6.7.1
Identifier Identifier `json:"identifier,omitempty"`
}
// Standard token values for the "type" field of problems, as defined
// in RFC 8555 §6.7: https://tools.ietf.org/html/rfc8555#section-6.7
//
// "To facilitate automatic response to errors, this document defines the
// following standard tokens for use in the 'type' field (within the
// ACME URN namespace 'urn:ietf:params:acme:error:') ... This list is not
// exhaustive. The server MAY return errors whose 'type' field is set to
// a URI other than those defined above."
const (
// The ACME error URN prefix.
ProblemTypeNamespace = "urn:ietf:params:acme:error:"
ProblemTypeAccountDoesNotExist = ProblemTypeNamespace + "accountDoesNotExist"
ProblemTypeAlreadyRevoked = ProblemTypeNamespace + "alreadyRevoked"
ProblemTypeBadCSR = ProblemTypeNamespace + "badCSR"
ProblemTypeBadNonce = ProblemTypeNamespace + "badNonce"
ProblemTypeBadPublicKey = ProblemTypeNamespace + "badPublicKey"
ProblemTypeBadRevocationReason = ProblemTypeNamespace + "badRevocationReason"
ProblemTypeBadSignatureAlgorithm = ProblemTypeNamespace + "badSignatureAlgorithm"
ProblemTypeCAA = ProblemTypeNamespace + "caa"
ProblemTypeCompound = ProblemTypeNamespace + "compound"
ProblemTypeConnection = ProblemTypeNamespace + "connection"
ProblemTypeDNS = ProblemTypeNamespace + "dns"
ProblemTypeExternalAccountRequired = ProblemTypeNamespace + "externalAccountRequired"
ProblemTypeIncorrectResponse = ProblemTypeNamespace + "incorrectResponse"
ProblemTypeInvalidContact = ProblemTypeNamespace + "invalidContact"
ProblemTypeMalformed = ProblemTypeNamespace + "malformed"
ProblemTypeOrderNotReady = ProblemTypeNamespace + "orderNotReady"
ProblemTypeRateLimited = ProblemTypeNamespace + "rateLimited"
ProblemTypeRejectedIdentifier = ProblemTypeNamespace + "rejectedIdentifier"
ProblemTypeServerInternal = ProblemTypeNamespace + "serverInternal"
ProblemTypeTLS = ProblemTypeNamespace + "tls"
ProblemTypeUnauthorized = ProblemTypeNamespace + "unauthorized"
ProblemTypeUnsupportedContact = ProblemTypeNamespace + "unsupportedContact"
ProblemTypeUnsupportedIdentifier = ProblemTypeNamespace + "unsupportedIdentifier"
ProblemTypeUserActionRequired = ProblemTypeNamespace + "userActionRequired"
)
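// Illustrative sketch (not part of the upstream file; assumes the standard
// "errors" package is imported where this is used): callers can unwrap a
// Problem from any error returned by this module to react to specific ACME
// error types, for example backing off when the CA rate-limits requests:
//
//	var problem Problem
//	if errors.As(err, &problem) && problem.Type == ProblemTypeRateLimited {
//		// consult problem.Detail (and any Retry-After header) and slow down
//	}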

@ -0,0 +1,656 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package acmez implements the higher-level flow of the ACME specification,
// RFC 8555: https://tools.ietf.org/html/rfc8555, specifically the sequence
// in Section 7.1 (page 21).
//
// It makes it easy to obtain certificates with various challenge types
// using pluggable challenge solvers, and provides some handy utilities for
// implementing solvers and using the certificates. It DOES NOT manage
// certificates, it only gets them from the ACME server.
//
// NOTE: This package's main function is to get a certificate, not manage it.
// Most users will want to *manage* certificates over the lifetime of a
// long-running program such as an HTTPS or TLS server, and should use CertMagic
// instead: https://github.com/caddyserver/certmagic.
package acmez
import (
"context"
"crypto"
"crypto/rand"
"crypto/x509"
"errors"
"fmt"
weakrand "math/rand"
"net"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/mholt/acmez/acme"
"go.uber.org/zap"
"golang.org/x/net/idna"
)
func init() {
weakrand.Seed(time.Now().UnixNano())
}
// Client is a high-level API for ACME operations. It wraps
// a lower-level ACME client with useful functions to make
// common flows easier, especially for the issuance of
// certificates.
type Client struct {
*acme.Client
// Map of solvers keyed by name of the challenge type.
ChallengeSolvers map[string]Solver
// An optional logger. Default: no logs
Logger *zap.Logger
}
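// Illustrative sketch (not part of the upstream file): wiring up the
// high-level client. The solver passed in is any caller-provided Solver
// implementation; the map key must match the challenge type name offered by
// the CA (for example "http-01").
func exampleNewClient(lowLevel *acme.Client, httpSolver Solver, logger *zap.Logger) Client {
    return Client{
        Client:           lowLevel,
        ChallengeSolvers: map[string]Solver{"http-01": httpSolver},
        Logger:           logger,
    }
}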
// ObtainCertificateUsingCSR obtains all resulting certificate chains using the given CSR, which
// must be completely and properly filled out (particularly its DNSNames and Raw fields - this
// usually involves creating a template CSR, then calling x509.CreateCertificateRequest, then
// x509.ParseCertificateRequest on the output). The Subject CommonName is NOT considered.
//
// It implements every single part of the ACME flow described in RFC 8555 §7.1 with the exception
// of "Create account" because this method signature does not have a way to return the udpated
// account object. The account's status MUST be "valid" in order to succeed.
//
// As far as SANs go, this method currently only supports DNSNames on the csr.
func (c *Client) ObtainCertificateUsingCSR(ctx context.Context, account acme.Account, csr *x509.CertificateRequest) ([]acme.Certificate, error) {
if account.Status != acme.StatusValid {
return nil, fmt.Errorf("account status is not valid: %s", account.Status)
}
if csr == nil {
return nil, fmt.Errorf("missing CSR")
}
var ids []acme.Identifier
for _, name := range csr.DNSNames {
// "The domain name MUST be encoded in the form in which it would appear
// in a certificate. That is, it MUST be encoded according to the rules
// in Section 7 of [RFC5280]." §7.1.4
normalizedName, err := idna.ToASCII(name)
if err != nil {
return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
}
ids = append(ids, acme.Identifier{
Type: "dns",
Value: normalizedName,
})
}
if len(ids) == 0 {
return nil, fmt.Errorf("no identifiers found")
}
order := acme.Order{Identifiers: ids}
var err error
// remember which challenge types failed for which identifiers
// so we can retry with other challenge types
failedChallengeTypes := make(failedChallengeMap)
const maxAttempts = 3 // hard cap on number of retries for good measure
for attempt := 1; attempt <= maxAttempts; attempt++ {
if attempt > 1 {
select {
case <-time.After(1 * time.Second):
case <-ctx.Done():
return nil, ctx.Err()
}
}
// create order for a new certificate
order, err = c.Client.NewOrder(ctx, account, order)
if err != nil {
return nil, fmt.Errorf("creating new order: %w", err)
}
// solve one challenge for each authz on the order
err = c.solveChallenges(ctx, account, order, failedChallengeTypes)
// yay, we win!
if err == nil {
break
}
// for some errors, we can retry with different challenge types
var problem acme.Problem
if errors.As(err, &problem) {
authz := problem.Resource.(acme.Authorization)
if c.Logger != nil {
c.Logger.Error("validating authorization",
zap.String("identifier", authz.IdentifierValue()),
zap.Error(err),
zap.String("order", order.Location),
zap.Int("attempt", attempt),
zap.Int("max_attempts", maxAttempts))
}
err = fmt.Errorf("solving challenge: %s: %w", authz.IdentifierValue(), err)
if errors.As(err, &retryableErr{}) {
continue
}
return nil, err
}
return nil, fmt.Errorf("solving challenges: %w (order=%s)", err, order.Location)
}
if c.Logger != nil {
c.Logger.Info("validations succeeded; finalizing order", zap.String("order", order.Location))
}
// finalize the order, which requests the CA to issue us a certificate
order, err = c.Client.FinalizeOrder(ctx, account, order, csr.Raw)
if err != nil {
return nil, fmt.Errorf("finalizing order %s: %w", order.Location, err)
}
// finally, download the certificate
certChains, err := c.Client.GetCertificateChain(ctx, account, order.Certificate)
if err != nil {
return nil, fmt.Errorf("downloading certificate chain from %s: %w (order=%s)",
order.Certificate, err, order.Location)
}
if c.Logger != nil {
if len(certChains) == 0 {
c.Logger.Info("no certificate chains offered by server")
} else {
c.Logger.Info("successfully downloaded available certificate chains",
zap.Int("count", len(certChains)),
zap.String("first_url", certChains[0].URL))
}
}
return certChains, nil
}
// ObtainCertificate is the same as ObtainCertificateUsingCSR, except it is a slight wrapper
// that generates the CSR for you. Doing so requires the private key you will be using for
// the certificate (different from the account private key). It obtains a certificate for
// the given SANs (domain names) using the provided account.
func (c *Client) ObtainCertificate(ctx context.Context, account acme.Account, certPrivateKey crypto.Signer, sans []string) ([]acme.Certificate, error) {
if len(sans) == 0 {
return nil, fmt.Errorf("no DNS names provided: %v", sans)
}
if certPrivateKey == nil {
return nil, fmt.Errorf("missing certificate private key")
}
csrTemplate := new(x509.CertificateRequest)
for _, name := range sans {
if ip := net.ParseIP(name); ip != nil {
csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
} else if strings.Contains(name, "@") {
csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
} else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
csrTemplate.URIs = append(csrTemplate.URIs, u)
} else {
csrTemplate.DNSNames = append(csrTemplate.DNSNames, name)
}
}
// to properly fill out the CSR, we need to create it, then parse it
csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, certPrivateKey)
if err != nil {
return nil, fmt.Errorf("generating CSR: %v", err)
}
csr, err := x509.ParseCertificateRequest(csrDER)
if err != nil {
return nil, fmt.Errorf("parsing generated CSR: %v", err)
}
return c.ObtainCertificateUsingCSR(ctx, account, csr)
}
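// Illustrative sketch (not part of the upstream file): the typical high-level
// call. It assumes "account" has already been registered with the CA (its
// Status must be "valid") and that certKey is the certificate's private key,
// which must be different from the account key.
func exampleObtain(ctx context.Context, client Client, account acme.Account, certKey crypto.Signer) ([]acme.Certificate, error) {
    return client.ObtainCertificate(ctx, account, certKey, []string{"example.com", "www.example.com"})
}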
// getAuthzObjects constructs stateful authorization objects for each authz on the order.
// It includes all authorizations regardless of their status so that they can be
// deactivated at the end if necessary. Be sure to check authz status before operating
// on the authz; not all will be "pending" - some authorizations might already be valid.
func (c *Client) getAuthzObjects(ctx context.Context, account acme.Account, order acme.Order,
failedChallengeTypes failedChallengeMap) ([]*authzState, error) {
var authzStates []*authzState
var err error
// start by allowing each authz's solver to present for its challenge
for _, authzURL := range order.Authorizations {
authz := &authzState{account: account}
authz.Authorization, err = c.Client.GetAuthorization(ctx, account, authzURL)
if err != nil {
return nil, fmt.Errorf("getting authorization at %s: %w", authzURL, err)
}
// add all offered challenge types to our memory if they
// aren't there already; we use this for statistics to
// choose the most successful challenge type over time;
// if initial fill, randomize challenge order
preferredChallengesMu.Lock()
preferredWasEmpty := len(preferredChallenges) == 0
for _, chal := range authz.Challenges {
preferredChallenges.addUnique(chal.Type)
}
if preferredWasEmpty {
weakrand.Shuffle(len(preferredChallenges), func(i, j int) {
preferredChallenges[i], preferredChallenges[j] =
preferredChallenges[j], preferredChallenges[i]
})
}
preferredChallengesMu.Unlock()
// copy over any challenges that are not known to have already
// failed, making them candidates for solving for this authz
failedChallengeTypes.enqueueUnfailedChallenges(authz)
authzStates = append(authzStates, authz)
}
// sort authzs so that challenges which require waiting go first; no point
// in getting authorizations quickly while others will take a long time
sort.SliceStable(authzStates, func(i, j int) bool {
_, iIsWaiter := authzStates[i].currentSolver.(Waiter)
_, jIsWaiter := authzStates[j].currentSolver.(Waiter)
// "if i is a waiter, and j is not a waiter, then i is less than j"
return iIsWaiter && !jIsWaiter
})
return authzStates, nil
}
func (c *Client) solveChallenges(ctx context.Context, account acme.Account, order acme.Order, failedChallengeTypes failedChallengeMap) error {
authzStates, err := c.getAuthzObjects(ctx, account, order, failedChallengeTypes)
if err != nil {
return err
}
// when the function returns, make sure we clean up any and all resources
defer func() {
// always clean up any remaining challenge solvers
for _, authz := range authzStates {
if authz.currentSolver == nil {
// happens when authz state ended on a challenge we have no
// solver for or if we have already cleaned up this solver
continue
}
if err := authz.currentSolver.CleanUp(ctx, authz.currentChallenge); err != nil {
if c.Logger != nil {
c.Logger.Error("cleaning up solver",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Error(err))
}
}
}
if err == nil {
return
}
// if this function returns with an error, make sure to deactivate
// all pending or valid authorization objects so they don't "leak"
// See: https://github.com/go-acme/lego/issues/383 and https://github.com/go-acme/lego/issues/353
for _, authz := range authzStates {
if authz.Status != acme.StatusPending && authz.Status != acme.StatusValid {
continue
}
updatedAuthz, err := c.Client.DeactivateAuthorization(ctx, account, authz.Location)
if err != nil {
if c.Logger != nil {
c.Logger.Error("deactivating authorization",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz", authz.Location),
zap.Error(err))
}
}
authz.Authorization = updatedAuthz
}
}()
// present for all challenges first; this allows them all to begin any
// slow tasks up front if necessary before we start polling/waiting
for _, authz := range authzStates {
// see §7.1.6 for state transitions
if authz.Status != acme.StatusPending && authz.Status != acme.StatusValid {
return fmt.Errorf("authz %s has unexpected status; order will fail: %s", authz.Location, authz.Status)
}
if authz.Status == acme.StatusValid {
continue
}
err = c.presentForNextChallenge(ctx, authz)
if err != nil {
return err
}
}
// now that all solvers have had the opportunity to present, tell
// the server to begin the selected challenge for each authz
for _, authz := range authzStates {
err = c.initiateCurrentChallenge(ctx, authz)
if err != nil {
return err
}
}
// poll each authz to wait for completion of all challenges
for _, authz := range authzStates {
err = c.pollAuthorization(ctx, account, authz, failedChallengeTypes)
if err != nil {
return err
}
}
return nil
}
func (c *Client) presentForNextChallenge(ctx context.Context, authz *authzState) error {
if authz.Status != acme.StatusPending {
if authz.Status == acme.StatusValid && c.Logger != nil {
c.Logger.Info("authorization already valid",
zap.String("identifier", authz.IdentifierValue()),
zap.String("authz_url", authz.Location),
zap.Time("expires", authz.Expires))
}
return nil
}
err := c.nextChallenge(authz)
if err != nil {
return err
}
if c.Logger != nil {
c.Logger.Info("trying to solve challenge",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.String("ca", c.Directory))
}
err = authz.currentSolver.Present(ctx, authz.currentChallenge)
if err != nil {
return fmt.Errorf("presenting for challenge: %w", err)
}
return nil
}
func (c *Client) initiateCurrentChallenge(ctx context.Context, authz *authzState) error {
if authz.Status != acme.StatusPending {
return nil
}
// by now, all challenges should have had an opportunity to present, so
// if this solver needs more time to finish presenting, wait on it now
// (yes, this does block the initiation of the other challenges, but
// that's probably OK, since we can't finalize the order until the slow
// challenges are done too)
if waiter, ok := authz.currentSolver.(Waiter); ok {
err := waiter.Wait(ctx, authz.currentChallenge)
if err != nil {
return fmt.Errorf("waiting for solver %T to be ready: %w", authz.currentSolver, err)
}
}
// tell the server to initiate the challenge
var err error
authz.currentChallenge, err = c.Client.InitiateChallenge(ctx, authz.account, authz.currentChallenge)
if err != nil {
return fmt.Errorf("initiating challenge with server: %w", err)
}
if c.Logger != nil {
c.Logger.Debug("challenge accepted",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type))
}
return nil
}
// nextChallenge sets the next challenge (and associated solver) on
// authz; it returns an error if there is no compatible challenge.
func (c *Client) nextChallenge(authz *authzState) error {
preferredChallengesMu.Lock()
defer preferredChallengesMu.Unlock()
// find the most-preferred challenge that is also in the list of
// remaining challenges, then make sure we have a solver for it
for _, prefChalType := range preferredChallenges {
for i, remainingChal := range authz.remainingChallenges {
if remainingChal.Type != prefChalType.typeName {
continue
}
authz.currentChallenge = remainingChal
authz.currentSolver = c.ChallengeSolvers[authz.currentChallenge.Type]
if authz.currentSolver != nil {
authz.remainingChallenges = append(authz.remainingChallenges[:i], authz.remainingChallenges[i+1:]...)
return nil
}
if c.Logger != nil {
c.Logger.Debug("no solver configured", zap.String("challenge_type", remainingChal.Type))
}
break
}
}
return fmt.Errorf("%s: no solvers available for remaining challenges (configured=%v offered=%v remaining=%v)",
authz.IdentifierValue(), c.enabledChallengeTypes(), authz.listOfferedChallenges(), authz.listRemainingChallenges())
}
func (c *Client) pollAuthorization(ctx context.Context, account acme.Account, authz *authzState, failedChallengeTypes failedChallengeMap) error {
// In §7.5.1, the spec says:
//
// "For challenges where the client can tell when the server has
// validated the challenge (e.g., by seeing an HTTP or DNS request
// from the server), the client SHOULD NOT begin polling until it has
// seen the validation request from the server."
//
// However, in practice, this is difficult in the general case because
// we would need to design some relatively-nuanced concurrency and hope
// that the solver implementations also get their side right -- and the
// fact that it's even possible only sometimes makes it harder, because
// each solver needs a way to signal whether we should wait for its
// approval. So no, I've decided not to implement that recommendation
// in this particular library, but any implementations that use the lower
// ACME API directly are welcome and encouraged to do so where possible.
var err error
authz.Authorization, err = c.Client.PollAuthorization(ctx, account, authz.Authorization)
// if a challenge was attempted (i.e. did not start valid)...
if authz.currentSolver != nil {
// increment the statistics on this challenge type before handling error
preferredChallengesMu.Lock()
preferredChallenges.increment(authz.currentChallenge.Type, err == nil)
preferredChallengesMu.Unlock()
// always clean up the challenge solver after polling, regardless of error
cleanupErr := authz.currentSolver.CleanUp(ctx, authz.currentChallenge)
if cleanupErr != nil && c.Logger != nil {
c.Logger.Error("cleaning up solver",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Error(cleanupErr))
}
authz.currentSolver = nil // avoid cleaning it up again later
}
// finally, handle any error from validating the authz
if err != nil {
var problem acme.Problem
if errors.As(err, &problem) {
if c.Logger != nil {
c.Logger.Error("challenge failed",
zap.String("identifier", authz.IdentifierValue()),
zap.String("challenge_type", authz.currentChallenge.Type),
zap.Int("status_code", problem.Status),
zap.String("problem_type", problem.Type),
zap.String("error", problem.Detail))
}
failedChallengeTypes.rememberFailedChallenge(authz)
switch problem.Type {
case acme.ProblemTypeConnection,
acme.ProblemTypeDNS,
acme.ProblemTypeServerInternal,
acme.ProblemTypeUnauthorized,
acme.ProblemTypeTLS:
// this error might be recoverable with another challenge type
return retryableErr{err}
}
}
return fmt.Errorf("[%s] %w", authz.Authorization.IdentifierValue(), err)
}
return nil
}
func (c *Client) enabledChallengeTypes() []string {
enabledChallenges := make([]string, 0, len(c.ChallengeSolvers))
for name, val := range c.ChallengeSolvers {
if val != nil {
enabledChallenges = append(enabledChallenges, name)
}
}
return enabledChallenges
}
type authzState struct {
acme.Authorization
account acme.Account
currentChallenge acme.Challenge
currentSolver Solver
remainingChallenges []acme.Challenge
}
func (authz authzState) listOfferedChallenges() []string {
return challengeTypeNames(authz.Challenges)
}
func (authz authzState) listRemainingChallenges() []string {
return challengeTypeNames(authz.remainingChallenges)
}
func challengeTypeNames(challengeList []acme.Challenge) []string {
names := make([]string, 0, len(challengeList))
for _, chal := range challengeList {
names = append(names, chal.Type)
}
return names
}
// TODO: possibly configurable policy? converge to most successful (current) vs. completely random
// challengeHistory is a memory of how successful a challenge type is.
type challengeHistory struct {
typeName string
successes, total int
}
func (ch challengeHistory) successRatio() float64 {
if ch.total == 0 {
return 1.0
}
return float64(ch.successes) / float64(ch.total)
}
// failedChallengeMap keeps track of failed challenge types per identifier.
type failedChallengeMap map[string][]string
func (fcm failedChallengeMap) rememberFailedChallenge(authz *authzState) {
idKey := fcm.idKey(authz)
fcm[idKey] = append(fcm[idKey], authz.currentChallenge.Type)
}
// enqueueUnfailedChallenges enqueues each challenge offered in authz if it
// is not known to have failed for the authz's identifier already.
func (fcm failedChallengeMap) enqueueUnfailedChallenges(authz *authzState) {
idKey := fcm.idKey(authz)
for _, chal := range authz.Challenges {
if !contains(fcm[idKey], chal.Type) {
authz.remainingChallenges = append(authz.remainingChallenges, chal)
}
}
}
func (fcm failedChallengeMap) idKey(authz *authzState) string {
return authz.Identifier.Type + authz.IdentifierValue()
}
// challengeTypes is a list of challenges we've seen and/or
// used previously. It sorts from most to least successful, so the
// most successful challenge types come first.
type challengeTypes []challengeHistory
// Len is part of sort.Interface.
func (ct challengeTypes) Len() int { return len(ct) }
// Swap is part of sort.Interface.
func (ct challengeTypes) Swap(i, j int) { ct[i], ct[j] = ct[j], ct[i] }
// Less is part of sort.Interface. It sorts challenge
// types from highest success ratio to lowest.
func (ct challengeTypes) Less(i, j int) bool {
return ct[i].successRatio() > ct[j].successRatio()
}
func (ct *challengeTypes) addUnique(challengeType string) {
for _, c := range *ct {
if c.typeName == challengeType {
return
}
}
*ct = append(*ct, challengeHistory{typeName: challengeType})
}
func (ct challengeTypes) increment(challengeType string, successful bool) {
defer sort.Stable(ct) // keep most successful challenges in front
for i, c := range ct {
if c.typeName == challengeType {
ct[i].total++
if successful {
ct[i].successes++
}
return
}
}
}
func contains(haystack []string, needle string) bool {
for _, s := range haystack {
if s == needle {
return true
}
}
return false
}
// retryableErr wraps an error that indicates the caller should retry
// the operation; specifically with a different challenge type.
type retryableErr struct{ error }
func (re retryableErr) Unwrap() error { return re.error }
// Keep a list of challenge types we've seen offered by servers,
// ordered by success rate so the most successful types are preferred.
var (
preferredChallenges challengeTypes
preferredChallengesMu sync.Mutex
)

@ -0,0 +1,8 @@
module github.com/mholt/acmez
go 1.14
require (
go.uber.org/zap v1.15.0
golang.org/x/net v0.0.0-20200707034311-ab3426394381
)

@ -0,0 +1,61 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

@ -0,0 +1,72 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmez
import (
"context"
"github.com/mholt/acmez/acme"
)
// Solver is a type that can solve ACME challenges. All
// implementations MUST honor context cancellation.
type Solver interface {
// Present is called just before a challenge is initiated.
// The implementation MUST prepare anything that is necessary
// for completing the challenge; for example, provisioning
// an HTTP resource, TLS certificate, or a DNS record.
//
// It MUST return quickly. If presenting the challenge token
// will take time, then the implementation MUST do the
// minimum amount of work required in this method, and
// SHOULD additionally implement the Waiter interface.
// For example, a DNS challenge solver might make a quick
// HTTP request to a provider's API to create a new DNS
// record, but it might be several minutes or hours before
// the DNS record propagates. The API request should be
// done in Present(), and waiting for propagation should
// be done in Wait().
Present(context.Context, acme.Challenge) error
// CleanUp is called after a challenge is finished, whether
// successful or not. It MUST free/remove any resources it
// allocated/created during Present. It SHOULD NOT require
// that Present ran successfully. It MUST return quickly.
CleanUp(context.Context, acme.Challenge) error
}
// Waiter is an optional interface for Solvers to implement. Its
// primary purpose is to help ensure the challenge can be solved
// before the server gives up trying to verify the challenge.
//
// If implemented, it will be called after Present() but just
// before the challenge is initiated with the server. It blocks
// until the challenge is ready to be solved. (For example,
// waiting on a DNS record to propagate.) This allows challenges
// to succeed that would normally fail because they take too long
// to set up (i.e. the ACME server would give up polling DNS or
// the client would timeout its polling). By separating Present()
// from Wait(), it allows the slow part of all solvers to begin
// up front, rather than waiting on each solver one at a time.
//
// It MUST NOT do anything exclusive of Present() that is required
// for the challenge to succeed. In other words, if Present() is
// called but Wait() is not, then the challenge should still be able
// to succeed assuming infinite time.
//
// Implementations MUST honor context cancellation.
type Waiter interface {
Wait(context.Context, acme.Challenge) error
}
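
To make the Solver and Waiter contracts above concrete, here is a minimal, hypothetical dns-01 solver sketch. It is not part of acmez; the SetTXT/DeleteTXT callbacks stand in for whatever DNS provider API the caller actually has, and only the Challenge fields visible above (Identifier.Value, KeyAuthorization) are assumed.

```go
package dnssolver

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"net"
	"time"

	"github.com/mholt/acmez/acme"
)

// DNSSolver is a hypothetical dns-01 Solver/Waiter sketch. SetTXT and
// DeleteTXT are placeholders for a real DNS provider API.
type DNSSolver struct {
	SetTXT    func(ctx context.Context, fqdn, value string) error
	DeleteTXT func(ctx context.Context, fqdn string) error
}

// recordName is the conventional dns-01 TXT owner name for the identifier.
func recordName(chal acme.Challenge) string {
	return "_acme-challenge." + chal.Identifier.Value
}

// txtValue derives the dns-01 TXT value: base64url(SHA-256(key authorization)),
// per RFC 8555 §8.4.
func txtValue(keyAuth string) string {
	sum := sha256.Sum256([]byte(keyAuth))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}

// Present does only the quick provider API call; propagation belongs in Wait.
func (s DNSSolver) Present(ctx context.Context, chal acme.Challenge) error {
	return s.SetTXT(ctx, recordName(chal), txtValue(chal.KeyAuthorization))
}

// CleanUp removes the record whether or not the challenge succeeded.
func (s DNSSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
	return s.DeleteTXT(ctx, recordName(chal))
}

// Wait polls until the record is publicly visible or the context is cancelled.
func (s DNSSolver) Wait(ctx context.Context, chal acme.Challenge) error {
	want := txtValue(chal.KeyAuthorization)
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		if txts, err := net.LookupTXT(recordName(chal)); err == nil {
			for _, txt := range txts {
				if txt == want {
					return nil
				}
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```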

@ -0,0 +1,98 @@
// Copyright 2020 Matthew Holt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package acmez
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"math/big"
"time"
"github.com/mholt/acmez/acme"
)
// TLSALPN01ChallengeCert creates a certificate that can be used for
// handshakes while solving the tls-alpn-01 challenge. See RFC 8737 §3.
func TLSALPN01ChallengeCert(challenge acme.Challenge) (*tls.Certificate, error) {
keyAuthSum := sha256.Sum256([]byte(challenge.KeyAuthorization))
keyAuthSumASN1, err := asn1.Marshal(keyAuthSum[:sha256.Size])
if err != nil {
return nil, err
}
certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
return nil, err
}
challengeKeyASN1, err := x509.MarshalECPrivateKey(certKey)
if err != nil {
return nil, err
}
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{CommonName: "ACME challenge"},
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour * 365),
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{challenge.Identifier.Value},
// add key authentication digest as the acmeValidation-v1 extension
// (marked as critical such that it won't be used by non-ACME software).
// Reference: https://www.rfc-editor.org/rfc/rfc8737.html#section-3
ExtraExtensions: []pkix.Extension{
{
Id: idPEACMEIdentifierV1,
Critical: true,
Value: keyAuthSumASN1,
},
},
}
challengeCertDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &certKey.PublicKey, certKey)
if err != nil {
return nil, err
}
challengeCertPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: challengeCertDER})
challengeKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: challengeKeyASN1})
cert, err := tls.X509KeyPair(challengeCertPEM, challengeKeyPEM)
if err != nil {
return nil, err
}
return &cert, nil
}
// ACMETLS1Protocol is the ALPN value for the TLS-ALPN challenge
// handshake. See RFC 8737 §6.2.
const ACMETLS1Protocol = "acme-tls/1"
// idPEACMEIdentifierV1 is the SMI Security for PKIX Certification Extension OID referencing the ACME extension.
// See RFC 8737 §6.1. https://www.rfc-editor.org/rfc/rfc8737.html#section-6.1
var idPEACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31}
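
As a rough usage sketch (not acmez's own handler), a server that wants to answer tls-alpn-01 handshakes could return this challenge certificate from GetCertificate whenever the ClientHello offers acme-tls/1. The lookupPendingChallenge callback is a hypothetical hook for however the caller tracks outstanding challenges.

```go
package alpnexample

import (
	"crypto/tls"

	"github.com/mholt/acmez"
	"github.com/mholt/acmez/acme"
)

// tlsConfigWithALPN wires TLSALPN01ChallengeCert into a tls.Config.
// lookupPendingChallenge is a hypothetical hook, not part of acmez.
func tlsConfigWithALPN(lookupPendingChallenge func(serverName string) (acme.Challenge, bool)) *tls.Config {
	return &tls.Config{
		NextProtos: []string{acmez.ACMETLS1Protocol, "h2", "http/1.1"},
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			// Only intercept handshakes that explicitly offer the ACME ALPN protocol.
			for _, proto := range hello.SupportedProtos {
				if proto == acmez.ACMETLS1Protocol {
					if chal, ok := lookupPendingChallenge(hello.ServerName); ok {
						return acmez.TLSALPN01ChallengeCert(chal)
					}
					break
				}
			}
			// Returning nil, nil falls back to the tls.Config's normal certificate selection.
			return nil, nil
		},
	}
}
```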

@ -0,0 +1,8 @@
coverage:
status:
project:
default:
target: 40%
threshold: null
patch: false
changes: false

@ -0,0 +1,4 @@
*.6
tags
test.out
a.out

@ -0,0 +1,17 @@
language: go
sudo: false
go:
- "1.12.x"
- "1.13.x"
- tip
env:
- GO111MODULE=on
script:
- go generate ./... && test `git ls-files --modified | wc -l` = 0
- go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)

@ -0,0 +1 @@
Miek Gieben <miek@miek.nl>

@ -0,0 +1 @@
* @miekg @tmthrgd

@ -0,0 +1,10 @@
Alex A. Skinner
Andrew Tunnell-Jones
Ask Bjørn Hansen
Dave Cheney
Dusty Wilson
Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev
James Hartig

@ -0,0 +1,9 @@
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
is governed by a BSD-style license that can be found in the LICENSE file.
Extensions of the original work are copyright (c) 2011 Miek Gieben
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.

@ -0,0 +1,30 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
As this is fork of the official Go code the same license applies.
Extensions of the original work are copyright (c) 2011 Miek Gieben

@ -0,0 +1,33 @@
# Makefile for fuzzing
#
# Use go-fuzz and needs the tools installed.
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
#
# Installing go-fuzz:
# $ make -f Makefile.fuzz get
# Installs:
# * github.com/dvyukov/go-fuzz/go-fuzz
# * get github.com/dvyukov/go-fuzz/go-fuzz-build
all: build
.PHONY: build
build:
go-fuzz-build -tags fuzz github.com/miekg/dns
.PHONY: build-newrr
build-newrr:
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
.PHONY: fuzz
fuzz:
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
.PHONY: get
get:
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
.PHONY: clean
clean:
rm *-fuzz.zip

@ -0,0 +1,52 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
# used to tag the git repo; we're not building any artifacts, so there is nothing
# to upload to GitHub.
#
# * Up the version in version.go
# * Run: make -f Makefile.release release
# * will *commit* your change with 'Release $VERSION'
# * push to github
#
define GO
//+build ignore
package main
import (
"fmt"
"github.com/miekg/dns"
)
func main() {
fmt.Println(dns.Version.String())
}
endef
$(file > version_release.go,$(GO))
VERSION:=$(shell go run version_release.go)
TAG="v$(VERSION)"
all:
@echo Use the \'release\' target to start a release $(VERSION)
rm -f version_release.go
.PHONY: release
release: commit push
@echo Released $(VERSION)
rm -f version_release.go
.PHONY: commit
commit:
@echo Committing release $(VERSION)
git commit -am"Release $(VERSION)"
git tag $(TAG)
.PHONY: push
push:
@echo Pushing release $(VERSION) to master
git push --tags
git push

@ -0,0 +1,176 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns)
[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
# Alternative (more granular) approach to a DNS library
> Less is more.
Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there
isn't a convenience function for it. Server side and client side programming is supported, i.e. you
can build servers and resolvers with it.
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
# Goals
* KISS;
* Fast;
* Small API. If it's easy to code in Go, don't make a function for it.
# Users
A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/coredns/coredns
* https://cloudflare.com
* https://github.com/abh/geodns
* https://github.com/baidu/bfe
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns
* http://www.dns-lg.com/
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for <http://public-dns.info/>
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
* http://kelda.io
* https://github.com/ipdcode/hades <https://jd.com>
* https://github.com/StackExchange/dnscontrol/
* https://www.dnsperf.com/
* https://dnssectest.net/
* https://dns.apebits.com
* https://github.com/oif/apex
* https://github.com/jedisct1/dnscrypt-proxy
* https://github.com/jedisct1/rpdns
* https://github.com/xor-gate/sshfp
* https://github.com/rs/dnstrace
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
* https://github.com/semihalev/sdns
* https://render.com
* https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns
Send pull request if you want to be listed here.
# Features
* UDP/TCP queries, IPv4 and IPv6
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported)
* Fast
* Server side programming (mimicking the net/http package)
* Client side programming
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
* EDNS0, NSID, Cookies
* AXFR/IXFR
* TSIG, SIG(0)
* DNS over TLS (DoT): encrypted connection between client and server over TCP
* DNS name compression
Have fun!
Miek Gieben - 2010-2012 - <miek@miek.nl>
DNS Authors 2012-
# Building
This library uses Go modules and uses semantic versioning. Building is done with the `go` tool, so
the following should work:
go get github.com/miekg/dns
go build github.com/miekg/dns
## Examples
A short "how to use the API" is at the beginning of doc.go (this will also show when you call `godoc
github.com/miekg/dns`).
Example programs can be found in the `github.com/miekg/exdns` repository.
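
For a quick, illustrative client-side query (a minimal sketch; the resolver address below is just an example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA) // SetQuestion also sets the RD bit
	m.SetEdns0(4096, false)                           // advertise a larger UDP buffer

	c := new(dns.Client)
	in, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("answer after %v:\n%v\n", rtt, in)
}
```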
## Supported RFCs
*all of them*
* 103{4,5} - DNS standard
* 1348 - NSAP record (removed the record)
* 1982 - Serial Arithmetic
* 1876 - LOC record
* 1995 - IXFR
* 1996 - DNS notify
* 2136 - DNS Update (dynamic updates)
* 2181 - RRset definition - there is no RRset type though, just []RR
* 2537 - RSAMD5 DNS keys
* 2065 - DNSSEC (updated in later RFCs)
* 2671 - EDNS record
* 2782 - SRV record
* 2845 - TSIG record
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3123 - APL record
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
* 4408 - SPF record
* 4509 - SHA256 Hash in DS
* 4592 - Wildcards in the DNS
* 4635 - HMAC SHA TSIG
* 4701 - DHCID
* 4892 - id.server
* 5001 - NSID
* 5155 - NSEC3 record
* 5205 - HIP record
* 5702 - SHA2 in the DNS
* 5936 - AXFR
* 5966 - TCP implementation recommendations
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
* 6944 - DNSSEC DNSKEY Algorithm Status
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7477 - CSYNC RR
* 7828 - edns-tcp-keepalive EDNS0 Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations
* 7871 - EDNS0 Client Subnet
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
## Loosely Based Upon
* ldns - <https://nlnetlabs.nl/projects/ldns/about/>
* NSD - <https://nlnetlabs.nl/projects/nsd/about/>
* Net::DNS - <http://www.net-dns.org/>
* GRONG - <https://github.com/bortzmeyer/grong>

@ -0,0 +1,61 @@
package dns
// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
// It returns a MsgAcceptAction to indicate what should happen with the message.
type MsgAcceptFunc func(dh Header) MsgAcceptAction
// DefaultMsgAcceptFunc checks the request and will reject if:
//
// * isn't a request (don't respond in that case)
//
// * opcode isn't OpcodeQuery or OpcodeNotify
//
// * Zero bit isn't zero
//
// * has more than 1 question in the question section
//
// * has more than 1 RR in the Answer section
//
// * has more than 0 RRs in the Authority section
//
// * has more than 2 RRs in the Additional section
//
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
type MsgAcceptAction int
const (
MsgAccept MsgAcceptAction = iota // Accept the message
MsgReject // Reject the message with a RcodeFormatError
MsgIgnore // Ignore the error and send nothing back.
MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented
)
func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
if isResponse := dh.Bits&_QR != 0; isResponse {
return MsgIgnore
}
// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
opcode := int(dh.Bits>>11) & 0xF
if opcode != OpcodeQuery && opcode != OpcodeNotify {
return MsgRejectNotImplemented
}
if dh.Qdcount != 1 {
return MsgReject
}
// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
if dh.Ancount > 1 {
return MsgReject
}
// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
if dh.Nscount > 1 {
return MsgReject
}
if dh.Arcount > 2 {
return MsgReject
}
return MsgAccept
}
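
To show how this hook is meant to be used, a sketch that wraps the package default so NOTIFY messages are silently ignored while everything else keeps the default behavior; the opcode bit arithmetic mirrors defaultMsgAcceptFunc above, and a server that does not set its own accept function is assumed to fall back to this package-level default.

```go
package main

import "github.com/miekg/dns"

func main() {
	base := dns.DefaultMsgAcceptFunc
	dns.DefaultMsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
		// Extract the opcode the same way defaultMsgAcceptFunc does.
		if opcode := int(dh.Bits>>11) & 0xF; opcode == dns.OpcodeNotify {
			return dns.MsgIgnore
		}
		return base(dh)
	}
	// ... construct and start a dns.Server as usual; the accept hook is
	// consulted for every incoming message header before full unpacking.
}
```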

@ -0,0 +1,430 @@
package dns
// A client implementation.
import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strings"
"time"
)
const (
dnsTimeout time.Duration = 2 * time.Second
tcpIdleTimeout time.Duration = 8 * time.Second
)
// A Conn represents a connection to a DNS server.
type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
tsigRequestMAC string
}
// A Client defines parameters for a DNS client.
type Client struct {
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
UDPSize uint16 // minimum receive buffer for UDP messages
TLSConfig *tls.Config // TLS connection configuration
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
// will it fall back to TCP in case of truncation.
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.Exchange(m, a)
return r, err
}
func (c *Client) dialTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
if c.DialTimeout != 0 {
return c.DialTimeout
}
return dnsTimeout
}
func (c *Client) readTimeout() time.Duration {
if c.ReadTimeout != 0 {
return c.ReadTimeout
}
return dnsTimeout
}
func (c *Client) writeTimeout() time.Duration {
if c.WriteTimeout != 0 {
return c.WriteTimeout
}
return dnsTimeout
}
// Dial connects to the address on the named network.
func (c *Client) Dial(address string) (conn *Conn, err error) {
// create a new dialer with the appropriate timeout
var d net.Dialer
if c.Dialer == nil {
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
} else {
d = *c.Dialer
}
network := c.Net
if network == "" {
network = "udp"
}
useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
conn = new(Conn)
if useTLS {
network = strings.TrimSuffix(network, "-tls")
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
} else {
conn.Conn, err = d.Dial(network, address)
}
if err != nil {
return nil, err
}
return conn, nil
}
// Exchange performs a synchronous query. It sends the message m to the address
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
//
// Exchange does not retry a failed query, nor will it fall back to TCP in
// case of truncation.
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
// of 512 bytes.
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
co, err := c.Dial(address)
if err != nil {
return nil, 0, err
}
defer co.Close()
return c.ExchangeWithConn(m, co)
}
// ExchangeWithConn has the same behavior as Exchange, just with a predetermined connection
// that will be used instead of creating a new one.
// Usage pattern with a *dns.Client:
// c := new(dns.Client)
// // connection management logic goes here
//
// conn := c.Dial(address)
// in, rtt, err := c.ExchangeWithConn(message, conn)
//
// This allows users of the library to implement their own connection management,
// as opposed to Exchange, which will always use new connections and incur the added overhead
// that entails when using "tcp" and especially "tcp-tls" clients.
func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchange(m, conn)
}
q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
return c.exchange(m, conn)
})
if r != nil && shared {
r = r.Copy()
}
return r, rtt, err
}
func (c *Client) exchange(m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) {
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
// Otherwise use the client's configured UDP size.
if opt == nil && c.UDPSize >= MinMsgSize {
co.UDPSize = c.UDPSize
}
co.TsigSecret = c.TsigSecret
t := time.Now()
// write with the appropriate write timeout
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
rtt = time.Since(t)
return r, rtt, err
}
// ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction signature
// is verified. This method always tries to return the message, however if an
// error is returned there are no guarantees that the returned message is a
// valid representation of the packet read.
func (co *Conn) ReadMsg() (*Msg, error) {
p, err := co.ReadMsgHeader(nil)
if err != nil {
return nil, err
}
m := new(Msg)
if err := m.Unpack(p); err != nil {
// If an error was returned, we still want to allow the user to use
// the message; they can simply check err if they don't want
// to use an erroneous message.
return m, err
}
if t := m.IsTsig(); t != nil {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return m, ErrSecret
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
return m, err
}
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
// Note that error handling on the message body is not possible as only the header is parsed.
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
var (
p []byte
n int
err error
)
if _, ok := co.Conn.(net.PacketConn); ok {
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
} else {
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return nil, err
}
p = make([]byte, length)
n, err = io.ReadFull(co.Conn, p)
}
if err != nil {
return nil, err
} else if n < headerSize {
return nil, ErrShortRead
}
p = p[:n]
if hdr != nil {
dh, _, err := unpackMsgHdr(p, 0)
if err != nil {
return nil, err
}
*hdr = dh
}
return p, err
}
// Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil {
return 0, ErrConnEmpty
}
if _, ok := co.Conn.(net.PacketConn); ok {
// UDP connection
return co.Conn.Read(p)
}
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return 0, err
}
if int(length) > len(p) {
return 0, io.ErrShortBuffer
}
return io.ReadFull(co.Conn, p[:length])
}
// WriteMsg sends a message through the connection co.
// If the message m contains a TSIG record the transaction
// signature is calculated.
func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
mac := ""
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return ErrSecret
}
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {
out, err = m.Pack()
}
if err != nil {
return err
}
_, err = co.Write(out)
return err
}
// Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (int, error) {
if len(p) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
if _, ok := co.Conn.(net.PacketConn); ok {
return co.Conn.Write(p)
}
l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(p)))
n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
return int(n), err
}
// Return the appropriate timeout for a specific request
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
var requestTimeout time.Duration
if c.Timeout != 0 {
requestTimeout = c.Timeout
} else {
requestTimeout = timeout
}
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
// far
if c.Dialer != nil && c.Dialer.Timeout != 0 {
if c.Dialer.Timeout < requestTimeout {
requestTimeout = c.Dialer.Timeout
}
}
return requestTimeout
}
// Dial connects to the address on the named network.
func Dial(network, address string) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.Dial(network, address)
if err != nil {
return nil, err
}
return conn, nil
}
// ExchangeContext performs a synchronous UDP query, like Exchange. It
// additionally obeys deadlines from the passed Context.
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.ExchangeContext(ctx, m, a)
// ignoring rtt to leave the original ExchangeContext API unchanged, but
// this function will go away
return r, err
}
// ExchangeConn performs a synchronous query. It sends the message m via the connection
// c and waits for a reply. The connection c is not closed by ExchangeConn.
// Deprecated: This function is going away, but can easily be mimicked:
//
// co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m)
// in, _ := co.ReadMsg()
// co.Close()
//
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
println("dns: ExchangeConn: this function is deprecated")
co := new(Conn)
co.Conn = c
if err = co.WriteMsg(m); err != nil {
return nil, err
}
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, err
}
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
return client.Dial(address)
}
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, TLSConfig: tlsConfig}
return client.Dial(address)
}
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
return client.Dial(address)
}
// ExchangeContext acts like Exchange, but honors the deadline on the provided
// context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var timeout time.Duration
if deadline, ok := ctx.Deadline(); !ok {
timeout = 0
} else {
timeout = time.Until(deadline)
}
// not passing the context to the underlying calls, as the API does not support
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
// TODO(tmthrgd,miekg): this is a race condition.
c.Dialer = &net.Dialer{Timeout: timeout}
return c.Exchange(m, a)
}
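
The connection-reuse pattern described for ExchangeWithConn above, sketched end to end (again with a placeholder resolver address):

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{Net: "tcp"} // one TCP connection, several exchanges
	conn, err := c.Dial("8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for _, name := range []string{"example.org.", "example.com."} {
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)
		in, rtt, err := c.ExchangeWithConn(m, conn)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d answer RRs in %v\n", name, len(in.Answer), rtt)
	}
}
```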

@ -0,0 +1,135 @@
package dns
import (
"bufio"
"io"
"os"
"strconv"
"strings"
)
// ClientConfig wraps the contents of the /etc/resolv.conf file.
type ClientConfig struct {
Servers []string // servers to use
Search []string // suffixes to append to local name
Port string // what port to use
Ndots int // number of dots in name to trigger absolute lookup
Timeout int // seconds before giving up on packet
Attempts int // lost packets before giving up on server, not used in the package dns
}
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
// a *ClientConfig.
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
file, err := os.Open(resolvconf)
if err != nil {
return nil, err
}
defer file.Close()
return ClientConfigFromReader(file)
}
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
c := new(ClientConfig)
scanner := bufio.NewScanner(resolvconf)
c.Servers = make([]string, 0)
c.Search = make([]string, 0)
c.Port = "53"
c.Ndots = 1
c.Timeout = 5
c.Attempts = 2
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return nil, err
}
line := scanner.Text()
f := strings.Fields(line)
if len(f) < 1 {
continue
}
switch f[0] {
case "nameserver": // add one name server
if len(f) > 1 {
// One more check: make sure server name is
// just an IP address. Otherwise we need DNS
// to look it up.
name := f[1]
c.Servers = append(c.Servers, name)
}
case "domain": // set search path to just this domain
if len(f) > 1 {
c.Search = make([]string, 1)
c.Search[0] = f[1]
} else {
c.Search = make([]string, 0)
}
case "search": // set search path to given servers
c.Search = append([]string(nil), f[1:]...)
case "options": // magic options
for _, s := range f[1:] {
switch {
case len(s) >= 6 && s[:6] == "ndots:":
n, _ := strconv.Atoi(s[6:])
if n < 0 {
n = 0
} else if n > 15 {
n = 15
}
c.Ndots = n
case len(s) >= 8 && s[:8] == "timeout:":
n, _ := strconv.Atoi(s[8:])
if n < 1 {
n = 1
}
c.Timeout = n
case len(s) >= 9 && s[:9] == "attempts:":
n, _ := strconv.Atoi(s[9:])
if n < 1 {
n = 1
}
c.Attempts = n
case s == "rotate":
/* not imp */
}
}
}
}
return c, nil
}
// NameList returns all of the names that should be queried based on the
// config. It is based off of go's net/dns name building, but it does not
// check the length of the resulting names.
func (c *ClientConfig) NameList(name string) []string {
// if this domain is already fully qualified, no append needed.
if IsFqdn(name) {
return []string{name}
}
// Check to see if the name has more labels than Ndots. Do this before making
// the domain fully qualified.
hasNdots := CountLabel(name) > c.Ndots
// Make the domain fully qualified.
name = Fqdn(name)
// Make a list of names based off search.
names := []string{}
// If name has enough dots, try that first.
if hasNdots {
names = append(names, name)
}
for _, s := range c.Search {
names = append(names, Fqdn(name+s))
}
// If we didn't have enough dots, try after suffixes.
if !hasNdots {
names = append(names, name)
}
return names
}
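
A small sketch tying ClientConfigFromFile and NameList together, resolving a short name the way resolv.conf settings intend; the path and names are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	server := conf.Servers[0] + ":" + conf.Port // an IPv6 server would need [brackets]

	// NameList applies the ndots/search logic parsed from resolv.conf.
	for _, name := range conf.NameList("www") {
		m := new(dns.Msg)
		m.SetQuestion(name, dns.TypeA)
		in, _, err := new(dns.Client).Exchange(m, server)
		if err != nil {
			continue
		}
		if in.Rcode == dns.RcodeSuccess && len(in.Answer) > 0 {
			fmt.Println("resolved as", name)
			return
		}
	}
	log.Println("no candidate name resolved")
}
```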

@ -0,0 +1,43 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad MatchingType or Selector")
}
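
A hedged usage sketch: fetch a server's leaf certificate over TLS and print the digest that a DANE-EE/SPKI/SHA-256 ("3 1 1") TLSA record would carry. The host name is illustrative.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	conn, err := tls.Dial("tcp", "example.org:443", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	// selector 1 = SubjectPublicKeyInfo, matching type 1 = SHA-256
	digest, err := dns.CertificateToDANE(1, 1, leaf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("_443._tcp.example.org. TLSA 3 1 1 %s\n", digest)
}
```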

@ -0,0 +1,384 @@
package dns
import (
"errors"
"net"
"strconv"
"strings"
)
const hexDigit = "0123456789abcdef"
// Everything is assumed in ClassINET.
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
dns.Id = request.Id
dns.Response = true
dns.Opcode = request.Opcode
if dns.Opcode == OpcodeQuery {
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
}
dns.Rcode = RcodeSuccess
if len(request.Question) > 0 {
dns.Question = make([]Question, 1)
dns.Question[0] = request.Question[0]
}
return dns
}
// SetQuestion creates a question message, it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, t, ClassINET}
return dns
}
// SetNotify creates a notify message, it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetRcode creates an error message suitable for the request.
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
dns.SetReply(request)
dns.Rcode = rcode
return dns
}
// SetRcodeFormatError creates a message with FormError set.
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
dns.Rcode = RcodeFormatError
dns.Opcode = OpcodeQuery
dns.Response = true
dns.Authoritative = false
dns.Id = request.Id
return dns
}
// SetUpdate makes the message a dynamic update message. It
// sets the ZONE section to: z, TypeSOA, ClassINET.
func (dns *Msg) SetUpdate(z string) *Msg {
dns.Id = Id()
dns.Response = false
dns.Opcode = OpcodeUpdate
dns.Compress = false // BIND9 cannot handle compression
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetIxfr creates message for requesting an IXFR.
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Ns = make([]RR, 1)
s := new(SOA)
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
s.Serial = serial
s.Ns = ns
s.Mbox = mbox
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
dns.Ns[0] = s
return dns
}
// SetAxfr creates message for requesting an AXFR.
func (dns *Msg) SetAxfr(z string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
return dns
}
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
t.Fudge = fudge
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
return dns
}
// SetEdns0 appends a EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
e := new(OPT)
e.Hdr.Name = "."
e.Hdr.Rrtype = TypeOPT
e.SetUDPSize(udpsize)
if do {
e.SetDo()
}
dns.Extra = append(dns.Extra, e)
return dns
}
// IsTsig checks if the message has a TSIG record as the last record
// in the additional section. It returns the TSIG record found or nil.
func (dns *Msg) IsTsig() *TSIG {
if len(dns.Extra) > 0 {
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
return dns.Extra[len(dns.Extra)-1].(*TSIG)
}
}
return nil
}
// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// popEdns0 is like IsEdns0, but it removes the record from the message.
func (dns *Msg) popEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
opt := dns.Extra[i].(*OPT)
dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
return opt
}
}
return nil
}
// IsDomainName checks if s is a valid domain name. It returns the number of
// labels and true when the domain name is valid. Note that a non-fully-qualified
// domain name is considered valid; in that case the last label is counted in
// the number of labels. When false is returned, the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name, as the DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters and that the entire name will fit into the 255
// octet wire format limit.
func IsDomainName(s string) (labels int, ok bool) {
// XXX: The logic in this function was copied from packDomainName and
// should be kept in sync with that function.
const lenmsg = 256
if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
return 0, false
}
s = Fqdn(s)
// Each dot ends a segment of the name. Except for escaped dots (\.), which
// are normal dots.
var (
off int
begin int
wasDot bool
)
for i := 0; i < len(s); i++ {
switch s[i] {
case '\\':
if off+1 > lenmsg {
return labels, false
}
// check for \DDD
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
i += 3
begin += 3
} else {
i++
begin++
}
wasDot = false
case '.':
if wasDot {
// two dots back to back is not legal
return labels, false
}
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return labels, false
}
// off can already (we're in a loop) be bigger than lenmsg
// this happens when a name isn't fully qualified
off += 1 + labelLen
if off > lenmsg {
return labels, false
}
labels++
begin = i + 1
default:
wasDot = false
}
}
return labels, true
}
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
}
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
// The checking is performed on the binary payload.
func IsMsg(buf []byte) error {
// Header
if len(buf) < headerSize {
return errors.New("dns: bad message header")
}
// Header: Opcode
// TODO(miek): more checks here, e.g. check all header bits.
return nil
}
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
return false
}
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
}
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
// This means the RRs need to have the same type, name, and class. Returns true
// if the RR set is valid, otherwise false.
func IsRRset(rrset []RR) bool {
if len(rrset) == 0 {
return false
}
if len(rrset) == 1 {
return true
}
rrHeader := rrset[0].Header()
rrType := rrHeader.Rrtype
rrClass := rrHeader.Class
rrName := rrHeader.Name
for _, rr := range rrset[1:] {
curRRHeader := rr.Header()
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
// Mismatch between the records, so this is not a valid rrset for
// signing/verifying.
return false
}
}
return true
}
// Fqdn returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {
return s
}
return s + "."
}
// CanonicalName returns the domain name in canonical form. A name in canonical
// form is lowercase and fully qualified. See Section 6.2 in RFC 4034.
func CanonicalName(s string) string {
return strings.ToLower(Fqdn(s))
}
// Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
// to parse the IP address.
func ReverseAddr(addr string) (arpa string, err error) {
ip := net.ParseIP(addr)
if ip == nil {
return "", &Error{err: "unrecognized address: " + addr}
}
if v4 := ip.To4(); v4 != nil {
buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
// Add it, in reverse, to the buffer
for i := len(v4) - 1; i >= 0; i-- {
buf = strconv.AppendInt(buf, int64(v4[i]), 10)
buf = append(buf, '.')
}
// Append "in-addr.arpa." and return (buf already has the final .)
buf = append(buf, "in-addr.arpa."...)
return string(buf), nil
}
// Must be IPv6
buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
buf = append(buf, hexDigit[v&0xF])
buf = append(buf, '.')
buf = append(buf, hexDigit[v>>4])
buf = append(buf, '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
return string(buf), nil
}
// String returns the string representation for the type t.
func (t Type) String() string {
if t1, ok := TypeToString[uint16(t)]; ok {
return t1
}
return "TYPE" + strconv.Itoa(int(t))
}
// String returns the string representation for the class c.
func (c Class) String() string {
if s, ok := ClassToString[uint16(c)]; ok {
// Only emit mnemonics when they are unambiguous; notably ANY is in both.
if _, ok := StringToType[s]; !ok {
return s
}
}
return "CLASS" + strconv.Itoa(int(c))
}
// String returns the string representation for the name n.
func (n Name) String() string {
return sprintName(string(n))
}
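
Tying a few of these helpers together, a sketch that builds and sends a reverse (PTR) query; the resolver address is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	arpa, err := dns.ReverseAddr("192.0.2.10") // -> 10.2.0.192.in-addr.arpa.
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetQuestion(arpa, dns.TypePTR)

	in, _, err := new(dns.Client).Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	for _, rr := range in.Answer {
		fmt.Println(rr.String())
	}
}
```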

@ -0,0 +1,134 @@
package dns
import "strconv"
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
defaultTtl = 3600 // Default internal TTL.
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
DefaultMsgSize = 4096
// MinMsgSize is the minimal size of a DNS packet.
MinMsgSize = 512
// MaxMsgSize is the largest possible DNS packet.
MaxMsgSize = 65535
)
// Error represents a DNS error.
type Error struct{ err string }
func (e *Error) Error() string {
if e == nil {
return "dns: <nil>"
}
return "dns: " + e.err
}
// An RR represents a resource record.
type RR interface {
// Header returns the header of a resource record. The header contains
// everything up to the rdata.
Header() *RR_Header
// String returns the text representation of the resource record.
String() string
// copy returns a copy of the RR
copy() RR
// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
//
// If compression is nil, the uncompressed size will be returned, otherwise the compressed
// size will be returned and domain names will be added to the map for future compression.
len(off int, compression map[string]struct{}) int
// pack packs the records RDATA into wire format. The header will
// already have been packed into msg.
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
// unpack unpacks an RR from wire format.
//
// This will only be called on a new and empty RR type with only the header populated. It
// will only be called if the record's RDATA is non-empty.
unpack(msg []byte, off int) (off1 int, err error)
// parse parses an RR from zone file format.
//
// This will only be called on a new and empty RR type with only the header populated.
parse(c *zlexer, origin string) *ParseError
// isDuplicate returns whether the two RRs are duplicates.
isDuplicate(r2 RR) bool
}
// RR_Header is the header all DNS resource records share.
type RR_Header struct {
Name string `dns:"cdomain-name"`
Rrtype uint16
Class uint16
Ttl uint32
Rdlength uint16 // Length of data after header.
}
// Header returns itself. This is here to make RR_Header implements the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) String() string {
var s string
if h.Rrtype == TypeOPT {
s = ";"
// and maybe other things
}
s += sprintName(h.Name) + "\t"
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
s += Class(h.Class).String() + "\t"
s += Type(h.Rrtype).String() + "\t"
return s
}
func (h *RR_Header) len(off int, compression map[string]struct{}) int {
l := domainNameLen(h.Name, off, compression, true)
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
return l
}
func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// RR_Header has no RDATA to pack.
return off, nil
}
func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
panic("dns: internal error: unpack should never be called on RR_Header")
}
func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on RR_Header")
}
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, Len(r)*2)
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil {
return err
}
buf = buf[:off]
*rr = RFC3597{Hdr: *r.Header()}
rr.Hdr.Rdlength = uint16(off - headerEnd)
if noRdata(rr.Hdr) {
return nil
}
_, err = rr.unpack(buf, headerEnd)
if err != nil {
return err
}
return nil
}
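
A brief sketch of ToRFC3597 in use, turning a known record into its RFC 3597 "unknown RR" text form:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR(`example.org. 3600 IN TXT "hello"`)
	if err != nil {
		log.Fatal(err)
	}

	var unknown dns.RFC3597
	if err := unknown.ToRFC3597(rr); err != nil {
		log.Fatal(err)
	}
	// Prints the record with its rdata in the opaque `\# <length> <hex>` form.
	fmt.Println(unknown.String())
}
```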

@ -0,0 +1,794 @@
package dns
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
_ "crypto/md5"
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"math/big"
"sort"
"strings"
"time"
"golang.org/x/crypto/ed25519"
)
// DNSSEC encryption algorithm codes.
const (
_ uint8 = iota
RSAMD5
DH
DSA
_ // Skip 4, RFC 6725, section 2.1
RSASHA1
DSANSEC3SHA1
RSASHA1NSEC3SHA1
RSASHA256
_ // Skip 9, RFC 6725, section 2.1
RSASHA512
_ // Skip 11, RFC 6725, section 2.1
ECCGOST
ECDSAP256SHA256
ECDSAP384SHA384
ED25519
ED448
INDIRECT uint8 = 252
PRIVATEDNS uint8 = 253 // Private (experimental keys)
PRIVATEOID uint8 = 254
)
// AlgorithmToString is a map of algorithm IDs to algorithm names.
var AlgorithmToString = map[uint8]string{
RSAMD5: "RSAMD5",
DH: "DH",
DSA: "DSA",
RSASHA1: "RSASHA1",
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
RSASHA256: "RSASHA256",
RSASHA512: "RSASHA512",
ECCGOST: "ECC-GOST",
ECDSAP256SHA256: "ECDSAP256SHA256",
ECDSAP384SHA384: "ECDSAP384SHA384",
ED25519: "ED25519",
ED448: "ED448",
INDIRECT: "INDIRECT",
PRIVATEDNS: "PRIVATEDNS",
PRIVATEOID: "PRIVATEOID",
}
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
DSA: crypto.SHA1,
RSASHA1: crypto.SHA1,
RSASHA1NSEC3SHA1: crypto.SHA1,
RSASHA256: crypto.SHA256,
ECDSAP256SHA256: crypto.SHA256,
ECDSAP384SHA384: crypto.SHA384,
RSASHA512: crypto.SHA512,
ED25519: crypto.Hash(0),
}
// DNSSEC hashing algorithm codes.
const (
_ uint8 = iota
SHA1 // RFC 4034
SHA256 // RFC 4509
GOST94 // RFC 5933
SHA384 // Experimental
SHA512 // Experimental
)
// HashToString is a map of hash IDs to names.
var HashToString = map[uint8]string{
SHA1: "SHA1",
SHA256: "SHA256",
GOST94: "GOST94",
SHA384: "SHA384",
SHA512: "SHA512",
}
// DNSKEY flag values.
const (
SEP = 1
REVOKE = 1 << 7
ZONE = 1 << 8
)
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
type rrsigWireFmt struct {
TypeCovered uint16
Algorithm uint8
Labels uint8
OrigTtl uint32
Expiration uint32
Inception uint32
KeyTag uint16
SignerName string `dns:"domain-name"`
/* No Signature */
}
// Used for converting DNSKEY's rdata to wirefmt.
type dnskeyWireFmt struct {
Flags uint16
Protocol uint8
Algorithm uint8
PublicKey string `dns:"base64"`
/* Nothing is left out */
}
func divRoundUp(a, b int) int {
return (a + b - 1) / b
}
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
func (k *DNSKEY) KeyTag() uint16 {
if k == nil {
return 0
}
var keytag int
switch k.Algorithm {
case RSAMD5:
// Look at the bottom two bytes of the modulus, which is the last
// item in the pubkey.
// This algorithm has been deprecated, but keep this key-tag calculation.
modulus, _ := fromBase64([]byte(k.PublicKey))
if len(modulus) > 1 {
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
keytag = int(x)
}
default:
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return 0
}
wire = wire[:n]
for i, v := range wire {
if i&1 != 0 {
keytag += int(v) // must be larger than uint32
} else {
keytag += int(v) << 8
}
}
keytag += keytag >> 16 & 0xFFFF
keytag &= 0xFFFF
}
return uint16(keytag)
}
// ToDS converts a DNSKEY record to a DS record.
func (k *DNSKEY) ToDS(h uint8) *DS {
if k == nil {
return nil
}
ds := new(DS)
ds.Hdr.Name = k.Hdr.Name
ds.Hdr.Class = k.Hdr.Class
ds.Hdr.Rrtype = TypeDS
ds.Hdr.Ttl = k.Hdr.Ttl
ds.Algorithm = k.Algorithm
ds.DigestType = h
ds.KeyTag = k.KeyTag()
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return nil
}
wire = wire[:n]
owner := make([]byte, 255)
off, err1 := PackDomainName(CanonicalName(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil {
return nil
}
owner = owner[:off]
// RFC4034:
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
// "|" denotes concatenation
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
var hash crypto.Hash
switch h {
case SHA1:
hash = crypto.SHA1
case SHA256:
hash = crypto.SHA256
case SHA384:
hash = crypto.SHA384
case SHA512:
hash = crypto.SHA512
default:
return nil
}
s := hash.New()
s.Write(owner)
s.Write(wire)
ds.Digest = hex.EncodeToString(s.Sum(nil))
return ds
}
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k}
c.Hdr = k.Hdr
c.Hdr.Rrtype = TypeCDNSKEY
return c
}
// ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d}
c.Hdr = d.Hdr
c.Hdr.Rrtype = TypeCDS
return c
}
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error when the signing fails.
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
h0 := rrset[0].Header()
rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = h0.Name
rr.Hdr.Class = h0.Class
if rr.OrigTtl == 0 { // If set don't override
rr.OrigTtl = h0.Ttl
}
rr.TypeCovered = h0.Rrtype
rr.Labels = uint8(CountLabel(h0.Name))
if strings.HasPrefix(h0.Name, "*") {
rr.Labels-- // wildcard, remove from label count
}
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signdata)
if err != nil {
return err
}
signdata = signdata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case ED25519:
// ed25519 signs the raw message and performs hashing internally.
// All other supported signature schemes operate over the pre-hashed
// message, and thus ed25519 must be handled separately here.
//
// The raw message is passed directly into sign and crypto.Hash(0) is
// used to signal to the crypto.Signer that the data has not been hashed.
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
case RSAMD5, DSA, DSANSEC3SHA1:
// See RFC 6944.
return ErrAlg
default:
h := hash.New()
h.Write(signdata)
h.Write(wire)
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
}
return nil
}
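// exampleSignRRset is an editor's illustrative sketch and is not part of the
// upstream miekg/dns sources. It shows which RRSIG fields the caller must fill
// in before calling Sign; the record, zone name and validity window are
// hypothetical placeholders, and signer is the private key returned by
// DNSKEY.Generate (or read with DNSKEY.ReadPrivateKey) asserted to a
// crypto.Signer.
func exampleSignRRset(key *DNSKEY, signer crypto.Signer) (*RRSIG, error) {
	a, err := NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		return nil, err
	}
	sig := new(RRSIG)
	sig.Algorithm = key.Algorithm
	sig.KeyTag = key.KeyTag()
	sig.SignerName = key.Hdr.Name
	sig.Inception = uint32(time.Now().Add(-time.Hour).Unix())
	sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
	// Sign copies Name, Class, TypeCovered, Labels and OrigTtl from the RRset
	// and fills in Signature.
	if err := sig.Sign(signer, []RR{a}); err != nil {
		return nil, err
	}
	return sig, nil
}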
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
signature, err := k.Sign(rand.Reader, hashed, hash)
if err != nil {
return nil, err
}
switch alg {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
return signature, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
ecdsaSignature := &struct {
R, S *big.Int
}{}
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
return nil, err
}
var intlen int
switch alg {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
signature := intToBytes(ecdsaSignature.R, intlen)
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
return signature, nil
// There is no defined interface for what a DSA backed crypto.Signer returns
case DSA, DSANSEC3SHA1:
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
// signature := []byte{byte(t)}
// signature = append(signature, intToBytes(r1, 20)...)
// signature = append(signature, intToBytes(s1, 20)...)
// rr.Signature = signature
case ED25519:
return signature, nil
}
return nil, ErrAlg
}
// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test; the signature validity period must be checked separately.
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// First the easy checks
if !IsRRset(rrset) {
return ErrRRset
}
if rr.KeyTag != k.KeyTag() {
return ErrKey
}
if rr.Hdr.Class != k.Hdr.Class {
return ErrKey
}
if rr.Algorithm != k.Algorithm {
return ErrKey
}
if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
return ErrKey
}
if k.Protocol != 3 {
return ErrKey
}
// IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and
// class matches the RRSIG record.
if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
return ErrRRset
}
// RFC 4035 5.3.2. Reconstructing the Signed Data
// Copy the sig, except the rrsig data
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata)
if err != nil {
return err
}
signeddata = signeddata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
// TODO(miek)
// remove the domain name and assume its ours?
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
pubkey := k.publicKeyRSA() // Get the key
if pubkey == nil {
return ErrKey
}
h := hash.New()
h.Write(signeddata)
h.Write(wire)
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
case ECDSAP256SHA256, ECDSAP384SHA384:
pubkey := k.publicKeyECDSA()
if pubkey == nil {
return ErrKey
}
// Split sigbuf into the r and s coordinates
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
h := hash.New()
h.Write(signeddata)
h.Write(wire)
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
return nil
}
return ErrSig
case ED25519:
pubkey := k.publicKeyED25519()
if pubkey == nil {
return ErrKey
}
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
return nil
}
return ErrSig
default:
return ErrAlg
}
}
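// exampleVerifyRRset is an editor's illustrative sketch and is not part of the
// upstream miekg/dns sources. Verify is purely cryptographic, so a complete
// check also calls ValidityPeriod for the inception/expiration window.
func exampleVerifyRRset(key *DNSKEY, sig *RRSIG, rrset []RR) error {
	if err := sig.Verify(key, rrset); err != nil {
		return err
	}
	if !sig.ValidityPeriod(time.Now()) {
		return ErrTime
	}
	return nil
}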
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
// if a signature period is valid. If t is the zero time, the
// current time is used; otherwise t is. Returns true if the signature
// is valid at the given time, otherwise false.
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
var utc int64
if t.IsZero() {
utc = time.Now().UTC().Unix()
} else {
utc = t.UTC().Unix()
}
modi := (int64(rr.Inception) - utc) / year68
mode := (int64(rr.Expiration) - utc) / year68
ti := int64(rr.Inception) + modi*year68
te := int64(rr.Expiration) + mode*year68
return ti <= utc && utc <= te
}
// Return the signature's base64-encoded sigdata as a byte slice.
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
return nil
}
return sigbuf
}
// publicKeyRSA returns the RSA public key from a DNSKEY record.
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 1+1+64 {
// Exponent must be at least 1 byte and modulus at least 64
return nil
}
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
// Length is in the 0th byte, unless it is zero, in which case it
// is in bytes 1 and 2 and is a 16 bit number.
explen := uint16(keybuf[0])
keyoff := 1
if explen == 0 {
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
keyoff = 3
}
if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
// Exponent larger than supported by the crypto package,
// empty, or contains prohibited leading zero.
return nil
}
modoff := keyoff + int(explen)
modlen := len(keybuf) - modoff
if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
// Modulus is too small, large, or contains prohibited leading zero.
return nil
}
pubkey := new(rsa.PublicKey)
var expo uint64
// The exponent of length explen is between keyoff and modoff.
for _, v := range keybuf[keyoff:modoff] {
expo <<= 8
expo |= uint64(v)
}
if expo > 1<<31-1 {
// Larger exponent than supported by the crypto package.
return nil
}
pubkey.E = int(expo)
pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
return pubkey
}
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
pubkey := new(ecdsa.PublicKey)
switch k.Algorithm {
case ECDSAP256SHA256:
pubkey.Curve = elliptic.P256()
if len(keybuf) != 64 {
// wrongly encoded key
return nil
}
case ECDSAP384SHA384:
pubkey.Curve = elliptic.P384()
if len(keybuf) != 96 {
// Wrongly encoded key
return nil
}
}
pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
return pubkey
}
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 22 {
return nil
}
t, keybuf := int(keybuf[0]), keybuf[1:]
size := 64 + t*8
q, keybuf := keybuf[:20], keybuf[20:]
if len(keybuf) != 3*size {
return nil
}
p, keybuf := keybuf[:size], keybuf[size:]
g, y := keybuf[:size], keybuf[size:]
pubkey := new(dsa.PublicKey)
pubkey.Parameters.Q = new(big.Int).SetBytes(q)
pubkey.Parameters.P = new(big.Int).SetBytes(p)
pubkey.Parameters.G = new(big.Int).SetBytes(g)
pubkey.Y = new(big.Int).SetBytes(y)
return pubkey
}
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) != ed25519.PublicKeySize {
return nil
}
return keybuf
}
type wireSlice [][]byte
func (p wireSlice) Len() int { return len(p) }
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p wireSlice) Less(i, j int) bool {
_, ioff, _ := UnpackDomainName(p[i], 0)
_, joff, _ := UnpackDomainName(p[j], 0)
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
}
// Return the raw signature data.
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires := make(wireSlice, len(rrset))
for i, r := range rrset {
r1 := r.copy()
h := r1.Header()
h.Ttl = s.OrigTtl
labels := SplitDomainName(h.Name)
// 6.2. Canonical RR Form. (4) - wildcards
if len(labels) > int(s.Labels) {
// Wildcard
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
}
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
h.Name = CanonicalName(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
// SRV, DNAME, A6
//
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
// that needs conversion to lowercase, and twice at that. Since HINFO
// records contain no domain names, they are not subject to case
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = CanonicalName(x.Ns)
case *MD:
x.Md = CanonicalName(x.Md)
case *MF:
x.Mf = CanonicalName(x.Mf)
case *CNAME:
x.Target = CanonicalName(x.Target)
case *SOA:
x.Ns = CanonicalName(x.Ns)
x.Mbox = CanonicalName(x.Mbox)
case *MB:
x.Mb = CanonicalName(x.Mb)
case *MG:
x.Mg = CanonicalName(x.Mg)
case *MR:
x.Mr = CanonicalName(x.Mr)
case *PTR:
x.Ptr = CanonicalName(x.Ptr)
case *MINFO:
x.Rmail = CanonicalName(x.Rmail)
x.Email = CanonicalName(x.Email)
case *MX:
x.Mx = CanonicalName(x.Mx)
case *RP:
x.Mbox = CanonicalName(x.Mbox)
x.Txt = CanonicalName(x.Txt)
case *AFSDB:
x.Hostname = CanonicalName(x.Hostname)
case *RT:
x.Host = CanonicalName(x.Host)
case *SIG:
x.SignerName = CanonicalName(x.SignerName)
case *PX:
x.Map822 = CanonicalName(x.Map822)
x.Mapx400 = CanonicalName(x.Mapx400)
case *NAPTR:
x.Replacement = CanonicalName(x.Replacement)
case *KX:
x.Exchanger = CanonicalName(x.Exchanger)
case *SRV:
x.Target = CanonicalName(x.Target)
case *DNAME:
x.Target = CanonicalName(x.Target)
}
// 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
off, err1 := PackRR(r1, wire, 0, nil, false)
if err1 != nil {
return nil, err1
}
wire = wire[:off]
wires[i] = wire
}
sort.Sort(wires)
for i, wire := range wires {
if i > 0 && bytes.Equal(wire, wires[i-1]) {
continue
}
buf = append(buf, wire...)
}
return buf, nil
}
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go RRSIG packing
off, err := packUint16(sw.TypeCovered, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(sw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(sw.Labels, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.OrigTtl, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Expiration, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Inception, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(sw.KeyTag, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
if err != nil {
return off, err
}
return off, nil
}
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
// copied from zmsg.go DNSKEY packing
off, err := packUint16(dw.Flags, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(dw.Protocol, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(dw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packStringBase64(dw.PublicKey, msg, off)
if err != nil {
return off, err
}
return off, nil
}

@ -0,0 +1,140 @@
package dns
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"math/big"
"golang.org/x/crypto/ed25519"
)
// Generate generates a DNSKEY of the given bit size.
// The public part is put inside the DNSKEY record.
// The Algorithm in the key must be set as this will define
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed key size; in that case
// bits should be set to the size of the algorithm.
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
switch k.Algorithm {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
if bits < 512 || bits > 4096 {
return nil, ErrKeySize
}
case RSASHA512:
if bits < 1024 || bits > 4096 {
return nil, ErrKeySize
}
case ECDSAP256SHA256:
if bits != 256 {
return nil, ErrKeySize
}
case ECDSAP384SHA384:
if bits != 384 {
return nil, ErrKeySize
}
case ED25519:
if bits != 256 {
return nil, ErrKeySize
}
}
switch k.Algorithm {
case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
return priv, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
var c elliptic.Curve
switch k.Algorithm {
case ECDSAP256SHA256:
c = elliptic.P256()
case ECDSAP384SHA384:
c = elliptic.P384()
}
priv, err := ecdsa.GenerateKey(c, rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
return priv, nil
case ED25519:
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyED25519(pub)
return priv, nil
default:
return nil, ErrAlg
}
}
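// exampleGenerateDNSKEY is an editor's illustrative sketch and is not part of
// the upstream miekg/dns sources. It builds a complete zone-signing DNSKEY for
// a hypothetical zone and generates the matching private key.
func exampleGenerateDNSKEY() (*DNSKEY, crypto.PrivateKey, error) {
	key := &DNSKEY{
		Hdr:       RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600},
		Flags:     ZONE, // 256; use ZONE | SEP (257) for a key-signing key
		Protocol:  3,    // always 3, per RFC 4034
		Algorithm: ED25519,
	}
	priv, err := key.Generate(256) // also fills in key.PublicKey
	if err != nil {
		return nil, nil, err
	}
	return key, priv, nil
}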
// Set the public key (the values E and N) for RSA
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
if _E == 0 || _N == nil {
return false
}
buf := exponentToBuf(_E)
buf = append(buf, _N.Bytes()...)
k.PublicKey = toBase64(buf)
return true
}
// Set the public key for Elliptic Curves
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
if _X == nil || _Y == nil {
return false
}
var intlen int
switch k.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
return true
}
// Set the public key for Ed25519
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
if _K == nil {
return false
}
k.PublicKey = toBase64(_K)
return true
}
// exponentToBuf returns the public exponent E with its length prefix, as
// described in RFC 3110, Section 2. RSA Public KEY Resource Records.
func exponentToBuf(_E int) []byte {
var buf []byte
i := big.NewInt(int64(_E)).Bytes()
if len(i) < 256 {
buf = make([]byte, 1, 1+len(i))
buf[0] = uint8(len(i))
} else {
buf = make([]byte, 3, 3+len(i))
buf[0] = 0
buf[1] = uint8(len(i) >> 8)
buf[2] = uint8(len(i))
}
buf = append(buf, i...)
return buf
}
// curveToBuf concatenates the X and Y coordinates, each padded to intlen
// bytes.
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
buf := intToBytes(_X, intlen)
buf = append(buf, intToBytes(_Y, intlen)...)
return buf
}

@ -0,0 +1,322 @@
package dns
import (
"bufio"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"io"
"math/big"
"strconv"
"strings"
"golang.org/x/crypto/ed25519"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form as the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
}
return k.ReadPrivateKey(strings.NewReader(s), "")
}
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
// only used in error reporting.
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
m, err := parseKey(q, file)
if m == nil {
return nil, err
}
if _, ok := m["private-key-format"]; !ok {
return nil, ErrPrivKey
}
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
return nil, ErrPrivKey
}
// TODO(mg): check if the pubkey matches the private key
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
if err != nil {
return nil, ErrPrivKey
}
switch uint8(algo) {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1:
fallthrough
case RSASHA1NSEC3SHA1:
fallthrough
case RSASHA256:
fallthrough
case RSASHA512:
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyRSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ECCGOST:
return nil, ErrPrivKey
case ECDSAP256SHA256:
fallthrough
case ECDSAP384SHA384:
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyECDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ED25519:
return readPrivateKeyED25519(m)
default:
return nil, ErrPrivKey
}
}
// Build an RSA private key from the parsed key-file fields; the caller fills in the matching public key.
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
p := new(rsa.PrivateKey)
p.Primes = []*big.Int{nil, nil}
for k, v := range m {
switch k {
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
switch k {
case "modulus":
p.PublicKey.N = new(big.Int).SetBytes(v1)
case "publicexponent":
i := new(big.Int).SetBytes(v1)
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
case "privateexponent":
p.D = new(big.Int).SetBytes(v1)
case "prime1":
p.Primes[0] = new(big.Int).SetBytes(v1)
case "prime2":
p.Primes[1] = new(big.Int).SetBytes(v1)
}
case "exponent1", "exponent2", "coefficient":
// not used in Go (yet)
case "created", "publish", "activate":
// not used in Go (yet)
}
}
return p, nil
}
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
p := new(ecdsa.PrivateKey)
p.D = new(big.Int)
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
p.D.SetBytes(v1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
var p ed25519.PrivateKey
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
p1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
if len(p1) != ed25519.SeedSize {
return nil, ErrPrivKey
}
p = ed25519.NewKeyFromSeed(p1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
// parseKey reads a private key from r. It returns a map[string]string,
// with the key-value pairs, or an error when the file is not correct.
func parseKey(r io.Reader, file string) (map[string]string, error) {
m := make(map[string]string)
var k string
c := newKLexer(r)
for l, ok := c.Next(); ok; l, ok = c.Next() {
// It should alternate
switch l.value {
case zKey:
k = l.token
case zValue:
if k == "" {
return nil, &ParseError{file, "no private key seen", l}
}
m[strings.ToLower(k)] = l.token
k = ""
}
}
// Surface any read errors from r.
if err := c.Err(); err != nil {
return nil, &ParseError{file: file, err: err.Error()}
}
return m, nil
}
type klexer struct {
br io.ByteReader
readErr error
line int
column int
key bool
eol bool // end-of-line
}
func newKLexer(r io.Reader) *klexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &klexer{
br: br,
line: 1,
key: true,
}
}
func (kl *klexer) Err() error {
if kl.readErr == io.EOF {
return nil
}
return kl.readErr
}
// readByte returns the next byte from the input
func (kl *klexer) readByte() (byte, bool) {
if kl.readErr != nil {
return 0, false
}
c, err := kl.br.ReadByte()
if err != nil {
kl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if kl.eol {
kl.line++
kl.column = 0
kl.eol = false
}
if c == '\n' {
kl.eol = true
} else {
kl.column++
}
return c, true
}
func (kl *klexer) Next() (lex, bool) {
var (
l lex
str strings.Builder
commt bool
)
for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
l.line, l.column = kl.line, kl.column
switch x {
case ':':
if commt || !kl.key {
break
}
kl.key = false
// Next token is a space, eat it
kl.readByte()
l.value = zKey
l.token = str.String()
return l, true
case ';':
commt = true
case '\n':
if commt {
// Reset a comment
commt = false
}
if kl.key && str.Len() == 0 {
// ignore empty lines
break
}
kl.key = true
l.value = zValue
l.token = str.String()
return l, true
default:
if commt {
break
}
str.WriteByte(x)
}
}
if kl.readErr != nil && kl.readErr != io.EOF {
// Don't return any tokens after a read error occurs.
return lex{value: zEOF}, false
}
if str.Len() > 0 {
// Send remainder
l.value = zValue
l.token = str.String()
return l, true
}
return lex{value: zEOF}, false
}

@ -0,0 +1,94 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"math/big"
"strconv"
"golang.org/x/crypto/ed25519"
)
const format = "Private-key-format: v1.3\n"
var bigIntOne = big.NewInt(1)
// PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so it is a method of DNSKEY.
// It supports rsa.PrivateKey, ecdsa.PrivateKey, dsa.PrivateKey and ed25519.PrivateKey.
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
algorithm := strconv.Itoa(int(r.Algorithm))
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
switch p := p.(type) {
case *rsa.PrivateKey:
modulus := toBase64(p.PublicKey.N.Bytes())
e := big.NewInt(int64(p.PublicKey.E))
publicExponent := toBase64(e.Bytes())
privateExponent := toBase64(p.D.Bytes())
prime1 := toBase64(p.Primes[0].Bytes())
prime2 := toBase64(p.Primes[1].Bytes())
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987
p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
exp1 := new(big.Int).Mod(p.D, p1)
exp2 := new(big.Int).Mod(p.D, q1)
coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes())
coefficient := toBase64(coeff.Bytes())
return format +
"Algorithm: " + algorithm + "\n" +
"Modulus: " + modulus + "\n" +
"PublicExponent: " + publicExponent + "\n" +
"PrivateExponent: " + privateExponent + "\n" +
"Prime1: " + prime1 + "\n" +
"Prime2: " + prime2 + "\n" +
"Exponent1: " + exponent1 + "\n" +
"Exponent2: " + exponent2 + "\n" +
"Coefficient: " + coefficient + "\n"
case *ecdsa.PrivateKey:
var intlen int
switch r.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
private := toBase64(intToBytes(p.D, intlen))
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
case *dsa.PrivateKey:
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
priv := toBase64(intToBytes(p.X, 20))
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
return format +
"Algorithm: " + algorithm + "\n" +
"Prime(p): " + prime + "\n" +
"Subprime(q): " + subprime + "\n" +
"Base(g): " + base + "\n" +
"Private_value(x): " + priv + "\n" +
"Public_value(y): " + pub + "\n"
case ed25519.PrivateKey:
private := toBase64(p.Seed())
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
default:
return ""
}
}
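// examplePrivateKeyRoundTrip is an editor's illustrative sketch and is not
// part of the upstream miekg/dns sources. The string produced by
// PrivateKeyString can be parsed back with DNSKEY.NewPrivateKey, which is how
// keys are persisted to and restored from BIND-style key files. This only
// works for the algorithms that ReadPrivateKey still accepts (the RSA, ECDSA
// and Ed25519 variants).
func examplePrivateKeyRoundTrip(k *DNSKEY, priv crypto.PrivateKey) (crypto.PrivateKey, error) {
	s := k.PrivateKeyString(priv)
	return k.NewPrivateKey(s)
}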

vendor/github.com/miekg/dns/doc.go (generated, vendored, 268 lines)

@ -0,0 +1,268 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Both server- and client-side programming is supported. The package allows
complete control over what is sent out to the DNS. The API follows the
less-is-more principle, by presenting a small, clean interface.
It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
Note that domain names MUST be fully qualified before sending them; unqualified
names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format. Basic
usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS, messages are exchanged; these messages contain resource records
(sets). Basic use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
The message m is now a message with the question section set to ask the MX
records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
m1 := new(dns.Msg)
m1.Id = dns.Id()
m1.RecursionDesired = true
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent. Basic use pattern for synchronously
querying the DNS at a server configured on 127.0.0.1, port 53:
c := new(dns.Client)
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing multiple outstanding queries (with the same question, type and
class) is as easy as setting:
c.SingleInflight = true
More advanced options are available using a net.Dialer and the corresponding API.
For example it is possible to set a timeout, or to specify a source IP address
and port to use for the connection:
c := new(dns.Client)
laddr := net.UDPAddr{
IP: net.ParseIP("::1"),
Port: 12345,
Zone: "",
}
c.Dialer := &net.Dialer{
Timeout: 200 * time.Millisecond,
LocalAddr: &laddr,
}
in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
If these "advanced" features are not needed, a simple UDP query can be sent
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
When this function returns you will get a DNS message. A DNS message consists
of four sections.
The question section: in.Question, the answer section: in.Answer,
the authority section: in.Ns and the additional section: in.Extra.
Each of these sections (except the Question section) contains a []RR. Basic
use pattern for accessing the rdata of a TXT RR as the first RR in
the Answer section:
if t, ok := in.Answer[0].(*dns.TXT); ok {
// do something with t.Txt
}
Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation form
both when unpacked and when converted to strings.
For TXT character strings, tabs, carriage returns and line feeds will be
converted to \t, \r and \n respectively. Backslashes and quotation marks will
be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
For domain names, in addition to the above rules, brackets, periods, spaces,
semicolons and the at symbol are escaped.
DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
public key cryptography to sign resource records. The public keys are stored in
DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
bit to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
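A minimal sketch (an editor's illustration with placeholder values; rrset is
the []dns.RR to be signed, and the time and crypto packages are assumed to be
imported):
key := &dns.DNSKEY{Hdr: dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
	Flags: 256, Protocol: 3, Algorithm: dns.ED25519}
priv, _ := key.Generate(256)
sig := &dns.RRSIG{KeyTag: key.KeyTag(), SignerName: key.Hdr.Name, Algorithm: key.Algorithm,
	Inception: uint32(time.Now().Unix()), Expiration: uint32(time.Now().Add(14 * 24 * time.Hour).Unix())}
err := sig.Sign(priv.(crypto.Signer), rrset)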
DYNAMIC UPDATES
Dynamic updates reuse the DNS message format, but rename three of the
sections. Question is Zone, Answer is Prerequisite, Authority is Update; only
the Additional section is not renamed. See RFC 2136 for the gory details.
You can set a rather complex set of rules for the existence or absence of
certain resource records or names in a zone to specify if resource records
should be added or removed. The table from RFC 2136 supplemented with the Go
DNS function shows which functions exist to specify the prerequisites.
3.2.4 - Table Of Metavalues Used In Prerequisite Section
CLASS TYPE RDATA Meaning Function
--------------------------------------------------------------
ANY ANY empty Name is in use dns.NameUsed
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
NONE ANY empty Name is not in use dns.NameNotUsed
NONE rrset empty RRset does not exist dns.RRsetNotUsed
zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty. If you have decided on the
prerequisites you can tell what RRs should be added or deleted. The next table
shows the options you have and what functions to call.
3.4.2.6 - Table Of Metavalues Used In Update Section
CLASS TYPE RDATA Meaning Function
---------------------------------------------------------------
ANY ANY empty Delete all RRsets from name dns.RemoveName
ANY rrset empty Delete an RRset dns.RemoveRRset
NONE rrset rr Delete an RR from RRset dns.Remove
zone rrset rr Add to an RRset dns.Insert
TRANSACTION SIGNATURE
A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
"so6ZGir4GPAqINNh9U5c3A==":
If an incoming message contains a TSIG record it MUST be the last record in
the additional section (RFC2845 3.2). This means that you should make the
call to SetTsig last, right before executing the query. If you make any
changes to the RRset after calling SetTsig() the signature will be incorrect.
c := new(dns.Client)
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
...
// The TSIG RR is calculated and filled in just before the message is sent
When requesting a zone transfer with TSIG (almost all TSIG usage is for zone
transfers), this is the basic use pattern. In this example we request an AXFR
for miek.nl. with the TSIG key named "axfr." and secret
"so6ZGir4GPAqINNh9U5c3A==", using the server 176.58.119.54:
t := new(dns.Transfer)
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope
is checked with TSIG. If something is not correct an error is returned.
Basic use pattern validating and replying to a message that has TSIG set.
server := &dns.Server{Addr: ":53", Net: "udp"}
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
go server.ListenAndServe()
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has a TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else {
// *Msg r has a TSIG record but it was not validated
}
}
w.WriteMsg(m)
}
PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
information.
EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:
o := new(dns.OPT)
o.Hdr.Name = "." // MUST be the root zone, per definition.
o.Hdr.Rrtype = dns.TypeOPT
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:
// o is a dns.OPT
for _, s := range o.Option {
switch e := s.(type) {
case *dns.EDNS0_NSID:
// do stuff with e.Nsid
case *dns.EDNS0_SUBNET:
// access e.Family, e.Address, etc.
}
}
SIG(0)
From RFC 2931:
SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns

@ -0,0 +1,37 @@
package dns
//go:generate go run duplicate_generate.go
// IsDuplicate checks whether r1 and r2 are duplicates of each other, excluding the TTL.
// So this means the header data is equal *and* the RDATA is the same. Returns true
// if so, otherwise false. It's a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool {
// Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) {
return false
}
// Check whether the RDATA is identical.
return r1.isDuplicate(r2)
}
func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RR_Header)
if !ok {
return false
}
if r1.Class != r2.Class {
return false
}
if r1.Rrtype != r2.Rrtype {
return false
}
if !isDuplicateName(r1.Name, r2.Name) {
return false
}
// ignore TTL
return true
}
// isDuplicateName checks if the domain names s1 and s2 are equal.
func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }
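// exampleIsDuplicate is an editor's illustrative sketch and is not part of the
// upstream miekg/dns sources: two A records that differ only in TTL are still
// considered duplicates, since the TTL is deliberately ignored.
func exampleIsDuplicate() bool {
	a1, _ := NewRR("example.org. 300 IN A 192.0.2.1")
	a2, _ := NewRR("example.org. 600 IN A 192.0.2.1")
	return IsDuplicate(a1, a2) // true
}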

vendor/github.com/miekg/dns/edns.go (generated, vendored, 675 lines)

@ -0,0 +1,675 @@
package dns
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"net"
"strconv"
)
// EDNS0 Option codes.
const (
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
EDNS0NSID = 0x3 // nsid (See RFC 5001)
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
EDNS0DHU = 0x6 // DS Hash Understood
EDNS0N3U = 0x7 // NSEC3 Hash Understood
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK
)
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
Hdr RR_Header
Option []EDNS0 `dns:"opt"`
}
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
s += "flags: do; "
} else {
s += "flags: ; "
}
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
for _, o := range rr.Option {
switch o.(type) {
case *EDNS0_NSID:
s += "\n; NSID: " + o.String()
h, e := o.pack()
var r string
if e == nil {
for _, c := range h {
r += "(" + string(c) + ")"
}
s += " " + r
}
case *EDNS0_SUBNET:
s += "\n; SUBNET: " + o.String()
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
case *EDNS0_UL:
s += "\n; UPDATE LEASE: " + o.String()
case *EDNS0_LLQ:
s += "\n; LONG LIVED QUERIES: " + o.String()
case *EDNS0_DAU:
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
case *EDNS0_DHU:
s += "\n; DS HASH UNDERSTOOD: " + o.String()
case *EDNS0_N3U:
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
case *EDNS0_PADDING:
s += "\n; PADDING: " + o.String()
}
}
return s
}
func (rr *OPT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, o := range rr.Option {
l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := o.pack()
l += len(lo)
}
return l
}
func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on OPT")
}
func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined.
func (rr *OPT) Version() uint8 {
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
}
// SetVersion sets the version of EDNS. This is usually zero.
func (rr *OPT) SetVersion(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
}
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() int {
return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
}
// SetExtendedRcode sets the EDNS extended RCODE field.
//
// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0.
func (rr *OPT) SetExtendedRcode(v uint16) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
}
// UDPSize returns the UDP buffer size.
func (rr *OPT) UDPSize() uint16 {
return rr.Hdr.Class
}
// SetUDPSize sets the UDP buffer size.
func (rr *OPT) SetUDPSize(size uint16) {
rr.Hdr.Class = size
}
// Do returns the value of the DO (DNSSEC OK) bit.
func (rr *OPT) Do() bool {
return rr.Hdr.Ttl&_DO == _DO
}
// SetDo sets the DO (DNSSEC OK) bit.
// If we pass an argument, set the DO bit to that value.
// It is possible to pass 2 or more arguments; any arguments after the first are silently ignored.
func (rr *OPT) SetDo(do ...bool) {
if len(do) == 1 {
if do[0] {
rr.Hdr.Ttl |= _DO
} else {
rr.Hdr.Ttl &^= _DO
}
} else {
rr.Hdr.Ttl |= _DO
}
}
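// Editor's note (descriptive comment only, not from the upstream sources): the
// OPT pseudo-RR re-purposes its header fields, which is why the accessors above
// read and write rr.Hdr.Ttl and rr.Hdr.Class directly:
//
//	Hdr.Class            requestor's UDP payload size (UDPSize/SetUDPSize)
//	Hdr.Ttl bits 31..24  extended RCODE (upper 8 bits of the 12-bit RCODE)
//	Hdr.Ttl bits 23..16  EDNS version
//	Hdr.Ttl bit  15      DO (DNSSEC OK)
//	Hdr.Ttl bits 14..0   Z, must be zero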
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
Option() uint16
// pack returns the bytes of the option data.
pack() ([]byte, error)
// unpack sets the data as found in the buffer. It also sets
// the length of the slice as the length of the option data.
unpack([]byte) error
// String returns the string representation of the option.
String() string
// copy returns a deep-copy of the option.
copy() EDNS0
}
// EDNS0_NSID option is used to retrieve a nameserver
// identifier. When sending a request, Nsid must be set to the empty string.
// The identifier is an opaque string encoded as hex.
// Basic use pattern for creating an NSID option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_NSID)
// e.Code = dns.EDNS0NSID
// e.Nsid = "AA"
// o.Option = append(o.Option, e)
type EDNS0_NSID struct {
Code uint16 // Always EDNS0NSID
Nsid string // This string needs to be hex encoded
}
func (e *EDNS0_NSID) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Nsid)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
func (e *EDNS0_NSID) String() string { return e.Nsid }
func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} }
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
// an idea of where the client lives. See RFC 7871. It can then give back a different
// answer depending on the location or network topology.
// Basic use pattern for creating a subnet option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
// o.Option = append(o.Option, e)
//
// This code will parse all the available bits when unpacking (up to optlen).
// When packing it will apply SourceNetmask. If you need more advanced logic,
// patches welcome and good luck.
type EDNS0_SUBNET struct {
Code uint16 // Always EDNS0SUBNET
Family uint16 // 1 for IP, 2 for IP6
SourceNetmask uint8
SourceScope uint8
Address net.IP
}
// Option implements the EDNS0 interface.
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint16(b[0:], e.Family)
b[2] = e.SourceNetmask
b[3] = e.SourceScope
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// We may not need to complain either
if e.SourceNetmask != 0 {
return nil, errors.New("dns: bad address family")
}
case 1:
if e.SourceNetmask > net.IPv4len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address.To4()) != net.IPv4len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
case 2:
if e.SourceNetmask > net.IPv6len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address) != net.IPv6len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
default:
return nil, errors.New("dns: bad address family")
}
return b, nil
}
func (e *EDNS0_SUBNET) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Family = binary.BigEndian.Uint16(b)
e.SourceNetmask = b[2]
e.SourceScope = b[3]
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// It's okay to accept such a packet
if e.SourceNetmask != 0 {
return errors.New("dns: bad address family")
}
e.Address = net.IPv4(0, 0, 0, 0)
case 1:
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv4len)
copy(addr, b[4:])
e.Address = addr.To16()
case 2:
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv6len)
copy(addr, b[4:])
e.Address = addr
default:
return errors.New("dns: bad address family")
}
return nil
}
func (e *EDNS0_SUBNET) String() (s string) {
if e.Address == nil {
s = "<nil>"
} else if e.Address.To4() != nil {
s = e.Address.String()
} else {
s = "[" + e.Address.String() + "]"
}
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
return
}
func (e *EDNS0_SUBNET) copy() EDNS0 {
return &EDNS0_SUBNET{
e.Code,
e.Family,
e.SourceNetmask,
e.SourceScope,
e.Address,
}
}
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_COOKIE)
// e.Code = dns.EDNS0COOKIE
// e.Cookie = "24a5ac.."
// o.Option = append(o.Option, e)
//
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
// always 8 bytes. It may then optionally be followed by the server cookie. The server
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
//
// cCookie := o.Cookie[:16]
// sCookie := o.Cookie[16:]
//
// There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct {
Code uint16 // Always EDNS0COOKIE
Cookie string // Hex-encoded cookie data
}
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Cookie)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean
// up after themselves. This is a draft RFC and more information can be found at
// https://tools.ietf.org/html/draft-sekar-dns-ul-02
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_UL)
// e.Code = dns.EDNS0UL
// e.Lease = 120 // in seconds
// o.Option = append(o.Option, e)
type EDNS0_UL struct {
Code uint16 // Always EDNS0UL
Lease uint32
KeyLease uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) }
func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} }
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) {
var b []byte
if e.KeyLease == 0 {
b = make([]byte, 4)
} else {
b = make([]byte, 8)
binary.BigEndian.PutUint32(b[4:], e.KeyLease)
}
binary.BigEndian.PutUint32(b, e.Lease)
return b, nil
}
func (e *EDNS0_UL) unpack(b []byte) error {
switch len(b) {
case 4:
e.KeyLease = 0
case 8:
e.KeyLease = binary.BigEndian.Uint32(b[4:])
default:
return ErrBuf
}
e.Lease = binary.BigEndian.Uint32(b)
return nil
}
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
// Implemented for completeness, as the EDNS0 type code is assigned.
type EDNS0_LLQ struct {
Code uint16 // Always EDNS0LLQ
Version uint16
Opcode uint16
Error uint16
Id uint64
LeaseLife uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
func (e *EDNS0_LLQ) pack() ([]byte, error) {
b := make([]byte, 18)
binary.BigEndian.PutUint16(b[0:], e.Version)
binary.BigEndian.PutUint16(b[2:], e.Opcode)
binary.BigEndian.PutUint16(b[4:], e.Error)
binary.BigEndian.PutUint64(b[6:], e.Id)
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
return b, nil
}
func (e *EDNS0_LLQ) unpack(b []byte) error {
if len(b) < 18 {
return ErrBuf
}
e.Version = binary.BigEndian.Uint16(b[0:])
e.Opcode = binary.BigEndian.Uint16(b[2:])
e.Error = binary.BigEndian.Uint16(b[4:])
e.Id = binary.BigEndian.Uint64(b[6:])
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
return nil
}
func (e *EDNS0_LLQ) String() string {
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
return s
}
func (e *EDNS0_LLQ) copy() EDNS0 {
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
}
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct {
Code uint16 // Always EDNS0DAU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DAU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := AlgorithmToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
type EDNS0_DHU struct {
Code uint16 // Always EDNS0DHU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DHU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
type EDNS0_N3U struct {
Code uint16 // Always EDNS0N3U
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_N3U) String() string {
// Re-use the hash map
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct {
Code uint16 // Always EDNS0EXPIRE
Expire uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} }
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, e.Expire)
return b, nil
}
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) == 0 {
// zero-length EXPIRE query, see RFC 7314 Section 2
return nil
}
if len(b) < 4 {
return ErrBuf
}
e.Expire = binary.BigEndian.Uint32(b)
return nil
}
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
// (RFC6891), although any unassigned code can actually be used. The content of
// the option is made available in Data, unaltered.
// Basic use pattern for creating a local option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_LOCAL)
// e.Code = dns.EDNS0LOCALSTART
// e.Data = []byte{72, 82, 74}
// o.Option = append(o.Option, e)
type EDNS0_LOCAL struct {
Code uint16
Data []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
func (e *EDNS0_LOCAL) copy() EDNS0 {
b := make([]byte, len(e.Data))
copy(b, e.Data)
return &EDNS0_LOCAL{e.Code, b}
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data))
copied := copy(b, e.Data)
if copied != len(e.Data) {
return nil, ErrBuf
}
return b, nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
e.Data = make([]byte, len(b))
copied := copy(e.Data, b)
if copied != len(b) {
return ErrBuf
}
return nil
}
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
// the TCP connection alive. See RFC 7828.
type EDNS0_TCP_KEEPALIVE struct {
Code uint16 // Always EDNS0TCPKEEPALIVE
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
}
// Option implements the EDNS0 interface.
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
if e.Timeout != 0 && e.Length != 2 {
return nil, errors.New("dns: timeout specified but length is not 2")
}
if e.Timeout == 0 && e.Length != 0 {
return nil, errors.New("dns: timeout not specified but length is not 0")
}
b := make([]byte, 4+e.Length)
binary.BigEndian.PutUint16(b[0:], e.Code)
binary.BigEndian.PutUint16(b[2:], e.Length)
if e.Length == 2 {
binary.BigEndian.PutUint16(b[4:], e.Timeout)
}
return b, nil
}
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Length = binary.BigEndian.Uint16(b[2:4])
if e.Length != 0 && e.Length != 2 {
return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
}
if e.Length == 2 {
if len(b) < 6 {
return ErrBuf
}
e.Timeout = binary.BigEndian.Uint16(b[4:6])
}
return nil
}
func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
s = "use tcp keep-alive"
if e.Length == 0 {
s += ", timeout omitted"
} else {
s += fmt.Sprintf(", timeout %dms", uint32(e.Timeout)*100) // widen first so the uint16 value does not overflow
}
return
}
func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
// EDNS0_PADDING option is used to add padding to a request/response. The default
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
// compression is applied before encryption which may break signatures.
type EDNS0_PADDING struct {
Padding []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
func (e *EDNS0_PADDING) copy() EDNS0 {
b := make([]byte, len(e.Padding))
copy(b, e.Padding)
return &EDNS0_PADDING{b}
}

@ -0,0 +1,93 @@
package dns
import (
"net"
"reflect"
"strconv"
)
// NumField returns the number of rdata fields r has.
func NumField(r RR) int {
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
}
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
// string where the types are concatenated using a space.
// Accessing non-existing fields will cause a panic.
func Field(r RR, i int) string {
if i == 0 {
return ""
}
d := reflect.ValueOf(r).Elem().Field(i)
switch d.Kind() {
case reflect.String:
return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(d.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.FormatUint(d.Uint(), 10)
case reflect.Slice:
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv4len {
return ""
}
if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint())).String()
}
return net.IPv4(byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`:
if d.Len() < net.IPv6len {
return ""
}
return net.IP{
byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint()),
byte(d.Index(4).Uint()),
byte(d.Index(5).Uint()),
byte(d.Index(6).Uint()),
byte(d.Index(7).Uint()),
byte(d.Index(8).Uint()),
byte(d.Index(9).Uint()),
byte(d.Index(10).Uint()),
byte(d.Index(11).Uint()),
byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint()),
}.String()
case `dns:"nsec"`:
if d.Len() == 0 {
return ""
}
s := Type(d.Index(0).Uint()).String()
for i := 1; i < d.Len(); i++ {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
default:
// if it does not have a tag it is a string slice
fallthrough
case `dns:"txt"`:
if d.Len() == 0 {
return ""
}
s := d.Index(0).String()
for i := 1; i < d.Len(); i++ {
s += " " + d.Index(i).String()
}
return s
}
}
return ""
}
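A quick illustration of NumField and Field against an MX record; it assumes dns.NewRR from the vendored scanner, and the expected outputs in the comments are my own reading of the code above.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
	if err != nil {
		panic(err)
	}
	// NumField counts the rdata fields, i.e. preference and mail server.
	fmt.Println(dns.NumField(rr)) // 2
	// Field is 1-indexed: field 1 is the MX preference, field 2 the target.
	fmt.Println(dns.Field(rr, 1)) // 10
	fmt.Println(dns.Field(rr, 2)) // mx.miek.nl.
}
```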

@ -0,0 +1,32 @@
// +build fuzz
package dns
import "strings"
func Fuzz(data []byte) int {
msg := new(Msg)
if err := msg.Unpack(data); err != nil {
return 0
}
if _, err := msg.Pack(); err != nil {
return 0
}
return 1
}
func FuzzNewRR(data []byte) int {
str := string(data)
// Do not fuzz lines that include the $INCLUDE keyword, and hint the fuzzer
// to avoid them.
// See GH#1025 for context.
if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
return -1
}
if _, err := NewRR(str); err != nil {
return 0
}
return 1
}

@ -0,0 +1,247 @@
package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a complete new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: int(start),
start: int(start),
end: int(end),
step: int(step),
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int
start int
end int
step int
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
if r.start+offset < 0 || int64(r.end) + int64(offset) > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for the optional width and base, if necessary.
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base {
case "o", "d", "x", "X":
default:
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, int(offset), ""
}
return "%0" + widthStr + base, int(offset), ""
}
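A sketch of how $GENERATE is consumed in practice through the public ZoneParser API (NewZoneParser, Next, Err, used above); the zone text, names and addresses are made-up examples.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := `$ORIGIN example.org.
$TTL 3600
$GENERATE 1-4 host-$ A 10.0.0.$
`
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "")
	// The $GENERATE directive expands into host-1 ... host-4 A records.
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr.String())
	}
	if err := zp.Err(); err != nil {
		panic(err)
	}
}
```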

@ -0,0 +1,11 @@
module github.com/miekg/dns
go 1.12
require (
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
)

@ -0,0 +1,39 @@
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc=
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0=
golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@ -0,0 +1,212 @@
package dns
// Holds a bunch of helper functions for dealing with labels.
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s, ".") will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if len(s) == 0 {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if IsFqdn(s) {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
}
switch len(idx) {
case 0:
return nil
case 1:
// no-op
default:
for _, end := range idx[1:] {
labels = append(labels, s[begin:end-1])
begin = end
}
}
return append(labels, s[begin:fqdnEnd])
}
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
// the first check: root label
if s1 == "." || s2 == "." {
return 0
}
l1 := Split(s1)
l2 := Split(s2)
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
n++
} else {
return
}
for {
if i1 < 0 || i2 < 0 {
break
}
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
n++
} else {
break
}
j1--
i1--
j2--
i2--
}
return
}
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
}
off := 0
end := false
for {
off, end = NextLabel(s, off)
labels++
if end {
return
}
}
}
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
}
idx := make([]int, 1, 3)
off := 0
end := false
for {
off, end = NextLabel(s, off)
if end {
return idx
}
idx = append(idx, off)
}
}
// NextLabel returns the index of the start of the next label in the
// string s starting at offset.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
if s == "" {
return 0, true
}
for i = offset; i < len(s)-1; i++ {
if s[i] != '.' {
continue
}
j := i - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-i)%2 == 0 {
continue
}
return i + 1, false
}
return i + 1, true
}
// PrevLabel returns the index of the label when starting from the right and
// jumping n labels to the left.
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if s == "" {
return 0, true
}
if n == 0 {
return len(s), false
}
l := len(s) - 1
if s[l] == '.' {
l--
}
for ; l >= 0 && n > 0; l-- {
if s[l] != '.' {
continue
}
j := l - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-l)%2 == 0 {
continue
}
n--
if n == 0 {
return l + 1, false
}
}
return 0, n > 1
}
// equal compares a and b while ignoring case. It returns true when equal otherwise false.
func equal(a, b string) bool {
// might be lifted into API function.
la := len(a)
lb := len(b)
if la != lb {
return false
}
for i := la - 1; i >= 0; i-- {
ai := a[i]
bi := b[i]
if ai >= 'A' && ai <= 'Z' {
ai |= 'a' - 'A'
}
if bi >= 'A' && bi <= 'Z' {
bi |= 'a' - 'A'
}
if ai != bi {
return false
}
}
return true
}
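A short usage sketch of these label helpers; the expected values in the comments are taken directly from the doc comments above.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.miek.nl."))               // [www miek nl]
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl.")) // 2 (miek and nl in common)
	fmt.Println(dns.CountLabel("www.miek.nl."))                    // 3
	fmt.Println(dns.Split("www.miek.nl."))                         // [0 4 9]
}
```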

@ -0,0 +1,44 @@
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
import (
"context"
"net"
"syscall"
"golang.org/x/sys/unix"
)
const supportsReusePort = true
func reuseportControl(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
})
if err != nil {
return err
}
return opErr
}
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.Listen(context.Background(), network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.ListenPacket(context.Background(), network, addr)
}
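listenTCP and listenUDP are consumed by Server.ListenAndServe (vendored further down in server.go). A hedged sketch of what ReusePort enables on platforms with SO_REUSEPORT: two listeners sharing one UDP address. The address and port here are arbitrary placeholders.

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// With ReusePort set, listenUDP above applies SO_REUSEPORT, so several
	// goroutines (or processes) can bind the same address on supported platforms.
	for i := 0; i < 2; i++ {
		go func() {
			srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp", ReusePort: true}
			if err := srv.ListenAndServe(); err != nil {
				log.Fatal(err)
			}
		}()
	}
	select {} // block forever; both servers answer via the DefaultServeMux
}
```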

@ -0,0 +1,23 @@
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
import "net"
const supportsReusePort = false
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.Listen(network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.ListenPacket(network, addr)
}

vendor/github.com/miekg/dns/msg.go generated vendored
File diff suppressed because it is too large

@ -0,0 +1,766 @@
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"strings"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; if there is no tag it is the name
// of the type they pack/unpack (string, int, etc.). We prefix all of them with unpackData or packData, so packDataA or
// packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
switch len(a) {
case net.IPv4len, net.IPv6len:
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
switch len(aaaa) {
case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) {
return off, nil
}
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil {
return len(msg), err
}
return off, nil
}
// helpers for the helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
func fromBase32(s []byte) (buf []byte, err error) {
for i, b := range s {
if b >= 'a' && b <= 'z' {
s[i] = b - 32
}
}
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32HexNoPadEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string {
return base32HexNoPadEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// dynamicUpdate returns true if the Rdlength is zero.
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return msg[off], off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = i
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
off++
if off+l > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
var s strings.Builder
consumed := 0
for i, b := range msg[off : off+l] {
switch {
case b == '"' || b == '\\':
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteByte('\\')
s.WriteByte(b)
consumed = i + 1
case b < ' ' || b > '~': // unprintable
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteString(escapeByte(b))
consumed = i + 1
}
}
if consumed == 0 { // no escaping needed
return string(msg[off : off+l]), off + l, nil
}
s.Write(msg[off+consumed : off+l])
return s.String(), off + l, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is base64 encoded value, so we don't need an explicit length
// to be set. Thus far all RRs that have base64 encoded fields have those as their
// last one. What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is hex encoded value, so we don't need an explicit length
// to be set. NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+len(h) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
var code uint16
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
e := makeDataOpt(code)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func makeDataOpt(code uint16) EDNS0 {
switch code {
case EDNS0NSID:
return new(EDNS0_NSID)
case EDNS0SUBNET:
return new(EDNS0_SUBNET)
case EDNS0COOKIE:
return new(EDNS0_COOKIE)
case EDNS0EXPIRE:
return new(EDNS0_EXPIRE)
case EDNS0UL:
return new(EDNS0_UL)
case EDNS0LLQ:
return new(EDNS0_LLQ)
case EDNS0DAU:
return new(EDNS0_DAU)
case EDNS0DHU:
return new(EDNS0_DHU)
case EDNS0N3U:
return new(EDNS0_N3U)
case EDNS0PADDING:
return new(EDNS0_PADDING)
default:
e := new(EDNS0_LOCAL)
e.Code = code
return e
}
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j, b := range msg[off : off+length] {
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
// typeBitMapLen is a helper function which computes the "maximum" length of
// the NSEC Type BitMap field.
func typeBitMapLen(bitmap []uint16) int {
var l int
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
l += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
// packDataNsec would return Error{err: "nsec bits out of order"} here, but
// when computing the length, we want to be liberal.
continue
}
lastwindow, lastlength = window, length
}
l += int(lastlength) + 2
return l
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error
for _, name := range names {
off, err = packDomainName(name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if afdlen > len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL length too long"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
off += copy(ip, msg[off:off+afdlen])
if afdlen > 0 {
last := ip[afdlen-1]
if last == 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
ipnet := net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
}
network := ipnet.IP.Mask(ipnet.Mask)
if !network.Equal(ipnet.IP) {
return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"}
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: ipnet,
}, off, nil
}
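These pack/unpack helpers are internal; they are driven by Msg.Pack and Msg.Unpack (see the suppressed msg.go diff). A minimal round-trip sketch, assuming dns.Msg, SetQuestion and dns.TypeAAAA from the public API:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeAAAA)

	wire, err := m.Pack() // drives the packData*/packUint* helpers above
	if err != nil {
		panic(err)
	}

	var parsed dns.Msg
	if err := parsed.Unpack(wire); err != nil { // and the unpackData* helpers
		panic(err)
	}
	fmt.Println(parsed.Question[0].Name) // example.org.
}
```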

@ -0,0 +1,111 @@
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// record adding as many records as possible without exceeding the
// requested buffer size.
//
// The TC bit will be set if any records were excluded from the message.
// This indicates to the client that it should retry over TCP.
//
// According to RFC 2181, the TC bit should only be set if not all of the
// "required" RRs can be included in the response. Unfortunately, we have
// no way of knowing which RRs are required so we set the TC bit if any RR
// had to be omitted from the response.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that the payload size in an OPT record
// less than 512 bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < 512 {
size = 512
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
_, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// See the function documentation for when we set this.
dns.Truncated = len(dns.Answer) > numAnswer ||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
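A self-contained sketch of Truncate in action; it assumes Msg.SetReply, Msg.SetEdns0, Msg.IsEdns0 and OPT.UDPSize from the public API, and the record contents are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	req := new(dns.Msg)
	req.SetQuestion("example.org.", dns.TypeTXT)
	req.SetEdns0(1232, false) // client advertises a 1232-byte UDP buffer

	reply := new(dns.Msg)
	reply.SetReply(req)
	for i := 0; i < 200; i++ {
		txt, _ := dns.NewRR(fmt.Sprintf(`example.org. 3600 IN TXT "record %d"`, i))
		reply.Answer = append(reply.Answer, txt)
	}

	// Respect the client's advertised buffer size, falling back to
	// MinMsgSize for plain UDP without an OPT record.
	size := dns.MinMsgSize
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	reply.Truncate(size)
	fmt.Println(len(reply.Answer), reply.Truncated) // far fewer than 200, true
}
```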

@ -0,0 +1,95 @@
package dns
import (
"crypto/sha1"
"encoding/hex"
"strings"
)
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
if ha != SHA1 {
return ""
}
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil {
return ""
}
wireSalt = wireSalt[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
s := sha1.New()
// k = 0
s.Write(name)
s.Write(wireSalt)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
s.Write(nsec3)
s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
// Cover returns true if a name is covered by the NSEC3 record.
func (rr *NSEC3) Cover(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
nextHash := rr.NextDomain
// if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
return true
}
if ownerHash > nextHash { // end of zone
if nameHash > ownerHash { // covered since there is nothing after ownerHash
return true
}
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
}
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
return false
}
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
}
// Match returns true if a name matches the NSEC3 record
func (rr *NSEC3) Match(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
if ownerHash == nameHash {
return true
}
return false
}
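A hedged sketch of HashName and NSEC3.Match working together; it assumes dns.SHA1 and dns.NewRR from elsewhere in the vendored package, and the zone, salt and iteration count are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Hash a name the way an NSEC3 chain would, per RFC 5155.
	hashed := dns.HashName("example.org.", dns.SHA1, 2, "DEAD")
	fmt.Println(hashed)

	// An NSEC3 record matches a name when the owner's first label equals
	// that hash, computed with the record's own parameters.
	rr, err := dns.NewRR(hashed + ".example.org. 3600 IN NSEC3 1 0 2 DEAD " + hashed + " A RRSIG")
	if err != nil {
		panic(err)
	}
	nsec3 := rr.(*dns.NSEC3)
	fmt.Println(nsec3.Match("example.org.")) // true
}
```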

@ -0,0 +1,113 @@
package dns
import "strings"
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
Unpack([]byte) (int, error)
// Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
generator func() PrivateRdata // for copy
}
// Header return the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
if err := r.Data.Copy(rr.Data); err != nil {
panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
return off, nil
}
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{"", err.Error(), l}
}
return nil
}
func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
}
// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(StringToType, rtypestr)
}
}
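A minimal sketch of a user-defined rdata type wired in through PrivateHandle. The ISBN type, its type code (0xFF00, chosen from the RFC 6895 private-use range) and the record text are illustrative only; dns.ErrRdata is assumed from the package's error set.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// ISBN is a toy rdata type carrying a single free-form string.
type ISBN struct {
	Data string
}

func (rd *ISBN) String() string           { return rd.Data }
func (rd *ISBN) Parse(txt []string) error { rd.Data = strings.Join(txt, " "); return nil }
func (rd *ISBN) Pack(buf []byte) (int, error) {
	return copy(buf, rd.Data), nil
}
func (rd *ISBN) Unpack(buf []byte) (int, error) {
	rd.Data = string(buf)
	return len(buf), nil
}
func (rd *ISBN) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*ISBN)
	if !ok {
		return dns.ErrRdata
	}
	d.Data = rd.Data
	return nil
}
func (rd *ISBN) Len() int { return len(rd.Data) }

const TypeISBN = 0xFF00 // private-use RR type code (hypothetical)

func main() {
	dns.PrivateHandle("ISBN", TypeISBN, func() dns.PrivateRdata { return new(ISBN) })
	defer dns.PrivateHandleRemove(TypeISBN)

	rr, err := dns.NewRR("example.org. 3600 IN ISBN 90-74445-35-8")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr.String())
}
```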

@ -0,0 +1,52 @@
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is a map of opcodes to strings.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is a map of rcodes to strings.
var StringToRcode = reverseInt(RcodeToString)
func init() {
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
StringToRcode["NOTIMPL"] = RcodeNotImplemented
}
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}
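A tiny illustration of the reverse maps; the expected values in the comments follow from the standard DNS code points.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// The reverse maps turn textual mnemonics back into their numeric codes.
	fmt.Println(dns.StringToType["MX"])        // 15
	fmt.Println(dns.StringToClass["IN"])       // 1
	fmt.Println(dns.StringToRcode["NXDOMAIN"]) // 3
	fmt.Println(dns.StringToRcode["NOTIMPL"])  // 4, the preserved typo for NOTIMP
}
```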

@ -0,0 +1,86 @@
package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil, a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
// Save the keys, so we don't have to call normalizedString twice.
keys := make([]*string, 0, len(rrs))
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if mr, ok := m[key]; ok {
// Shortest TTL wins.
rh, mrh := r.Header(), mr.Header()
if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
}
continue
}
m[key] = r
}
// If the length of the result map equals the amount of RRs we got,
// it means they were all different. We can then just return the original rrset.
if len(m) == len(rrs) {
return rrs
}
j := 0
for i, r := range rrs {
// If keys[i] lives in the map, we should copy and remove it.
if _, ok := m[*keys[i]]; ok {
delete(m, *keys[i])
rrs[j] = r
j++
}
if len(m) == 0 {
break
}
}
return rrs[:j]
}
// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
func normalizedString(r RR) string {
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
b := []byte(r.String())
// find the first non-escaped tab, then another, so we capture where the TTL lives.
esc := false
ttlStart, ttlEnd := 0, 0
for i := 0; i < len(b) && ttlEnd == 0; i++ {
switch {
case b[i] == '\\':
esc = !esc
case b[i] == '\t' && !esc:
if ttlStart == 0 {
ttlStart = i
continue
}
if ttlEnd == 0 {
ttlEnd = i
}
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
b[i] += 32
default:
esc = false
}
}
// remove TTL.
copy(b[ttlStart:], b[ttlEnd:])
cut := ttlEnd - ttlStart
return string(b[:len(b)-cut])
}
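A short sketch of Dedup, assuming dns.NewRR from the vendored scanner. Note how the duplicate differs only in case and TTL, and how the surviving record keeps the lower TTL.

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	mk := func(s string) dns.RR {
		rr, err := dns.NewRR(s)
		if err != nil {
			panic(err)
		}
		return rr
	}
	rrs := []dns.RR{
		mk("miek.nl. 3600 IN A 192.0.2.1"),
		mk("MIEK.NL. 1800 IN A 192.0.2.1"), // duplicate: differs only in case and TTL
		mk("miek.nl. 3600 IN A 192.0.2.2"),
	}
	rrs = dns.Dedup(rrs, nil)
	for _, rr := range rrs {
		fmt.Println(rr.String()) // two records remain; the first now carries TTL 1800
	}
}
```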

vendor/github.com/miekg/dns/scan.go generated vendored
File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,122 @@
package dns
import (
"sync"
)
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
//
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
// redirected to the parent zone (if that is also registered), otherwise
// the child gets the query.
//
// ServeMux is also safe for concurrent access from multiple goroutines.
//
// The zero ServeMux is empty and ready for use.
type ServeMux struct {
z map[string]Handler
m sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
if mux.z == nil {
return nil
}
q = CanonicalName(q)
var handler Handler
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
if h, ok := mux.z[q[off:]]; ok {
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
if mux.z == nil {
mux.z = make(map[string]Handler)
}
mux.z[CanonicalName(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, CanonicalName(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose pattern most
// closely matches the request message.
//
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
// are redirected to the parent zone (if that is also registered),
// otherwise the child gets the query.
//
// If no handler is found, or there is no question, a standard SERVFAIL
// message is returned.
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
var h Handler
if len(req.Question) >= 1 { // allow more than one question
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
}
if h != nil {
h.ServeDNS(w, req)
} else {
HandleFailed(w, req)
}
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
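A minimal server sketch tying ServeMux to the Server type vendored below; it assumes Msg.SetReply, dns.NewRR and ResponseWriter.WriteMsg from the public API, and the zone, record and address are placeholders.

```go
package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Register a handler for example.org. on the package-level DefaultServeMux.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, req *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(req)
		rr, _ := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
		m.Answer = append(m.Answer, rr)
		w.WriteMsg(m)
	})

	// Queries for zones with no registered handler fall back to a "."
	// handler if one exists; otherwise they get a SERVFAIL response.
	srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}
	log.Fatal(srv.ListenAndServe())
}
```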

@ -0,0 +1,764 @@
// DNS server implementation.
package dns
import (
"context"
"crypto/tls"
"encoding/binary"
"errors"
"io"
"net"
"strings"
"sync"
"time"
)
// Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128
// aLongTimeAgo is a non-zero time, far in the past, used for
// immediate cancelation of network operations.
var aLongTimeAgo = time.Unix(1, 0)
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
type ResponseWriter interface {
// LocalAddr returns the net.Addr of the server
LocalAddr() net.Addr
// RemoteAddr returns the net.Addr of the client that sent the current request.
RemoteAddr() net.Addr
// WriteMsg writes a reply back to the client.
WriteMsg(*Msg) error
// Write writes a raw buffer back to the client.
Write([]byte) (int, error)
// Close closes the connection.
Close() error
// TsigStatus returns the status of the Tsig.
TsigStatus() error
// TsigTimersOnly sets the tsig timers only boolean.
TsigTimersOnly(bool)
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the DNS package will not do anything with the connection.
Hijack()
}
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
// when available.
type ConnectionStater interface {
ConnectionState() *tls.ConnectionState
}
type response struct {
closed bool // connection has been closed
hijacked bool // connection has been hijacked by handler
tsigTimersOnly bool
tsigStatus error
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
writer Writer // writer to output the raw DNS bits
}
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
// does not matter if this write fails
w.WriteMsg(m)
}
// ListenAndServe starts a server on the specified address and network, invoking handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
return server.ListenAndServe()
}
// ActivateAndServe activates a server with a listener from systemd;
// l and p should not both be non-nil.
// If both l and p are non-nil, only p will be used.
// Invoke handler for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
// Writer writes raw DNS messages; each call to Write should send an entire message.
type Writer interface {
io.Writer
}
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader interface
// using the readTCP and readUDP func of the embedded Server.
type defaultReader struct {
*Server
}
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout)
}
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
// Implementations should never return a nil Reader.
type DecorateReader func(Reader) Reader
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
// Implementations should never return a nil Writer.
type DecorateWriter func(Writer) Writer
// A Server defines parameters for running a DNS server.
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
Handler Handler
// Default buffer size to use to read incoming UDP messages. If not set
// it defaults to MinMsgSize (512 B).
UDPSize int
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
ReadTimeout time.Duration
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
WriteTimeout time.Duration
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
IdleTimeout func() time.Duration
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
TsigSecret map[string]string
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
// It is only supported on go1.11+ and when using ListenAndServe.
ReusePort bool
// AcceptMsgFunc will check the incoming message and will reject it early in the process.
// By default DefaultMsgAcceptFunc will be used.
MsgAcceptFunc MsgAcceptFunc
// Shutdown handling
lock sync.RWMutex
started bool
shutdown chan struct{}
conns map[net.Conn]struct{}
// A pool for UDP message buffers.
udpPool sync.Pool
}
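// exampleListenAndServe is an editor's illustrative sketch and not part of the
// upstream file: a minimal UDP server for a hypothetical zone "example.org.".
// The address, the zone name and the empty reply are assumptions.
func exampleListenAndServe() error {
	HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
		m := new(Msg)
		m.SetReply(r) // echo the question back with an empty answer section
		w.WriteMsg(m)
	})
	// Handler is left nil, so DefaultServeMux (filled by HandleFunc) is used.
	srv := &Server{Addr: ":8053", Net: "udp"}
	return srv.ListenAndServe()
}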
func (srv *Server) isStarted() bool {
srv.lock.RLock()
started := srv.started
srv.lock.RUnlock()
return started
}
func makeUDPBuffer(size int) func() interface{} {
return func() interface{} {
return make([]byte, size)
}
}
func (srv *Server) init() {
srv.shutdown = make(chan struct{})
srv.conns = make(map[net.Conn]struct{})
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if srv.MsgAcceptFunc == nil {
srv.MsgAcceptFunc = DefaultMsgAcceptFunc
}
if srv.Handler == nil {
srv.Handler = DefaultServeMux
}
srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
}
func unlockOnce(l sync.Locker) func() {
var once sync.Once
return func() { once.Do(l.Unlock) }
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
addr := srv.Addr
if addr == "" {
addr = ":domain"
}
srv.init()
switch srv.Net {
case "tcp", "tcp4", "tcp6":
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "tcp-tls", "tcp4-tls", "tcp6-tls":
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
}
network := strings.TrimSuffix(srv.Net, "-tls")
l, err := listenTCP(network, addr, srv.ReusePort)
if err != nil {
return err
}
l = tls.NewListener(l, srv.TLSConfig)
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "udp", "udp4", "udp6":
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
u := l.(*net.UDPConn)
if e := setUDPSocketOptions(u); e != nil {
return e
}
srv.PacketConn = l
srv.started = true
unlock()
return srv.serveUDP(u)
}
return &Error{err: "bad network"}
}
// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
srv.init()
pConn := srv.PacketConn
l := srv.Listener
if pConn != nil {
// Check that the PacketConn's concrete type is valid and
// its value is not nil
if t, ok := pConn.(*net.UDPConn); ok && t != nil {
if e := setUDPSocketOptions(t); e != nil {
return e
}
srv.started = true
unlock()
return srv.serveUDP(t)
}
}
if l != nil {
srv.started = true
unlock()
return srv.serveTCP(l)
}
return &Error{err: "bad listeners"}
}
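// exampleActivateInheritedListener is an editor's illustrative sketch and not
// part of the upstream file: it serves DNS-over-TCP on a listener the caller
// obtained elsewhere, for instance via systemd socket activation. How the
// listener was created is an assumption.
func exampleActivateInheritedListener(l net.Listener) error {
	srv := &Server{Listener: l, Handler: DefaultServeMux}
	return srv.ActivateAndServe()
}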
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return.
func (srv *Server) Shutdown() error {
return srv.ShutdownContext(context.Background())
}
// ShutdownContext shuts down a server. After a call to ShutdownContext,
// ListenAndServe and ActivateAndServe will return.
//
// A context.Context may be passed to limit how long to wait for connections
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.lock.Lock()
if !srv.started {
srv.lock.Unlock()
return &Error{err: "server not started"}
}
srv.started = false
if srv.PacketConn != nil {
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
if srv.Listener != nil {
srv.Listener.Close()
}
for rw := range srv.conns {
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
srv.lock.Unlock()
if testShutdownNotify != nil {
testShutdownNotify.Broadcast()
}
var ctxErr error
select {
case <-srv.shutdown:
case <-ctx.Done():
ctxErr = ctx.Err()
}
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
return ctxErr
}
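// exampleGracefulShutdown is an editor's illustrative sketch and not part of
// the upstream file: it stops a running server but gives in-flight queries up
// to five seconds (an assumed budget) to drain before the context expires.
func exampleGracefulShutdown(srv *Server) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return srv.ShutdownContext(ctx)
}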
var testShutdownNotify *sync.Cond
// getReadTimeout is a helper func that returns the default timeout (dnsTimeout) unless the server has set ReadTimeout.
func (srv *Server) getReadTimeout() time.Duration {
if srv.ReadTimeout != 0 {
return srv.ReadTimeout
}
return dnsTimeout
}
// serveTCP starts a TCP listener for the server.
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
for srv.isStarted() {
rw, err := l.Accept()
if err != nil {
if !srv.isStarted() {
return nil
}
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
srv.lock.Lock()
// Track the connection to allow unblocking reads on shutdown.
srv.conns[rw] = struct{}{}
srv.lock.Unlock()
wg.Add(1)
go srv.serveTCPConn(&wg, rw)
}
return nil
}
// serveUDP starts a UDP listener for the server.
func (srv *Server) serveUDP(l *net.UDPConn) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
rtimeout := srv.getReadTimeout()
// deadline is not used here
for srv.isStarted() {
m, s, err := reader.ReadUDP(l, rtimeout)
if err != nil {
if !srv.isStarted() {
return nil
}
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
continue
}
return err
}
if len(m) < headerSize {
if cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
continue
}
wg.Add(1)
go srv.serveUDPPacket(&wg, m, l, s)
}
return nil
}
// Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
w := &response{tsigSecret: srv.TsigSecret, tcp: rw}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
timeout := srv.getReadTimeout()
limit := srv.MaxTCPQueries
if limit == 0 {
limit = maxTCPQueries
}
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
m, err := reader.ReadTCP(w.tcp, timeout)
if err != nil {
// TODO(tmthrgd): handle error
break
}
srv.serveDNS(m, w)
if w.closed {
break // Close() was called
}
if w.hijacked {
break // client will call Close() themselves
}
// The first read uses the read timeout, the rest use the
// idle timeout.
timeout = idleTimeout
}
if !w.hijacked {
w.Close()
}
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
wg.Done()
}
// Serve a new UDP request.
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
srv.serveDNS(m, w)
wg.Done()
}
func (srv *Server) serveDNS(m []byte, w *response) {
dh, off, err := unpackMsgHdr(m, 0)
if err != nil {
// Don't reply and let the client hang; it is sending garbage, and any reply could be abused for amplification.
return
}
req := new(Msg)
req.setHdr(dh)
switch action := srv.MsgAcceptFunc(dh); action {
case MsgAccept:
if req.unpack(dh, m, off) == nil {
break
}
fallthrough
case MsgReject, MsgRejectNotImplemented:
opcode := req.Opcode
req.SetRcodeFormatError(req)
req.Zero = false
if action == MsgRejectNotImplemented {
req.Opcode = opcode
req.Rcode = RcodeNotImplemented
}
// Are we allowed to delete any OPT records here?
req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req)
fallthrough
case MsgIgnore:
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
return
}
w.tsigStatus = nil
if w.tsigSecret != nil {
if t := req.IsTsig(); t != nil {
if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
w.tsigStatus = TsigVerify(m, secret, "", false)
} else {
w.tsigStatus = ErrSecret
}
w.tsigTimersOnly = false
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
}
}
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
srv.Handler.ServeDNS(w, req) // Writes back to the client
}
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
// If we race with ShutdownContext, the read deadline may
// have been set in the distant past to unblock the read
// below. We must not override it, otherwise we may block
// ShutdownContext.
srv.lock.RLock()
if srv.started {
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
var length uint16
if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
return nil, err
}
m := make([]byte, length)
if _, err := io.ReadFull(conn, m); err != nil {
return nil, err
}
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, s, nil
}
// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
if w.closed {
return &Error{err: "WriteMsg called after Close"}
}
var data []byte
if w.tsigSecret != nil { // if there are no secrets, don't check for the TSIG (which is a longer check)
if t := m.IsTsig(); t != nil {
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
}
data, err = m.Pack()
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
if w.closed {
return 0, &Error{err: "Write called after Close"}
}
switch {
case w.udp != nil:
return WriteToSessionUDP(w.udp, m, w.udpSession)
case w.tcp != nil:
if len(m) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(m)))
n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
return int(n), err
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
switch {
case w.udp != nil:
return w.udp.LocalAddr()
case w.tcp != nil:
return w.tcp.LocalAddr()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr {
switch {
case w.udpSession != nil:
return w.udpSession.RemoteAddr()
case w.tcp != nil:
return w.tcp.RemoteAddr()
default:
panic("dns: internal error: udpSession and tcp both nil")
}
}
// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
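// exampleTsigHandler is an editor's illustrative sketch and not part of the
// upstream file: a handler that refuses queries whose TSIG did not verify
// against the server's TsigSecret map, and signs its replies with the same
// key. The key name "axfr.example.org." and the algorithm are assumptions.
func exampleTsigHandler(w ResponseWriter, r *Msg) {
	m := new(Msg)
	m.SetReply(r)
	if r.IsTsig() != nil && w.TsigStatus() == nil {
		// Verified: sign the response; WriteMsg generates the TSIG MAC.
		m.SetTsig("axfr.example.org.", HmacSHA256, 300, time.Now().Unix())
	} else {
		m.SetRcode(r, RcodeRefused)
	}
	w.WriteMsg(m)
}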
// Hijack implements the ResponseWriter.Hijack method.
func (w *response) Hijack() { w.hijacked = true }
// Close implements the ResponseWriter.Close method
func (w *response) Close() error {
if w.closed {
return &Error{err: "connection already closed"}
}
w.closed = true
switch {
case w.udp != nil:
// Can't close the udp conn, as that is actually the listener.
return nil
case w.tcp != nil:
return w.tcp.Close()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// ConnectionState implements the ConnectionStater interface's ConnectionState method.
func (w *response) ConnectionState() *tls.ConnectionState {
type tlsConnectionStater interface {
ConnectionState() tls.ConnectionState
}
if v, ok := w.tcp.(tlsConnectionStater); ok {
t := v.ConnectionState()
return &t
}
return nil
}

vendor/github.com/miekg/dns/sig0.go

@ -0,0 +1,209 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"encoding/binary"
"math/big"
"strings"
"time"
)
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
// and Expiration set.
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return nil, ErrKey
}
rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
buf := make([]byte, m.Len()+Len(rr))
mbuf, err := m.PackBuffer(buf)
if err != nil {
return nil, err
}
if &buf[0] != &mbuf[0] {
return nil, ErrBuf
}
off, err := PackRR(rr, buf, len(mbuf), nil, false)
if err != nil {
return nil, err
}
buf = buf[:off:cap(buf)]
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return nil, ErrAlg
}
hasher := hash.New()
// Write SIG rdata
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
// Write message
hasher.Write(buf[:len(mbuf)])
signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
if err != nil {
return nil, err
}
rr.Signature = toBase64(signature)
buf = append(buf, signature...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
rdlen += uint16(len(signature))
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc := binary.BigEndian.Uint16(buf[10:])
adc++
binary.BigEndian.PutUint16(buf[10:], adc)
return buf, nil
}
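// exampleSIG0Sign is an editor's illustrative sketch and not part of the
// upstream file: it signs an outgoing message with SIG(0), assuming the
// caller's public KEY RR is published at "example.org." with the given key
// tag and that signer is the matching ECDSA P-256 private key.
func exampleSIG0Sign(signer crypto.Signer, keyTag uint16, m *Msg) ([]byte, error) {
	sig := new(SIG)
	sig.Algorithm = ECDSAP256SHA256
	sig.SignerName = "example.org."
	sig.KeyTag = keyTag
	now := uint32(time.Now().Unix())
	sig.Inception = now - 300  // allow modest clock skew
	sig.Expiration = now + 300 // the signature is only briefly valid
	// Sign returns the full wire-format message with the SIG record appended.
	return sig.Sign(signer, m)
}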
// Verify validates the message buf using the key k.
// It's assumed that buf is a valid message from which rr was unpacked.
func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
var hash crypto.Hash
switch rr.Algorithm {
case DSA, RSASHA1:
hash = crypto.SHA1
case RSASHA256, ECDSAP256SHA256:
hash = crypto.SHA256
case ECDSAP384SHA384:
hash = crypto.SHA384
case RSASHA512:
hash = crypto.SHA512
default:
return ErrAlg
}
hasher := hash.New()
buflen := len(buf)
qdc := binary.BigEndian.Uint16(buf[4:])
anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:])
offset := headerSize
var err error
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type and Class
offset += 2 + 2
}
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type, Class and TTL
offset += 2 + 2 + 4
if offset+1 >= buflen {
continue
}
rdlen := binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
if offset >= buflen {
return &Error{err: "overflowing unpacking signed message"}
}
// offset should be just prior to SIG
bodyend := offset
// owner name SHOULD be root
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip Type, Class, TTL, RDLen
offset += 2 + 2 + 4 + 2
sigstart := offset
// Skip Type Covered, Algorithm, Labels, Original TTL
offset += 2 + 1 + 1 + 4
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := binary.BigEndian.Uint32(buf[offset:])
offset += 4
incept := binary.BigEndian.Uint32(buf[offset:])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {
return ErrTime
}
// Skip key tag
offset += 2
var signername string
signername, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// If the key has come from the DNS, name compression might
// have mangled the case of the name
if !strings.EqualFold(signername, k.Header().Name) {
return &Error{err: "signer name doesn't match key name"}
}
sigend := offset
hasher.Write(buf[sigstart:sigend])
hasher.Write(buf[:10])
hasher.Write([]byte{
byte((adc - 1) << 8),
byte(adc - 1),
})
hasher.Write(buf[12:bodyend])
hashed := hasher.Sum(nil)
sig := buf[sigend:]
switch k.Algorithm {
case DSA:
pk := k.publicKeyDSA()
sig = sig[1:]
r := new(big.Int).SetBytes(sig[:len(sig)/2])
s := new(big.Int).SetBytes(sig[len(sig)/2:])
if pk != nil {
if dsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
case RSASHA1, RSASHA256, RSASHA512:
pk := k.publicKeyRSA()
if pk != nil {
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
}
case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyECDSA()
r := new(big.Int).SetBytes(sig[:len(sig)/2])
s := new(big.Int).SetBytes(sig[len(sig)/2:])
if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
}
return ErrKeyAlg
}
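// exampleSIG0Verify is an editor's illustrative sketch and not part of the
// upstream file: buf is the raw wire-format message received from the peer
// and key is the peer's published KEY RR (both assumptions). SIG(0) places
// the SIG record last in the additional section.
func exampleSIG0Verify(key *KEY, buf []byte) error {
	m := new(Msg)
	if err := m.Unpack(buf); err != nil {
		return err
	}
	if len(m.Extra) == 0 {
		return ErrSig
	}
	sig, ok := m.Extra[len(m.Extra)-1].(*SIG)
	if !ok {
		return ErrSig
	}
	return sig.Verify(key, buf)
}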

@ -0,0 +1,61 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for dns package usage by Miek Gieben.
package dns
import "sync"
import "time"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
val *Msg
rtt time.Duration
err error
dups int
}
// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
sync.Mutex // protects m
m map[string]*call // lazily initialized
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
g.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.Unlock()
c.wg.Wait()
return c.val, c.rtt, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.Unlock()
c.val, c.rtt, c.err = fn()
c.wg.Done()
if !g.dontDeleteForTesting {
g.Lock()
delete(g.m, key)
g.Unlock()
}
return c.val, c.rtt, c.err, c.dups > 0
}
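// exampleDedupedQuery is an editor's illustrative sketch and not part of the
// upstream file: concurrent callers asking the same question share one
// execution of doQuery. A real key would also fold in Qtype and Qclass; the
// question name alone keeps the sketch short, and doQuery is an assumed
// callback that performs the actual exchange.
func exampleDedupedQuery(g *singleflight, q *Msg, doQuery func() (*Msg, time.Duration, error)) (*Msg, time.Duration, error) {
	r, rtt, err, _ := g.Do(q.Question[0].Name, doQuery)
	return r, rtt, err
}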

@ -0,0 +1,44 @@
package dns
import (
"crypto/sha256"
"crypto/x509"
"encoding/hex"
)
// Sign creates a SMIMEA record from an SSL certificate.
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeSMIMEA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
return err
}
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// SMIMEAName returns the owner name of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Sections 2 and 3
func SMIMEAName(email, domain string) (string, error) {
hasher := sha256.New()
hasher.Write([]byte(email))
// RFC Section 3: "The local-part is hashed using the SHA2-256
// algorithm with the hash truncated to 28 octets and
// represented in its hexadecimal representation to become the
// left-most label in the prepared domain name"
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
}
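// exampleSMIMEAOwnerName is an editor's illustrative sketch and not part of
// the upstream file: it derives the owner name under which the SMIMEA record
// for a mailbox would be published. Passing the local-part, as the RFC text
// quoted above describes, and the mailbox "alice@example.org" are assumptions.
func exampleSMIMEAOwnerName() (string, error) {
	// Yields "<hex of SHA2-256("alice") truncated to 28 octets>._smimecert.example.org".
	return SMIMEAName("alice", "example.org")
}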

@ -0,0 +1,44 @@
package dns
import (
"crypto/x509"
"net"
"strconv"
)
// Sign creates a TLSA record from an SSL certificate.
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeTLSA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
return err
}
// Verify verifies a TLSA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *TLSA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// TLSAName returns the owner name of a TLSA resource record as per the
// rules specified in RFC 6698, Section 3.
func TLSAName(name, service, network string) (string, error) {
if !IsFqdn(name) {
return "", ErrFqdn
}
p, err := net.LookupPort(network, service)
if err != nil {
return "", err
}
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
}
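// exampleTLSACheck is an editor's illustrative sketch and not part of the
// upstream file: given a TLSA record already looked up for an HTTPS service
// and the certificate that server presented (both assumptions), it shows the
// owner name a resolver would query and verifies the certificate.
func exampleTLSACheck(record *TLSA, cert *x509.Certificate) error {
	name, err := TLSAName(Fqdn("example.org"), "https", "tcp")
	if err != nil {
		return err
	}
	_ = name // "_443._tcp.example.org." -- where the TLSA record lives
	return record.Verify(cert)
}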
