getk3sversion

pull/227/head
iwilltry42 5 years ago
parent 4929915717
commit f571ff3143
No known key found for this signature in database
GPG Key ID: 7BA57AD1CFF16110
62 changed files (number of changed lines in parentheses):
  1. cmd/create/createCluster.go (5)
  2. cmd/create/createNode.go (2)
  3. cmd/root.go (3)
  4. cmd/util/ports.go (2)
  5. go.mod (4)
  6. go.sum (118)
  7. vendor/github.com/docker/distribution/.gitignore (38)
  8. vendor/github.com/docker/distribution/.gometalinter.json (16)
  9. vendor/github.com/docker/distribution/.mailmap (32)
  10. vendor/github.com/docker/distribution/.travis.yml (51)
  11. vendor/github.com/docker/distribution/BUILDING.md (117)
  12. vendor/github.com/docker/distribution/CONTRIBUTING.md (148)
  13. vendor/github.com/docker/distribution/Dockerfile (29)
  14. vendor/github.com/docker/distribution/MAINTAINERS (243)
  15. vendor/github.com/docker/distribution/Makefile (102)
  16. vendor/github.com/docker/distribution/README.md (130)
  17. vendor/github.com/docker/distribution/ROADMAP.md (267)
  18. vendor/github.com/docker/distribution/blobs.go (265)
  19. vendor/github.com/docker/distribution/doc.go (7)
  20. vendor/github.com/docker/distribution/errors.go (119)
  21. vendor/github.com/docker/distribution/go.mod (57)
  22. vendor/github.com/docker/distribution/go.sum (143)
  23. vendor/github.com/docker/distribution/manifest/doc.go (1)
  24. vendor/github.com/docker/distribution/manifest/schema1/config_builder.go (287)
  25. vendor/github.com/docker/distribution/manifest/schema1/manifest.go (184)
  26. vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go (98)
  27. vendor/github.com/docker/distribution/manifest/schema1/sign.go (68)
  28. vendor/github.com/docker/distribution/manifest/schema1/verify.go (32)
  29. vendor/github.com/docker/distribution/manifest/schema2/builder.go (85)
  30. vendor/github.com/docker/distribution/manifest/schema2/manifest.go (144)
  31. vendor/github.com/docker/distribution/manifest/versioned.go (12)
  32. vendor/github.com/docker/distribution/manifests.go (125)
  33. vendor/github.com/docker/distribution/registry.go (118)
  34. vendor/github.com/docker/distribution/tags.go (27)
  35. vendor/github.com/docker/libtrust/CONTRIBUTING.md (13)
  36. vendor/github.com/docker/libtrust/LICENSE (191)
  37. vendor/github.com/docker/libtrust/MAINTAINERS (3)
  38. vendor/github.com/docker/libtrust/README.md (22)
  39. vendor/github.com/docker/libtrust/certificates.go (175)
  40. vendor/github.com/docker/libtrust/doc.go (9)
  41. vendor/github.com/docker/libtrust/ec_key.go (428)
  42. vendor/github.com/docker/libtrust/filter.go (50)
  43. vendor/github.com/docker/libtrust/hash.go (56)
  44. vendor/github.com/docker/libtrust/jsonsign.go (657)
  45. vendor/github.com/docker/libtrust/key.go (253)
  46. vendor/github.com/docker/libtrust/key_files.go (255)
  47. vendor/github.com/docker/libtrust/key_manager.go (175)
  48. vendor/github.com/docker/libtrust/rsa_key.go (427)
  49. vendor/github.com/docker/libtrust/util.go (363)
  50. vendor/github.com/heroku/docker-registry-client/LICENSE.md (26)
  51. vendor/github.com/heroku/docker-registry-client/registry/authchallenge.go (151)
  52. vendor/github.com/heroku/docker-registry-client/registry/basictransport.go (23)
  53. vendor/github.com/heroku/docker-registry-client/registry/blob.go (108)
  54. vendor/github.com/heroku/docker-registry-client/registry/errortransport.go (46)
  55. vendor/github.com/heroku/docker-registry-client/registry/json.go (50)
  56. vendor/github.com/heroku/docker-registry-client/registry/manifest.go (126)
  57. vendor/github.com/heroku/docker-registry-client/registry/registry.go (118)
  58. vendor/github.com/heroku/docker-registry-client/registry/repositories.go (26)
  59. vendor/github.com/heroku/docker-registry-client/registry/tags.go (25)
  60. vendor/github.com/heroku/docker-registry-client/registry/tokentransport.go (128)
  61. vendor/modules.txt (8)
  62. version/version.go (40)

cmd/create/createCluster.go
@@ -63,7 +63,7 @@ func NewCmdCreateCluster() *cobra.Command {
cmd.Flags().IntP("masters", "m", 1, "Specify how many masters you want to create")
cmd.Flags().IntP("workers", "w", 0, "Specify how many workers you want to create")
// cmd.Flags().String("config", "", "Specify a cluster configuration file") // TODO: to implement
cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.K3sVersion), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
cmd.Flags().String("network", "", "Join an existing network")
cmd.Flags().String("secret", "", "Specify a cluster secret. By default, we generate one.")
cmd.Flags().StringArrayP("volume", "v", nil, "Mount volumes into the nodes (Format: `--volume [SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d create -w 2 -v /my/path@worker[0,1] -v /tmp/test:/tmp/other@master[0]`")
@@ -117,6 +117,9 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string) (runtimes.Runtime,
log.Errorln("No image specified")
log.Fatalln(err)
}
if image == "latest" {
image = version.GetK3sVersion(true)
}
// --masters
masterCount, err := cmd.Flags().GetInt("masters")
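The two hunks above replace the hard-coded `version.K3sVersion` default with `version.GetK3sVersion(false)` and resolve a literal `latest` via `version.GetK3sVersion(true)`. The changed `version/version.go` (40 lines in this commit) is not shown in this view; the following is only a rough sketch of the shape such a helper could take. The example tag value and the fallback behaviour are assumptions, not the commit's actual code.

```go
// Rough sketch only, not the commit's actual version/version.go.
package version

// K3sVersion is the pinned default tag; in the real build it is typically
// injected at compile time (the value here is a placeholder).
var K3sVersion = "v0.9.1"

// GetK3sVersion returns the pinned tag, or tries to resolve the most recent
// tag when latest is true, falling back to the pinned one on failure.
func GetK3sVersion(latest bool) string {
	if !latest {
		return K3sVersion
	}
	tag, err := fetchLatestK3sVersion()
	if err != nil || tag == "" {
		return K3sVersion
	}
	return tag
}

// fetchLatestK3sVersion stands in for a registry lookup (see the sketch after
// the go.mod hunk below); here it simply falls back to the pinned tag.
func fetchLatestK3sVersion() (string, error) {
	return K3sVersion, nil
}
```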

cmd/create/createNode.go
@@ -61,7 +61,7 @@ func NewCmdCreateNode() *cobra.Command {
log.Fatalln("Failed to mark required flag '--cluster'")
}
cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.K3sVersion), "Specify k3s image used for the node(s)") // TODO: get image version tag
cmd.Flags().String("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image used for the node(s)")
// done
return cmd

cmd/root.go
@@ -117,13 +117,12 @@ func initLogging() {
// Completion
var completionFunctions = map[string]func(io.Writer) error{
"bash": rootCmd.GenBashCompletion,
"zsh": rootCmd.GenZshCompletion,
"zsh": rootCmd.GenZshCompletion, // FIXME: zsh completion requires https://github.com/spf13/cobra/pull/899 due to square brackets in our help texts
"psh": rootCmd.GenPowerShellCompletion,
"powershell": rootCmd.GenPowerShellCompletion,
}
// NewCmdCompletion creates a new completion command
// FIXME: this does not really work yet
func NewCmdCompletion() *cobra.Command {
// create new cobra command
cmd := &cobra.Command{
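The hunk is cut off right after `cmd := &cobra.Command{`, so the body of `NewCmdCompletion` is not visible here. Below is a minimal sketch of how it could dispatch through the `completionFunctions` map shown above; the `Use`/`Short` strings, argument handling, and error messages are assumptions, and it relies on the file's existing imports (`os`, `github.com/spf13/cobra`, and logrus aliased as `log`).

```go
// sketch only; the real NewCmdCompletion body is not part of this hunk
func NewCmdCompletion() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "completion SHELL",
		Short: "Generate completion scripts for [bash, zsh, psh, powershell]",
		Args:  cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			// look up the requested shell in the map declared above
			if genCompletion, ok := completionFunctions[args[0]]; ok {
				if err := genCompletion(os.Stdout); err != nil {
					log.Fatalln("Failed to generate completion script", err)
				}
				return
			}
			log.Fatalf("Shell '%s' not supported for completion", args[0])
		},
	}
	return cmd
}
```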

cmd/util/ports.go
@@ -70,5 +70,5 @@ func ParseAPIPort(portString string) (k3d.ExposeAPI, error) {
// ValidatePortMap validates a port mapping
func ValidatePortMap(portmap string) (string, error) {
return portmap, nil // TODO:
return portmap, nil // TODO: add validation of port mapping
}
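The TODO above is left open in this commit. One way it could eventually be filled in is by delegating the syntax check to the `nat` helpers from `github.com/docker/go-connections`, which typically come in transitively with the Docker client dependency. This is only a hedged sketch, not what k3d actually ships:

```go
package util

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

// ValidatePortMap validates a port mapping (sketch for the TODO above)
func ValidatePortMap(portmap string) (string, error) {
	// nat.ParsePortSpec understands docker-style specs like "0.0.0.0:6443:6443/tcp"
	if _, err := nat.ParsePortSpec(portmap); err != nil {
		return "", fmt.Errorf("invalid port mapping '%s': %+v", portmap, err)
	}
	return portmap, nil
}
```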

go.mod
@@ -20,18 +20,16 @@ require (
github.com/gogo/googleapis v1.3.0 // indirect
github.com/golang/protobuf v1.3.1 // indirect
github.com/google/go-cmp v0.3.0 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
github.com/imdario/mergo v0.3.7 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mitchellh/go-homedir v1.1.0
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/opencontainers/runtime-spec v1.0.1 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/spf13/cobra v0.0.5
github.com/stretchr/testify v1.4.0 // indirect
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc // indirect
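The one addition in this hunk is `github.com/heroku/docker-registry-client`, whose vendored sources make up most of the file list above; it is presumably what backs the `latest` lookup in the new `GetK3sVersion`. As a minimal, hedged illustration of that client's public API (not code from this commit), listing the available `rancher/k3s` tags on Docker Hub looks roughly like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/heroku/docker-registry-client/registry"
)

func main() {
	// Anonymous credentials are enough for reading public repositories.
	hub, err := registry.New("https://registry-1.docker.io/", "", "")
	if err != nil {
		log.Fatalln(err)
	}

	// rancher/k3s is the repository behind k3d's default k3s image.
	tags, err := hub.Tags("rancher/k3s")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(tags)
}
```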

go.sum

@@ -10,7 +10,9 @@ github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+q
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -46,6 +48,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c h1:6L6qod4JzOm9KEqmfSyO6ZhsnN9dlcISRt+xdoyZeGE=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/docker v1.4.2-0.20190905191220-3b23f9033967 h1:hqs6DQFz659/085bXwClBRGefVf+kWCTsQR6wwkOMiU=
@@ -57,27 +60,67 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6Uezg
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/gogo/googleapis v1.3.0 h1:M695OaDJ5ipWvDPcoAg/YL9c3uORAegkEfBqTQF/fTQ=
github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
github.com/golangci/golangci-lint v1.17.2-0.20190909185456-6163a8a79084/go.mod h1:jXakAOSd+FMU9dP3D6IfBK7HyD1q/RLHI9NOY8veycY=
github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU=
github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
@@ -87,8 +130,13 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5 h1:6ZR6HQ+P9ZUwHlYq+bU7e9wqAImxKUguq8fp2gZSgCo=
github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5/go.mod h1:Yho0S7KhsnHQRCC5lDraYF1SsLMeWtf/tKdufKu3TJA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@@ -98,7 +146,12 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -108,18 +161,33 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -131,6 +199,7 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
@@ -143,25 +212,40 @@ github.com/prometheus/common v0.0.0-20180110214958-89604d197083 h1:BVsJT8+ZbyuL3
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -172,7 +256,13 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
@@ -182,8 +272,12 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -192,29 +286,41 @@ golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQ
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
@@ -227,12 +333,24 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=

vendor/github.com/docker/distribution/.gitignore
@@ -0,0 +1,38 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# never checkin from the bin file (for now)
bin/*
# Test key files
*.pem
# Cover profiles
*.out
# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace
.idea/*

vendor/github.com/docker/distribution/.gometalinter.json
@@ -0,0 +1,16 @@
{
  "Vendor": true,
  "Deadline": "2m",
  "Sort": ["linter", "severity", "path", "line"],
  "EnableGC": true,
  "Enable": [
    "structcheck",
    "staticcheck",
    "unconvert",
    "gofmt",
    "goimports",
    "golint",
    "vet"
  ]
}

vendor/github.com/docker/distribution/.mailmap
@@ -0,0 +1,32 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com>
Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local>
Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email>
Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com>
Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com>
Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io>
Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com>
Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com>
Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com>
Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>

vendor/github.com/docker/distribution/.travis.yml
@@ -0,0 +1,51 @@
dist: trusty
sudo: required
# setup travis so that we can run containers for integration tests
services:
  - docker
language: go
go:
  - "1.12.x"
go_import_path: github.com/docker/distribution
addons:
  apt:
    packages:
      - python-minimal
env:
  - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
before_install:
  - uname -r
  - sudo apt-get -q update
install:
  - go get -u github.com/vbatts/git-validation
  # TODO: Add enforcement of license
  # - go get -u github.com/kunalkushwaha/ltag
  - cd $TRAVIS_BUILD_DIR
script:
  - export GOOS=$TRAVIS_GOOS
  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
  - DCO_VERBOSITY=-q script/validate/dco
  - GOOS=linux GO111MODULE=off script/setup/install-dev-tools
  - script/validate/vendor
  - go build -i .
  - make check
  - make build
  - make binaries
  # Currently takes too long
  #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
  - if [ "$GOOS" = "linux" ]; then make coverage ; fi
after_success:
  - bash <(curl -s https://codecov.io/bash) -F linux
before_deploy:
  # Run tests with storage driver configurations

vendor/github.com/docker/distribution/BUILDING.md
@@ -0,0 +1,117 @@
# Building the registry source
## Use-case
This is useful if you intend to actively work on the registry.
### Alternatives
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
### Gotchas
You are expected to know your way around with go & git.
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
## Build the development environment
The first prerequisite of properly building distribution targets is to have a Go
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.
If a Go development environment is setup, one can use `go get` to install the
`registry` command from the current latest:
go get github.com/docker/distribution/cmd/registry
The above will install the source repository into the `GOPATH`.
Now create the directory for the registry data (this might require you to set permissions properly)
mkdir -p /var/lib/registry
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
The `registry`
binary can then be run with the following:
$ $GOPATH/bin/registry --version
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
> __NOTE:__ While you do not need to use `go get` to checkout the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.
The registry can be run with the default config using the following
incantation:
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] debug server listening localhost:5001
If it is working, one should see the above log messages.
### Repeatable Builds
For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).
A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:
go get github.com/golang/lint/golint
Once these commands are available in the `GOPATH`, run `make` to get a full
build:
$ make
+ clean
+ fmt
+ vet
+ lint
+ build
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
github.com/sirupsen/logrus
github.com/docker/libtrust
...
github.com/yvasiyarov/gorelic
github.com/docker/distribution/registry/handlers
github.com/docker/distribution/cmd/registry
+ test
...
ok github.com/docker/distribution/digest 7.875s
ok github.com/docker/distribution/manifest 0.028s
ok github.com/docker/distribution/notifications 17.322s
? github.com/docker/distribution/registry [no test files]
ok github.com/docker/distribution/registry/api/v2 0.101s
? github.com/docker/distribution/registry/auth [no test files]
ok github.com/docker/distribution/registry/auth/silly 0.011s
...
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+ binaries
The above provides a repeatable build using the contents of the vendor
directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:
$ ./bin/registry --version
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
### Optional build tags
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.

vendor/github.com/docker/distribution/CONTRIBUTING.md
@@ -0,0 +1,148 @@
# Contributing to the registry
## Before reporting an issue...
### If your problem is with...
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
### If you...
- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is
Then please do not open an issue here yet - you should first try one of the following support forums:
- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
### Reporting security issues
The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!
Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
- `docker version`
- `docker info`
- `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
## Contributing a patch for a known bug, or a small correction
You should follow the basic GitHub workflow:
1. fork
2. commit a change
3. make sure the tests pass
4. PR
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`
Some simple rules to ensure quick merge:
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
## Contributing new features
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
It's mandatory to:
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code
Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

vendor/github.com/docker/distribution/Dockerfile
@@ -0,0 +1,29 @@
FROM golang:1.12-alpine AS build
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs
ARG GOOS=linux
ARG GOARCH=amd64
ARG GOARM=6
ARG VERSION
ARG REVISION
RUN set -ex \
&& apk add --no-cache make git file
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"
FROM alpine:3.9
RUN set -ex \
&& apk add --no-cache ca-certificates apache2-utils
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]

vendor/github.com/docker/distribution/MAINTAINERS
@@ -0,0 +1,243 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
[Rules]
[Rules.maintainers]
title = "What is a maintainer?"
text = """
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:
1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.
Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""
[Rules.reviewer]
title = "What is a reviewer?"
text = """
A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM count towards the
required LGTM count to merge a code change into the project.
Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
"""
[Rules.adding-maintainers]
title = "How are maintainers added?"
text = """
Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.
Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.
Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed on the maintainers mailing list.
After a candidate has been announced on the maintainers mailing list, the
existing maintainers are given five business days to discuss the candidate,
raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing
list. Only maintainers of the repository that the candidate is proposed for are
allowed to vote.
If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The candidate becomes a maintainer once the pull request is
merged.
"""
[Rules.stepping-down-policy]
title = "Stepping down policy"
text = """
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.
After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
"""
[Rules.inactive-maintainers]
title = "Removal of inactive maintainers"
text = """
Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.
If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.
If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote of at least 66% of
the current maintainers. An e-mail is sent to the
mailing list, inviting maintainers of the project to vote. The voting period is
five business days. Issues related to a maintainer's performance should be
discussed with them among the other maintainers so that they are not surprised
by a pull request removing them.
"""
[Rules.decisions]
title = "How are decisions made?"
text = """
Short answer: EVERYTHING IS A PULL REQUEST.
distribution is an open-source project with an open design philosophy. This means
that the repository is the source of truth for EVERY aspect of the project,
including its philosophy, design, road map, and APIs. *If it's part of the
project, it's in the repo. If it's in the repo, it's part of the project.*
As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.
All decisions affecting distribution, big and small, follow the same 3 steps:
* Step 1: Open a pull request. Anyone can do this.
* Step 2: Discuss the pull request. Anyone can do this.
* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.
"""
[Rules.DCO]
title = "Helping contributors with the DCO"
text = """
The [DCO or `Sign your work`](
https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.
Some distribution contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.
In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull request's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - e.g., a spelling change.
When you add someone's DCO, please also add your own to keep a log.
"""
[Rules."no direct push"]
title = "I'm a maintainer. Should I make pull requests too?"
text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""
[Rules.tsc]
title = "Conflict Resolution and technical disputes"
text = """
distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
"""
[Rules.meta]
title = "How is this process changed?"
text = "Just like everything else: by making a pull request :)"
# Current project organization
[Org]
[Org.Maintainers]
people = [
"dmcgowan",
"dmp42",
"stevvooe",
]
[Org.Reviewers]
people = [
"manishtomar",
"caervs",
"davidswu",
"RobbKistler"
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.caervs]
Name = "Ryan Abrams"
Email = "rdabrams@gmail.com"
GitHub = "caervs"
[people.davidswu]
Name = "David Wu"
Email = "dwu7401@gmail.com"
GitHub = "davidswu"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[people.dmp42]
Name = "Olivier Gambier"
Email = "olivier@docker.com"
GitHub = "dmp42"
[people.manishtomar]
Name = "Manish Tomar"
Email = "manish.tomar@docker.com"
GitHub = "manishtomar"
[people.RobbKistler]
Name = "Robb Kistler"
Email = "robb.kistler@docker.com"
GitHub = "RobbKistler"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"

@ -0,0 +1,102 @@
# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# Used to populate version variable in main package.
VERSION ?= $(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION ?= $(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
PKG=github.com/docker/distribution
# Project packages.
PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/)
INTEGRATION_PACKAGE=${PKG}
COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES})
# Project binaries.
COMMANDS=registry digest registry-api-descriptor-template
# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
GO_GCFLAGS=-gcflags "-N -l"
VERSION:="$(VERSION)-noopt"
endif
WHALE = "+"
# Go files
#
TESTFLAGS_RACE=
GOFILES=$(shell find . -type f -name '*.go')
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
BINARIES=$(addprefix bin/,$(COMMANDS))
# Flags passed to `go test`
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
TESTFLAGS_PARALLEL ?= 8
.PHONY: all build binaries check clean test test-race test-full integration coverage
.DEFAULT: all
all: binaries
# This only needs to be generated by hand when cutting full releases.
version/version.go:
@echo "$(WHALE) $@"
./version/version.sh > $@
check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
@echo "$(WHALE) $@"
gometalinter --config .gometalinter.json ./...
test: ## run tests, except integration test with test.short
@echo "$(WHALE) $@"
@go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
test-race: ## run tests, except integration test with test.short and race
@echo "$(WHALE) $@"
@go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
test-full: ## run tests, except integration tests
@echo "$(WHALE) $@"
@go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})
integration: ## run integration tests
@echo "$(WHALE) $@"
@go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE}
coverage: ## generate coverprofiles from the unit tests
@echo "$(WHALE) $@"
@rm -f coverage.txt
@go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null
@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \
go test ${GO_TAGS} ${TESTFLAGS} \
-cover \
-coverprofile=profile.out \
-covermode=atomic $$pkg || exit; \
if [ -f profile.out ]; then \
cat profile.out >> coverage.txt; \
rm profile.out; \
fi; \
done )
FORCE:
# Build a binary from a cmd.
bin/%: cmd/% FORCE
@echo "$(WHALE) $@${BINARY_SUFFIX}"
@go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$<
binaries: $(BINARIES) ## build binaries
@echo "$(WHALE) $@"
build:
@echo "$(WHALE) $@"
@go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES)
clean: ## clean up binaries
@echo "$(WHALE) $@"
@rm -f $(BINARIES)

@ -0,0 +1,130 @@
# Distribution
The Docker toolset to pack, ship, store, and deliver content.
This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
[![Build Status](https://travis-ci.org/docker/distribution.svg?branch=master)](https://travis-ci.org/docker/distribution)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
This repository contains the following components:
|**Component** |Description |
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
### How does this integrate with Docker engine?
This project should provide an implementation of a V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.
### What are the long term goals of the Distribution project?
The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.
Our goal is to design a professional-grade and extensible content distribution
system that allows users to:
* Enjoy an efficient, secure and reliable way to store, manage, package and
exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solution through good specs and a solid
extension mechanism.
## More about Registry 2.0
The new registry implementation provides the following benefits:
- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
### Who needs to deploy a registry?
By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.
For some users and even companies, this default behavior is sufficient. For
others, it is not.
For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or continuous integration. For these
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
may be the better choice.
### Migration to Registry 2.0
For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see [docker/migrator](https://github.com/docker/migrator).
## Contribute
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](BUILDING.md).
## Support
If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:
<table>
<tr>
<th align="left">
IRC
</th>
<td>
#docker-distribution on FreeNode
</td>
</tr>
<tr>
<th align="left">
Issue Tracker
</th>
<td>
github.com/docker/distribution/issues
</td>
</tr>
<tr>
<th align="left">
Google Groups
</th>
<td>
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
</td>
</tr>
<tr>
<th align="left">
Mailing List
</th>
<td>
docker@dockerproject.org
</td>
</tr>
</table>
## License
This project is distributed under [Apache License, Version 2.0](LICENSE).

@ -0,0 +1,267 @@
# Roadmap
The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the
release-relationship to the Docker Platform.
* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release-relationship to the Docker Platform.
This road map is a living document, providing an overview of the goals and
considerations made in respect of the future of the project.
## Distribution Goals
- Replace the existing [docker registry](https://github.com/docker/docker-registry)
implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models
## Distribution Components
Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.
* [Registry](#registry)
* [Distribution Package](#distribution-package)
***
### Registry
The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.
Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.
#### Data Storage and Distribution First
The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.
This means we should be selective in new features and API additions, including
those that may require expensive, ever growing indexes. Requests should be
servable in "constant time".
#### Content Addressability
All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.
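
To make this property concrete, here is a minimal sketch using the
`github.com/opencontainers/go-digest` package that the vendored code already
imports; the blob bytes are an arbitrary placeholder, not registry data.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Placeholder content; any byte stream is handled the same way.
	blob := []byte(`{"example":"layer data"}`)

	// The content identifier is simply the (sha256) digest of the bytes.
	dgst := digest.FromBytes(blob)
	fmt.Println(dgst)

	// Verification is the inverse: recompute over the bytes and compare.
	verifier := dgst.Verifier()
	verifier.Write(blob)
	fmt.Println(verifier.Verified()) // true only if the bytes match the digest
}
```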
#### Content Agnostic
In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.
We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses and that model can be used to work with content.
#### Simplicity
The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.
This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service?
#### Extensibility
The registry should provide extension points to add functionality while
keeping its core scope narrow.
Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.
#### Active Feature Discussions
The following are feature discussions that are currently active.
If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigid design process before
landing in the registry.
##### Proxying to other Registries
A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.
##### Metadata storage
Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes with the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID compliant
storage systems to handle metadata.
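
As a rough illustration only, such an abstraction might resemble the
hypothetical interface below; none of these names exist in the project, they
merely sketch moving the mutable tag-to-manifest mapping behind an API that an
ACID-compliant backend could implement.

```go
// Hypothetical sketch only; no such interface exists in docker/distribution.
package metadata

import (
	"context"

	"github.com/opencontainers/go-digest"
)

// TagStore captures the mutable tag -> manifest mapping that could be moved
// off the storage-driver VFS and onto a transactional backend.
type TagStore interface {
	// Resolve returns the manifest digest a tag currently points at.
	Resolve(ctx context.Context, repo, tag string) (digest.Digest, error)
	// Set atomically repoints a tag at a manifest digest.
	Set(ctx context.Context, repo, tag string, dgst digest.Digest) error
	// Delete removes the tag without touching the underlying blobs.
	Delete(ctx context.Context, repo, tag string) error
}
```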
##### Peer to Peer transfer
Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
##### Indexing, Search and Discovery
The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. That makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.
There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.
The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing
tools and propose it as a standard search API, or use it to inform a
standardization process. Once this has been explored, we will integrate with
the docker client.
Please see the following for more detail:
- https://github.com/docker/distribution/issues/206
##### Deletes
> __NOTE:__ Deletes are a much asked for feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.
While, at first glance, implementing deletes seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological
in the context of a registry. The following paragraphs discuss the background
and approaches that could be applied to arrive at a solution.
The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal but ideally, both are supported. Generally, according to this rule, we
err on holding data longer than needed, ensuring that it is only removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.
To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in
reverse-dependent order, but makes wider transactional operations unsafe.
Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we also can try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.
Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.
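
The enumeration described above can be sketched as a toy mark-and-sweep over
an in-memory model. The sketch assumes a single, frozen view of the data,
which is exactly the property a live registry does not have:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// markAndSweep returns blobs that no manifest references. It assumes a
// consistent snapshot; the race described below is what breaks this in practice.
func markAndSweep(manifests map[digest.Digest][]digest.Digest, blobs []digest.Digest) []digest.Digest {
	marked := map[digest.Digest]bool{}
	for _, refs := range manifests {
		for _, b := range refs {
			marked[b] = true // mark: every blob reachable from some manifest
		}
	}
	var garbage []digest.Digest
	for _, b := range blobs {
		if !marked[b] {
			garbage = append(garbage, b) // sweep: everything left unmarked
		}
	}
	return garbage
}

func main() {
	layer, orphan := digest.FromString("layer"), digest.FromString("orphan")
	manifests := map[digest.Digest][]digest.Digest{
		digest.FromString("manifest-a"): {layer},
	}
	fmt.Println(markAndSweep(manifests, []digest.Digest{layer, orphan}))
}
```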
A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.
Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:
- _Reference Counting_ - Maintain a count of references to each blob. This is
challenging for a number of reasons: 1. maintaining a consistent consensus
of reference counts across a set of Registries and 2. building the initial
list of reference counts for an existing registry. These challenges can be
met with a consensus protocol like Paxos or Raft in the first case and a
necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
and find all blob references. Delete all unreferenced blobs. This approach
is very simple but requires disabling writes for a period of time while the
service reads all data. This is slow and expensive but very accurate and
effective.
- _Generational GC_ - Do something similar to above but instead of blocking
writes, writes are sent to another storage backend while reads are broadcast
to the new and old backends. GC is then performed on the read-only portion.
Because writes land in the new backend, the data in the read-only section
can be safely deleted. The main drawbacks of this approach are complexity
and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
know exactly which data is referenced at any given time. This avoids
the coordination problem by managing this data in a single location. We trade
off metadata scalability for simplicity and performance. This is a very good
option for most registry deployments. This would create a bottleneck for
registry metadata. However, metadata is generally not the main bottleneck
when serving images.
Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple but the amount of work in
coordination may offset the extra work it might take to build a _Centralized
Oracle_. We'll accept proposals for any solution but please coordinate with us
before dropping code.
At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.
Please see the following issues for more detail:
- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462
### Distribution Package
At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.
The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.
***
### Project Planning
An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.

@ -0,0 +1,265 @@
package distribution
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// ErrBlobExists returned when blob already exists
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported when blob digest is an unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown when blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown returned when upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength returned when the blob has an expected length on
// commit, meaning mismatched with the descriptor or an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
// MediaType describes the type of the content. All text based formats are
// encoded as utf-8.
MediaType string `json:"mediaType,omitempty"`
// Size in bytes of content.
Size int64 `json:"size,omitempty"`
// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
// Annotations contains arbitrary metadata relating to the targeted content.
Annotations map[string]string `json:"annotations,omitempty"`
// Platform describes the platform which the image in the manifest runs on.
// This should only be used when referring to a manifest.
Platform *v1.Platform `json:"platform,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
}
// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
return d
}
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content but they may
// differ on their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotator's canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (ie sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
io.ReadSeeker
io.Closer
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides a ReadSeekCloser to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error will be
// returned.
Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
// Stat allows passing a precalculated descriptor to link and return.
// Blob access check will be skipped if set.
Stat *Descriptor
}
}
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign "application/octet-
// stream" to the blob. The returned descriptor may have a different
// digest depending on the blob store, referred to as the canonical
// descriptor.
Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls even after a commit that
// result in a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represents the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}
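
For orientation, a hedged sketch of how a caller might drive the interfaces
declared above follows; `store` is assumed to be some concrete `BlobStore`
implementation, which this file does not provide.

```go
package example

import (
	"context"

	"github.com/docker/distribution"
)

// uploadAndCheck is illustrative only: it exercises Put, Stat and the
// Create/Commit writer flow against an assumed BlobStore implementation.
func uploadAndCheck(ctx context.Context, store distribution.BlobStore, p []byte) error {
	// Small blobs can be inserted in one call; the returned descriptor
	// carries the canonical digest.
	desc, err := store.Put(ctx, "application/octet-stream", p)
	if err != nil {
		return err
	}

	// Stat confirms the blob is now known to the service.
	if _, err := store.Stat(ctx, desc.Digest); err != nil {
		return err
	}

	// Larger content would use the writer-based flow instead.
	bw, err := store.Create(ctx)
	if err != nil {
		return err
	}
	defer bw.Cancel(ctx) // a no-op after Commit, per the interface contract

	if _, err := bw.Write(p); err != nil {
		return err
	}
	_, err = bw.Commit(ctx, distribution.Descriptor{Digest: desc.Digest, Size: int64(len(p))})
	return err
}
```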

@ -0,0 +1,7 @@
// Package distribution will define the interfaces for the components of
// docker distribution. The goal is to allow users to reliably package, ship
// and store content related to docker images.
//
// This is currently a work in progress. More details are available in the
// README.md.
package distribution

@ -0,0 +1,119 @@
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return "unverified manifest"
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
var parts []string
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
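
A hedged usage sketch: since the module targets Go 1.12 (predating
`errors.As`), callers typically distinguish these error types with a type
switch. The handling below is illustrative, not code from the project.

```go
package example

import (
	"fmt"

	"github.com/docker/distribution"
)

// describeLookupError turns the error types declared above into messages.
func describeLookupError(err error) string {
	if err == nil {
		return "ok"
	}
	if err == distribution.ErrAccessDenied {
		return "access denied"
	}
	switch e := err.(type) {
	case distribution.ErrManifestUnknown:
		return fmt.Sprintf("no manifest %q in repository %q", e.Tag, e.Name)
	case distribution.ErrRepositoryUnknown:
		return fmt.Sprintf("repository %q does not exist", e.Name)
	case distribution.ErrManifestUnknownRevision:
		return fmt.Sprintf("no revision %s of %q", e.Revision, e.Name)
	default:
		return err.Error()
	}
}
```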

@ -0,0 +1,57 @@
module github.com/docker/distribution
go 1.12
require (
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible
github.com/Azure/go-autorest v10.8.1+incompatible // indirect
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d
github.com/aws/aws-sdk-go v1.15.11
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
github.com/bitly/go-simplejson v0.5.0 // indirect
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/bshuster-repo/logrus-logstash-hook v0.4.1
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c // indirect
github.com/dnaeon/go-vcr v1.0.1 // indirect
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33
github.com/gorilla/mux v1.7.2
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/marstr/guid v1.1.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f // indirect
github.com/ncw/swift v1.0.47
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420
github.com/opencontainers/image-spec v1.0.0
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06 // indirect
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect
github.com/prometheus/common v0.0.0-20180110214958-89604d197083 // indirect
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3 // indirect
github.com/stretchr/testify v1.3.0 // indirect
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b // indirect
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed // indirect
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a // indirect
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789
gopkg.in/yaml.v2 v2.2.2
)

@ -0,0 +1,143 @@
cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-autorest v10.8.1+incompatible h1:u0jVQf+a6k6x8A+sT60l6EY9XZu+kHdnZVPAYqpVRo0=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/aws/aws-sdk-go v1.15.11 h1:m45+Ru/wA+73cOZXiEGLDH2d9uLN3iHqMc0/z4noDXE=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c h1:KJAnOBuY9cTKVqB5cfbynpvFgeHRTREkRk8C977oFu4=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33 h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES2GdFracYbBQh93GXac8fq7HeN6JnpduB8=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 h1:Yu3681ykYHDfLoI6XVjL4JWmkE+3TX9yfIWwRCh1kFM=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06 h1:BqJUZe1wY8984P2XGsGIGieuao8wucwOwaTS10L9Lj8=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083 h1:BVsJT8+ZbyuL3hypz/HmEiM8h2P6hBQGig4el9/MdjA=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE=
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff h1:mk5zS3XLqVUzdF/CQCZ5ERujSF/8JFo+Wpkp/5I93NA=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a h1:zo0EaRwJM6T5UQ+QEt2dDSgEmbFJ4pZr/Rzsjpu7zgI=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 h1:NMiUjDZiD6qDVeBOzpImftxXzQHCp2Y2QLdmaqU9MRk=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

@ -0,0 +1 @@
package manifest

@ -0,0 +1,287 @@
package schema1
import (
"context"
"crypto/sha512"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
)
type diffID digest.Digest
// gzippedEmptyTar is a gzip-compressed version of an empty tar file
// (1024 NULL bytes)
var gzippedEmptyTar = []byte{
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}
// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
// gzippedEmptyTar
const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// configManifestBuilder is a type for constructing manifests from an image
// configuration and generic descriptors.
type configManifestBuilder struct {
// bs is a BlobService used to create empty layer tars in the
// blob store if necessary.
bs distribution.BlobService
// pk is the libtrust private key used to sign the final manifest.
pk libtrust.PrivateKey
// configJSON is configuration supplied when the ManifestBuilder was
// created.
configJSON []byte
// ref contains the name and optional tag provided to NewConfigManifestBuilder.
ref reference.Named
// descriptors is the set of descriptors referencing the layers.
descriptors []distribution.Descriptor
// emptyTarDigest is set to a valid digest if an empty tar has been
// put in the blob store; otherwise it is empty.
emptyTarDigest digest.Digest
}
// NewConfigManifestBuilder is used to build new manifests for the current
// schema version from an image configuration and a set of descriptors.
// It takes a BlobService so that it can add an empty tar to the blob store
// if the resulting manifest needs empty layers.
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder {
return &configManifestBuilder{
bs: bs,
pk: pk,
configJSON: configJSON,
ref: ref,
}
}
// Build produces a final manifest from the given references
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
type imageRootFS struct {
Type string `json:"type"`
DiffIDs []diffID `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
type imageHistory struct {
Created time.Time `json:"created"`
Author string `json:"author,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}
type imageConfig struct {
RootFS *imageRootFS `json:"rootfs,omitempty"`
History []imageHistory `json:"history,omitempty"`
Architecture string `json:"architecture,omitempty"`
}
var img imageConfig
if err := json.Unmarshal(mb.configJSON, &img); err != nil {
return nil, err
}
if len(img.History) == 0 {
return nil, errors.New("empty history when trying to create schema1 manifest")
}
if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
}
// Generate IDs for each layer
// For non-top-level layers, create fake V1Compatibility strings that
// fit the format and don't collide with anything else, but don't
// result in runnable images on their own.
type v1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
history := make([]History, len(img.History))
parent := ""
layerCounter := 0
for i, h := range img.History[:len(img.History)-1] {
var blobsum digest.Digest
if h.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
layerCounter++
}
v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
if i == 0 && img.RootFS.BaseLayer != "" {
// windows-only baselayer setup
baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
parent = fmt.Sprintf("%x", baseID[:32])
}
v1Compatibility := v1Compatibility{
ID: v1ID,
Parent: parent,
Comment: h.Comment,
Created: h.Created,
Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
if h.EmptyLayer {
v1Compatibility.ThrowAway = true
}
jsonBytes, err := json.Marshal(&v1Compatibility)
if err != nil {
return nil, err
}
reversedIndex := len(img.History) - i - 1
history[reversedIndex].V1Compatibility = string(jsonBytes)
fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
parent = v1ID
}
latestHistory := img.History[len(img.History)-1]
var blobsum digest.Digest
if latestHistory.EmptyLayer {
if blobsum, err = mb.emptyTar(ctx); err != nil {
return nil, err
}
} else {
if len(img.RootFS.DiffIDs) <= layerCounter {
return nil, errors.New("too many non-empty layers in History section")
}
blobsum = mb.descriptors[layerCounter].Digest
}
fsLayerList[0] = FSLayer{BlobSum: blobsum}
dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
// Top-level v1compatibility string should be a modified version of the
// image config.
transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
if err != nil {
return nil, err
}
history[0].V1Compatibility = string(transformedConfig)
tag := ""
if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
mfst := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: mb.ref.Name(),
Tag: tag,
Architecture: img.Architecture,
FSLayers: fsLayerList,
History: history,
}
return Sign(&mfst, mb.pk)
}
// emptyTar pushes a compressed empty tar to the blob store if one doesn't
// already exist, and returns its blobsum.
func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
if mb.emptyTarDigest != "" {
// Already put an empty tar
return mb.emptyTarDigest, nil
}
descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
switch err {
case nil:
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
case distribution.ErrBlobUnknown:
// nop
default:
return "", err
}
// Add gzipped empty tar to the blob store
descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
if err != nil {
return "", err
}
mb.emptyTarDigest = descriptor.Digest
return descriptor.Digest, nil
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
descriptor := d.Descriptor()
if err := descriptor.Digest.Validate(); err != nil {
return err
}
mb.descriptors = append(mb.descriptors, descriptor)
return nil
}
// References returns the current references added to this builder
func (mb *configManifestBuilder) References() []distribution.Descriptor {
return mb.descriptors
}
// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
// Top-level v1compatibility string should be a modified version of the
// image config.
var configAsMap map[string]*json.RawMessage
if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
return nil, err
}
// Delete fields that didn't exist in old manifest
delete(configAsMap, "rootfs")
delete(configAsMap, "history")
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)
}
if throwaway {
configAsMap["throwaway"] = rawJSON(true)
}
return json.Marshal(configAsMap)
}
func rawJSON(value interface{}) *json.RawMessage {
jsonval, err := json.Marshal(value)
if err != nil {
return nil
}
return (*json.RawMessage)(&jsonval)
}
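// buildConfigManifestExample is an editor's usage sketch, not part of the
// upstream file: it shows how a schema1 manifest is typically assembled from
// an image configuration and its layer descriptors. bs, ref, configJSON and
// layers are assumed to be supplied by the caller.
func buildConfigManifestExample(ctx context.Context, bs distribution.BlobService, ref reference.Named, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
    // Generate a throwaway signing key; real callers usually load a persistent one.
    pk, err := libtrust.GenerateECP256PrivateKey()
    if err != nil {
        return nil, err
    }
    mb := NewConfigManifestBuilder(bs, pk, ref, configJSON)
    // Descriptors must be appended base-first so they line up with rootfs.diff_ids.
    for _, layer := range layers {
        if err := mb.AppendReference(layer); err != nil {
            return nil, err
        }
    }
    // Build signs the result with pk and returns a *SignedManifest.
    return mb.Build(ctx)
}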

@ -0,0 +1,184 @@
package schema1
import (
"encoding/json"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version. Note
// that for schema version 1, the media type is optionally "application/json".
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
// MediaTypeSignedManifest specifies the mediaType for the current SignedManifest version
MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// MediaTypeManifestLayer specifies the media type for manifest layers
MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 1,
}
)
func init() {
schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
sm := new(SignedManifest)
err := sm.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
desc := distribution.Descriptor{
Digest: digest.FromBytes(sm.Canonical),
Size: int64(len(sm.Canonical)),
MediaType: MediaTypeSignedManifest,
}
return sm, desc, err
}
err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
err = distribution.RegisterManifestSchema("application/json", schema1Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// FSLayer is a container struct for BlobSums defined in an image manifest
type FSLayer struct {
// BlobSum is the tarsum of the referenced filesystem image layer
BlobSum digest.Digest `json:"blobSum"`
}
// History stores unstructured v1 compatibility information
type History struct {
// V1Compatibility is the raw v1 compatibility information
V1Compatibility string `json:"v1Compatibility"`
}
// Manifest provides the base accessible fields for working with V2 image
// format in the registry.
type Manifest struct {
manifest.Versioned
// Name is the name of the image's repository
Name string `json:"name"`
// Tag is the tag of the image specified by this manifest
Tag string `json:"tag"`
// Architecture is the host architecture on which this image is intended to
// run
Architecture string `json:"architecture"`
// FSLayers is a list of filesystem layer blobSums contained in this image
FSLayers []FSLayer `json:"fsLayers"`
// History is a list of unstructured historical data for v1 compatibility
History []History `json:"history"`
}
// SignedManifest provides an envelope for a signed image manifest, including
// the format sensitive raw bytes.
type SignedManifest struct {
Manifest
// Canonical is the canonical byte representation of the ImageManifest,
// without any attached signatures. The manifest byte
// representation cannot change or it will have to be re-signed.
Canonical []byte `json:"-"`
// all contains the byte representation of the Manifest including signatures
// and is returned by Payload()
all []byte
}
// UnmarshalJSON populates a new SignedManifest struct from JSON data.
func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
sm.all = make([]byte, len(b))
// store manifest and signatures in all
copy(sm.all, b)
jsig, err := libtrust.ParsePrettySignature(b, "signatures")
if err != nil {
return err
}
// Resolve the payload in the manifest.
bytes, err := jsig.Payload()
if err != nil {
return err
}
// sm.Canonical stores the canonical manifest JSON
sm.Canonical = make([]byte, len(bytes))
copy(sm.Canonical, bytes)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
return err
}
sm.Manifest = manifest
return nil
}
// References returns the descriptors of this manifests references
func (sm SignedManifest) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
for i, fsLayer := range sm.FSLayers {
dependencies[i] = distribution.Descriptor{
MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
Digest: fsLayer.BlobSum,
}
}
return dependencies
}
// MarshalJSON returns the stored signed representation (all). If it is empty,
// the inner Manifest contents are marshaled instead. Applications requiring a
// marshaled signed manifest should use Payload directly, since the content
// produced by json.Marshal will be compacted and will fail signature checks.
func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
if len(sm.all) > 0 {
return sm.all, nil
}
// If the raw data is not available, just dump the inner content.
return json.Marshal(&sm.Manifest)
}
// Payload returns the signed content of the signed manifest.
func (sm SignedManifest) Payload() (string, []byte, error) {
return MediaTypeSignedManifest, sm.all, nil
}
// Signatures returns the signatures as provided by
// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
// signatures.
func (sm *SignedManifest) Signatures() ([][]byte, error) {
jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
// Resolve the payload in the manifest.
return jsig.Signatures()
}
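// decodeSignedManifestExample is an editor's sketch, not part of the upstream
// file: it decodes raw signed-manifest JSON and derives the content-addressable
// digest from the canonical payload, mirroring what the registration in init()
// above does for registry responses.
func decodeSignedManifestExample(raw []byte) (*SignedManifest, digest.Digest, error) {
    sm := new(SignedManifest)
    if err := sm.UnmarshalJSON(raw); err != nil {
        return nil, "", err
    }
    // The manifest digest is computed over the canonical (signature-stripped) bytes.
    return sm, digest.FromBytes(sm.Canonical), nil
}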

@ -0,0 +1,98 @@
package schema1
import (
"context"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
)
// referenceManifestBuilder is a type for constructing manifests from schema1
// dependencies.
type referenceManifestBuilder struct {
Manifest
pk libtrust.PrivateKey
}
// NewReferenceManifestBuilder is used to build new manifests for the current
// schema version using schema1 dependencies.
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder {
tag := ""
if tagged, isTagged := ref.(reference.Tagged); isTagged {
tag = tagged.Tag()
}
return &referenceManifestBuilder{
Manifest: Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 1,
},
Name: ref.Name(),
Tag: tag,
Architecture: architecture,
},
pk: pk,
}
}
func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
m := mb.Manifest
if len(m.FSLayers) == 0 {
return nil, errors.New("cannot build manifest with zero layers or history")
}
m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
m.History = make([]History, len(mb.Manifest.History))
copy(m.FSLayers, mb.Manifest.FSLayers)
copy(m.History, mb.Manifest.History)
return Sign(&m, mb.pk)
}
// AppendReference adds a reference to the current ManifestBuilder
func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
r, ok := d.(Reference)
if !ok {
return fmt.Errorf("unable to add non-reference type to v1 builder")
}
// Entries need to be prepended
mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
return nil
}
// References returns the current references added to this builder
func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
for i := range mb.Manifest.FSLayers {
layerDigest := mb.Manifest.FSLayers[i].BlobSum
history := mb.Manifest.History[i]
ref := Reference{layerDigest, 0, history}
refs[i] = ref.Descriptor()
}
return refs
}
// Reference describes a manifest v2, schema version 1 dependency.
// An FSLayer associated with a history entry.
type Reference struct {
Digest digest.Digest
Size int64 // if we know it, set it for the descriptor.
History History
}
// Descriptor describes a reference
func (r Reference) Descriptor() distribution.Descriptor {
return distribution.Descriptor{
MediaType: MediaTypeManifestLayer,
Digest: r.Digest,
Size: r.Size,
}
}
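// referenceBuildExample is an editor's sketch, not part of the upstream file:
// it assembles a schema1 manifest purely from layer digests and their
// v1Compatibility history strings, without needing an image configuration.
// The architecture is hard-coded to "amd64" for the sketch.
func referenceBuildExample(ctx context.Context, pk libtrust.PrivateKey, ref reference.Named, layers []digest.Digest, history []History) (distribution.Manifest, error) {
    mb := NewReferenceManifestBuilder(pk, ref, "amd64")
    // AppendReference prepends, so the reference added last ends up first in FSLayers.
    for i, d := range layers {
        if err := mb.AppendReference(Reference{Digest: d, History: history[i]}); err != nil {
            return nil, err
        }
    }
    return mb.Build(ctx)
}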

@ -0,0 +1,68 @@
package schema1
import (
"crypto/x509"
"encoding/json"
"github.com/docker/libtrust"
)
// Sign signs the manifest with the provided private key, returning a
// SignedManifest. This typically won't be used within the registry, except
// for testing.
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.Sign(pk); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}
// SignWithChain signs the manifest with the given private key and x509 chain.
// The public key of the first element in the chain must be the public key
// corresponding to the signing key.
func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) {
p, err := json.MarshalIndent(m, "", " ")
if err != nil {
return nil, err
}
js, err := libtrust.NewJSONSignature(p)
if err != nil {
return nil, err
}
if err := js.SignWithChain(key, chain); err != nil {
return nil, err
}
pretty, err := js.PrettySignature("signatures")
if err != nil {
return nil, err
}
return &SignedManifest{
Manifest: *m,
all: pretty,
Canonical: p,
}, nil
}
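// signRoundTripExample is an editor's sketch, not part of the upstream file:
// it signs a manifest and immediately checks that the resulting signature
// verifies, using Verify from verify.go in this package.
func signRoundTripExample(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) {
    sm, err := Sign(m, pk)
    if err != nil {
        return nil, err
    }
    // Verify returns the public keys whose signatures over the payload are valid.
    if _, err := Verify(sm); err != nil {
        return nil, err
    }
    return sm, nil
}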

@ -0,0 +1,32 @@
package schema1
import (
"crypto/x509"
"github.com/docker/libtrust"
"github.com/sirupsen/logrus"
)
// Verify verifies the signature of the signed manifest returning the public
// keys used during signing.
func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
return nil, err
}
return js.Verify()
}
// VerifyChains verifies the signature of the signed manifest against the
// certificate pool returning the list of verified chains. Signatures without
// an x509 chain are not checked.
func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
if err != nil {
return nil, err
}
return js.VerifyChains(ca)
}
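// verifyChainsExample is an editor's sketch, not part of the upstream file:
// it checks a signed manifest against a CA pool, accepting it only when at
// least one signature carries an x509 chain rooted in the pool.
func verifyChainsExample(sm *SignedManifest, pool *x509.CertPool) (bool, error) {
    chains, err := VerifyChains(sm, pool)
    if err != nil {
        return false, err
    }
    return len(chains) > 0, nil
}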

@ -0,0 +1,85 @@
package schema2
import (
"context"
"github.com/docker/distribution"
"github.com/opencontainers/go-digest"
)
// builder is a type for constructing manifests.
type builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configMediaType is the media type used to describe the configuration
configMediaType string
// configJSON is the raw configuration content that the manifest will reference
configJSON []byte
// dependencies is a list of descriptors that gets built by successive
// calls to AppendReference. In case of image configuration these are layers.
dependencies []distribution.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
mb := &builder{
bs: bs,
configMediaType: configMediaType,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: SchemaVersion,
Layers: make([]distribution.Descriptor, len(mb.dependencies)),
}
copy(m.Layers, mb.dependencies)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
mb.dependencies = append(mb.dependencies, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
return mb.dependencies
}
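// buildSchema2Example is an editor's sketch, not part of the upstream file:
// it registers layer descriptors on the builder and lets Build publish the
// image configuration blob before emitting the schema2 manifest.
func buildSchema2Example(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
    mb := NewManifestBuilder(bs, MediaTypeImageConfig, configJSON)
    for _, layer := range layers {
        if err := mb.AppendReference(layer); err != nil {
            return nil, err
        }
    }
    // Build stats the config blob and uploads it if the blob store does not know it yet.
    return mb.Build(ctx)
}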

@ -0,0 +1,144 @@
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeImageConfig specifies the mediaType for the image configuration.
MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
// MediaTypeUncompressedLayer is the mediaType used for layers which
// are not compressed.
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifests references.
func (m Manifest) References() []distribution.Descriptor {
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
if manifest.MediaType != MediaTypeManifest {
return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
MediaTypeManifest, manifest.MediaType)
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}
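// manifestDigestExample is an editor's sketch, not part of the upstream file:
// the canonical digest of a schema2 manifest is computed over the exact bytes
// returned by Payload, never over a freshly re-marshaled copy.
func manifestDigestExample(m *DeserializedManifest) (digest.Digest, error) {
    _, canonical, err := m.Payload()
    if err != nil {
        return "", err
    }
    return digest.FromBytes(canonical), nil
}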

@ -0,0 +1,12 @@
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}

@ -0,0 +1,125 @@
package distribution
import (
"context"
"fmt"
"mime"
"github.com/opencontainers/go-digest"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// A reference is anything which can be represented by a
// distribution.Descriptor. These can consist of layers, resources or other
// manifests.
//
// While no particular order is required, implementations should return
// them from highest to lowest priority. For example, one might want to
// return the base layer before the top layer.
References() []Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the media type.
Payload() (mediaType string, payload []byte, err error)
}
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
// Build creates the manifest from this builder.
Build(ctx context.Context) (Manifest, error)
// References returns a list of objects which have been added to this
// builder. The dependencies are returned in the order they were added,
// which should be from base to head.
References() []Descriptor
// AppendReference includes the given object in the manifest after any
// existing dependencies. If the add fails, such as when adding an
// unsupported dependency, an error may be returned.
//
// The destination of the reference is dependent on the manifest type and
// the dependency type.
AppendReference(dependency Describable) error
}
// ManifestService describes operations on image manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestNotFound
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
var mappings = make(map[string]UnmarshalFunc)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediaType string
if ctHeader != "" {
var err error
mediaType, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediaType]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
}
}
return unmarshalFunc(p)
}
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This
// should be called from the init function of a specific manifest package, as the
// schema1 and schema2 packages do.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
if _, ok := mappings[mediaType]; ok {
return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
}
mappings[mediaType] = u
return nil
}
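// unmarshalManifestExample is an editor's sketch, not part of the upstream
// file: it decodes a manifest fetched over HTTP, letting the Content-Type
// header select the registered UnmarshalFunc, and returns its references
// (the config blob and layers for schema2, the FS layers for schema1).
func unmarshalManifestExample(contentType string, body []byte) ([]Descriptor, error) {
    m, _, err := UnmarshalManifest(contentType, body)
    if err != nil {
        return nil, err
    }
    return m.References(), nil
}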

@ -0,0 +1,118 @@
package distribution
import (
"context"
"github.com/docker/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to check the existence and metadata of blobs.
BlobStatter() BlobStatter
}
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// RepositoryRemover removes given repository
type RepositoryRemover interface {
Remove(ctx context.Context, name reference.Named) error
}
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// WithManifestMediaTypes lists the media types the client wishes
// the server to provide.
func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
return WithManifestMediaTypesOption{mediaTypes}
}
// WithManifestMediaTypesOption holds a list of accepted media types
type WithManifestMediaTypesOption struct{ MediaTypes []string }
// Apply conforms to the ManifestServiceOption interface
func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
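// getTaggedManifestExample is an editor's sketch, not part of the upstream
// file: it resolves a repository from a Namespace, looks a tag up in the
// TagService, and fetches the corresponding manifest, passing WithTag so
// implementations that care about the tag can observe it.
func getTaggedManifestExample(ctx context.Context, ns Namespace, name reference.Named, tag string) (Manifest, error) {
    repo, err := ns.Repository(ctx, name)
    if err != nil {
        return nil, err
    }
    desc, err := repo.Tags(ctx).Get(ctx, tag)
    if err != nil {
        return nil, err
    }
    manifests, err := repo.Manifests(ctx)
    if err != nil {
        return nil, err
    }
    return manifests.Get(ctx, desc.Digest, WithTag(tag))
}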

@ -0,0 +1,27 @@
package distribution
import (
"context"
)
// TagService provides access to information about tagged objects.
type TagService interface {
// Get retrieves the descriptor identified by the tag. Some
// implementations may differentiate between "trusted" tags and
// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
// as an ErrTagUntrusted error, with the target descriptor.
Get(ctx context.Context, tag string) (Descriptor, error)
// Tag associates the tag with the provided descriptor, updating the
// current association, if needed.
Tag(ctx context.Context, tag string, desc Descriptor) error
// Untag removes the given tag association
Untag(ctx context.Context, tag string) error
// All returns the set of tags managed by this tag service
All(ctx context.Context) ([]string, error)
// Lookup returns the set of tags referencing the given digest.
Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}

@ -0,0 +1,13 @@
# Contributing to libtrust
Want to hack on libtrust? Awesome! Here are instructions to get you
started.
libtrust is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,3 @@
Solomon Hykes <solomon@docker.com>
Josh Hawn <josh@docker.com> (github: jlhawn)
Derek McGowan <derek@docker.com> (github: dmcgowan)

@ -0,0 +1,22 @@
# libtrust
> **WARNING** this library is no longer actively developed, and will be integrated
> into the [docker/distribution](https://www.github.com/docker/distribution)
> repository in the future.
Libtrust is a library for managing authentication and authorization using public key cryptography.
Authentication is handled using the identity attached to the public key.
Libtrust provides multiple methods to prove possession of the private key associated with an identity.
- TLS x509 certificates
- Signature verification
- Key Challenge
Authorization and access control is managed through a distributed trust graph.
Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
## Copyright and license
Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
Docs released under Creative Commons.

@ -0,0 +1,175 @@
package libtrust
import (
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"time"
)
type certTemplateInfo struct {
commonName string
domains []string
ipAddresses []net.IP
isCA bool
clientAuth bool
serverAuth bool
}
func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
// Generate a certificate template which is valid from the past week to
// 10 years from now. The usage of the certificate depends on the
// specified fields in the given certTempInfo object.
var (
keyUsage x509.KeyUsage
extKeyUsage []x509.ExtKeyUsage
)
if info.isCA {
keyUsage = x509.KeyUsageCertSign
}
if info.clientAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
}
if info.serverAuth {
extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
}
return &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: info.commonName,
},
NotBefore: time.Now().Add(-time.Hour * 24 * 7),
NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
DNSNames: info.domains,
IPAddresses: info.ipAddresses,
IsCA: info.isCA,
KeyUsage: keyUsage,
ExtKeyUsage: extKeyUsage,
BasicConstraintsValid: info.isCA,
}
}
func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
pubCertTemplate := generateCertTemplate(subInfo)
privCertTemplate := generateCertTemplate(issInfo)
certDER, err := x509.CreateCertificate(
rand.Reader, pubCertTemplate, privCertTemplate,
pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
)
if err != nil {
return nil, fmt.Errorf("failed to create certificate: %s", err)
}
cert, err = x509.ParseCertificate(certDER)
if err != nil {
return nil, fmt.Errorf("failed to parse certificate: %s", err)
}
return
}
// GenerateSelfSignedServerCert creates a self-signed certificate for the
// given key which is to be used for TLS servers with the given domains and
// IP addresses.
func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
domains: domains,
ipAddresses: ipAddresses,
serverAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateSelfSignedClientCert creates a self-signed certificate for the
// given key which is to be used for TLS clients.
func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
info := &certTemplateInfo{
commonName: key.KeyID(),
clientAuth: true,
}
return generateCert(key.PublicKey(), key, info, info)
}
// GenerateCACert creates a certificate which can be used as a trusted
// certificate authority.
func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
subjectInfo := &certTemplateInfo{
commonName: trustedKey.KeyID(),
isCA: true,
}
issuerInfo := &certTemplateInfo{
commonName: signer.KeyID(),
}
return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
}
// GenerateCACertPool creates a certificate authority pool to be used for a
// TLS configuration. Any self-signed certificates issued by the specified
// trusted keys will be verified during a TLS handshake.
func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, trustedKey := range trustedKeys {
cert, err := GenerateCACert(signer, trustedKey)
if err != nil {
return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
}
certPool.AddCert(cert)
}
return certPool, nil
}
// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
certificates := []*x509.Certificate{}
var block *pem.Block
block, b = pem.Decode(b)
for ; block != nil; block, b = pem.Decode(b) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
return certificates, nil
}
// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
// containing one or more certificates. The expected pem type is "CERTIFICATE".
func LoadCertificatePool(filename string) (*x509.CertPool, error) {
certs, err := LoadCertificateBundle(filename)
if err != nil {
return nil, err
}
pool := x509.NewCertPool()
for _, cert := range certs {
pool.AddCert(cert)
}
return pool, nil
}
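// selfSignedServerCertExample is an editor's sketch, not part of the upstream
// file: it generates a fresh EC key and a matching self-signed server
// certificate, the pair a caller would then wire into a tls.Certificate.
func selfSignedServerCertExample(domains []string, ips []net.IP) (PrivateKey, *x509.Certificate, error) {
    // GenerateECP256PrivateKey lives elsewhere in this package (ec_key.go).
    key, err := GenerateECP256PrivateKey()
    if err != nil {
        return nil, nil, err
    }
    cert, err := GenerateSelfSignedServerCert(key, domains, ips)
    if err != nil {
        return nil, nil, err
    }
    return key, cert, nil
}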

@ -0,0 +1,9 @@
/*
Package libtrust provides an interface for managing authentication and
authorization using public key cryptography. Authentication is handled
using the identity attached to the public key and verified through TLS
x509 certificates, a key challenge, or signature. Authorization and
access control is managed through a trust graph distributed between
both remote trust servers and locally cached and managed data.
*/
package libtrust

@ -0,0 +1,428 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* EC DSA PUBLIC KEY
*/
// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
// signature algorithms.
type ecPublicKey struct {
*ecdsa.PublicKey
curveName string
signatureAlgorithm *signatureAlgorithm
extended map[string]interface{}
}
func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
curve := cryptoPublicKey.Curve
switch {
case curve == elliptic.P256():
return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
case curve == elliptic.P384():
return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
case curve == elliptic.P521():
return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
default:
return nil, errors.New("unsupported elliptic curve")
}
}
// KeyType returns the key type for elliptic curve keys, i.e., "EC".
func (k *ecPublicKey) KeyType() string {
return "EC"
}
// CurveName returns the elliptic curve identifier.
// Possible values are "P-256", "P-384", and "P-521".
func (k *ecPublicKey) CurveName() string {
return k.curveName
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *ecPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *ecPublicKey) String() string {
return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this
// PublicKey. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be supported
// by this public key. Returns a nil error if the signature is valid.
func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// For EC keys there is only one supported signature algorithm depending
// on the curve parameters.
if k.signatureAlgorithm.HeaderParam() != alg {
return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
}
// signature is the concatenation of (r, s), base64Url encoded.
sigLength := len(signature)
expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
if sigLength != expectedOctetLength {
return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
}
rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
r := new(big.Int).SetBytes(rBytes)
s := new(big.Int).SetBytes(sBytes)
hasher := k.signatureAlgorithm.HashID().New()
_, err := io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
if !ecdsa.Verify(k.PublicKey, hash, r, s) {
return errors.New("invalid signature")
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *ecPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["crv"] = k.CurveName()
xBytes := k.X.Bytes()
yBytes := k.Y.Bytes()
octetLength := (k.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output so that x, y are each
// *octetLength* bytes long.
xBuf := make([]byte, octetLength-len(xBytes), octetLength)
yBuf := make([]byte, octetLength-len(yBytes), octetLength)
xBuf = append(xBuf, xBytes...)
yBuf = append(yBuf, yBytes...)
jwk["x"] = joseBase64UrlEncode(xBuf)
jwk["y"] = joseBase64UrlEncode(yBuf)
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *ecPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
// JWK key type (kty) has already been determined to be "EC".
// Need to extract 'crv', 'x', 'y', and 'kid' and check for
// consistency.
// Get the curve identifier value.
crv, err := stringFromMap(jwk, "crv")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
}
var (
curve elliptic.Curve
sigAlg *signatureAlgorithm
)
switch {
case crv == "P-256":
curve = elliptic.P256()
sigAlg = es256
case crv == "P-384":
curve = elliptic.P384()
sigAlg = es384
case crv == "P-521":
curve = elliptic.P521()
sigAlg = es512
default:
return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv)
}
// Get the X and Y coordinates for the public key point.
xB64Url, err := stringFromMap(jwk, "x")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
x, err := parseECCoordinate(xB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
}
yB64Url, err := stringFromMap(jwk, "y")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
y, err := parseECCoordinate(yB64Url, curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
}
key := &ecPublicKey{
PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
curveName: crv, signatureAlgorithm: sigAlg,
}
// Key ID is optional too, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
}
}
key.extended = jwk
return key, nil
}
/*
* EC DSA PRIVATE KEY
*/
// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
// algorithms.
type ecPrivateKey struct {
ecPublicKey
*ecdsa.PrivateKey
}
func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
if err != nil {
return nil, err
}
return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *ecPrivateKey) PublicKey() PublicKey {
return &k.ecPublicKey
}
func (k *ecPrivateKey) String() string {
return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the elliptic curve private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the signature
// otherwise the default hashing algorithm for this key is used. Returns
// the signature and the name of the JWK signature algorithm used, e.g.,
// "ES256", "ES384", "ES512".
func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
// The given hashId is only a suggestion, and since EC keys only support
// one signature/hash algorithm given the curve name, we disregard it for
// the elliptic curve JWK signature implementation.
hasher := k.signatureAlgorithm.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
rBytes, sBytes := r.Bytes(), s.Bytes()
octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
signature = append(rBuf, sBuf...)
alg = k.signatureAlgorithm.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *ecPrivateKey) toMap() map[string]interface{} {
jwk := k.ecPublicKey.toMap()
dBytes := k.D.Bytes()
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := k.ecPublicKey.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
// Create a buffer with the necessary zero-padding.
dBuf := make([]byte, octetLength-len(dBytes), octetLength)
dBuf = append(dBuf, dBytes...)
jwk["d"] = joseBase64UrlEncode(dBuf)
return jwk
}
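// Worked example of the arithmetic above (illustrative, not part of the
// vendored file): P-256 has a 256-bit order n, so d is padded to
// (256+7)>>3 = 32 octets; P-384 gives (384+7)>>3 = 48 octets; and P-521,
// whose order is 521 bits long, gives (521+7)>>3 = 66 octets.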
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// elliptic curve keys.
func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
}
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
}
func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
dB64Url, err := stringFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key: %s", err)
}
// JWK key type (kty) has already been determined to be "EC".
// Need to extract the public key information, then extract the private
// key value 'd'.
publicKey, err := ecPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
if err != nil {
return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
}
key := &ecPrivateKey{
ecPublicKey: *publicKey,
PrivateKey: &ecdsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: d,
},
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
k = new(ecPrivateKey)
k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
if err != nil {
return nil, err
}
k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
func GenerateECP256PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P256())
if err != nil {
return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
}
k.curveName = "P-256"
k.signatureAlgorithm = es256
return k, nil
}
// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
func GenerateECP384PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P384())
if err != nil {
return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
}
k.curveName = "P-384"
k.signatureAlgorithm = es384
return k, nil
}
// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
func GenerateECP521PrivateKey() (PrivateKey, error) {
k, err := generateECPrivateKey(elliptic.P521())
if err != nil {
return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
}
k.curveName = "P-521"
k.signatureAlgorithm = es512
return k, nil
}
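
// Illustrative sketch (not part of the vendored file): generating an EC key
// with the functions above, signing a payload and verifying it through the
// PublicKey interface. For EC keys the hash is fixed by the curve, so the
// crypto.SHA256 argument is only a hint.
package main

import (
	"bytes"
	"crypto"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	payload := []byte("hello, libtrust")
	sig, alg, err := key.Sign(bytes.NewReader(payload), crypto.SHA256)
	if err != nil {
		panic(err)
	}
	fmt.Println("signed with", alg) // "ES256" for a P-256 key
	if err := key.PublicKey().Verify(bytes.NewReader(payload), alg, sig); err != nil {
		panic(err)
	}
	fmt.Println("signature verified by", key.PublicKey().KeyID())
}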

@ -0,0 +1,50 @@
package libtrust
import (
"path/filepath"
)
// FilterByHosts filters the list of PublicKeys to only those which contain a
// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
// then keys which do not specify any hosts are also returned.
func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
filtered := make([]PublicKey, 0, len(keys))
for _, pubKey := range keys {
var hosts []string
switch v := pubKey.GetExtendedField("hosts").(type) {
case []string:
hosts = v
case []interface{}:
for _, value := range v {
h, ok := value.(string)
if !ok {
continue
}
hosts = append(hosts, h)
}
}
if len(hosts) == 0 {
if includeEmpty {
filtered = append(filtered, pubKey)
}
continue
}
// Check if any hosts match pattern
for _, hostPattern := range hosts {
match, err := filepath.Match(hostPattern, host)
if err != nil {
return nil, err
}
if match {
filtered = append(filtered, pubKey)
continue
}
}
}
return filtered, nil
}
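
// Illustrative sketch (not part of the vendored file): restricting a key set
// to a single host via the 'hosts' extended field consumed by FilterByHosts.
// The host names below are made up for the example; error handling is elided.
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	k1, _ := libtrust.GenerateECP256PrivateKey()
	k2, _ := libtrust.GenerateECP256PrivateKey()
	pub1, pub2 := k1.PublicKey(), k2.PublicKey()
	pub1.AddExtendedField("hosts", []string{"registry.example.com", "*.internal.example.com"})
	// pub2 carries no 'hosts' field, so it only appears when includeEmpty is true.

	matched, err := libtrust.FilterByHosts([]libtrust.PublicKey{pub1, pub2}, "registry.example.com", false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(matched)) // 1: only pub1 matches the host pattern
}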

@ -0,0 +1,56 @@
package libtrust
import (
"crypto"
_ "crypto/sha256" // Registrer SHA224 and SHA256
_ "crypto/sha512" // Registrer SHA384 and SHA512
"fmt"
)
type signatureAlgorithm struct {
algHeaderParam string
hashID crypto.Hash
}
func (h *signatureAlgorithm) HeaderParam() string {
return h.algHeaderParam
}
func (h *signatureAlgorithm) HashID() crypto.Hash {
return h.hashID
}
var (
rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
)
func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
switch {
case alg == "RS256":
return rs256, nil
case alg == "RS384":
return rs384, nil
case alg == "RS512":
return rs512, nil
default:
return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
}
}
func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
switch {
case hashID == crypto.SHA512:
return rs512
case hashID == crypto.SHA384:
return rs384
case hashID == crypto.SHA256:
fallthrough
default:
return rs256
}
}
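
// Illustrative sketch (not part of the vendored file): the hash passed to Sign
// selects the JWS "alg" header parameter for RSA keys via
// rsaPKCS1v15SignatureAlgorithmForHashID above; unsupported hashes fall back
// to RS256.
package main

import (
	"crypto"
	"fmt"
	"strings"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateRSA2048PrivateKey()
	if err != nil {
		panic(err)
	}
	for _, h := range []crypto.Hash{crypto.SHA256, crypto.SHA384, crypto.SHA512, crypto.SHA1} {
		_, alg, err := key.Sign(strings.NewReader("payload"), h)
		if err != nil {
			panic(err)
		}
		fmt.Println(h, "->", alg) // SHA-256 -> RS256, SHA-384 -> RS384, SHA-512 -> RS512, SHA-1 -> RS256
	}
}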

@ -0,0 +1,657 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"sort"
"time"
"unicode"
)
var (
// ErrInvalidSignContent is used when the content to be signed is invalid.
ErrInvalidSignContent = errors.New("invalid sign content")
// ErrInvalidJSONContent is used when invalid json is encountered.
ErrInvalidJSONContent = errors.New("invalid json content")
// ErrMissingSignatureKey is used when the specified signature key
// does not exist in the JSON content.
ErrMissingSignatureKey = errors.New("missing signature key")
)
type jsHeader struct {
JWK PublicKey `json:"jwk,omitempty"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c,omitempty"`
}
type jsSignature struct {
Header jsHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected,omitempty"`
}
type jsSignaturesSorted []jsSignature
func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
si, sj := jsbkid[i].Signature, jsbkid[j].Signature
if ki == kj {
return si < sj
}
return ki < kj
}
type signKey struct {
PrivateKey
Chain []*x509.Certificate
}
// JSONSignature represents a signature of a json object.
type JSONSignature struct {
payload string
signatures []jsSignature
indent string
formatLength int
formatTail []byte
}
func newJSONSignature() *JSONSignature {
return &JSONSignature{
signatures: make([]jsSignature, 0, 1),
}
}
// Payload returns the encoded payload of the signature. This
// payload should not be signed directly
func (js *JSONSignature) Payload() ([]byte, error) {
return joseBase64UrlDecode(js.payload)
}
func (js *JSONSignature) protectedHeader() (string, error) {
protected := map[string]interface{}{
"formatLength": js.formatLength,
"formatTail": joseBase64UrlEncode(js.formatTail),
"time": time.Now().UTC().Format(time.RFC3339),
}
protectedBytes, err := json.Marshal(protected)
if err != nil {
return "", err
}
return joseBase64UrlEncode(protectedBytes), nil
}
func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
copy(buf, protectedHeader)
buf[len(protectedHeader)] = '.'
copy(buf[len(protectedHeader)+1:], js.payload)
return buf, nil
}
// Sign adds a signature using the given private key.
func (js *JSONSignature) Sign(key PrivateKey) error {
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
js.signatures = append(js.signatures, jsSignature{
Header: jsHeader{
JWK: key.PublicKey(),
Algorithm: algorithm,
},
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// SignWithChain adds a signature using the given private key
// and setting the x509 chain. The public key of the first element
// in the chain must be the public key corresponding with the sign key.
func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
// Ensure key.Chain[0] is public key for key
//key.Chain.PublicKey
//key.PublicKey().CryptoPublicKey()
// Verify chain
protected, err := js.protectedHeader()
if err != nil {
return err
}
signBytes, err := js.signBytes(protected)
if err != nil {
return err
}
sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
if err != nil {
return err
}
header := jsHeader{
Chain: make([]string, len(chain)),
Algorithm: algorithm,
}
for i, cert := range chain {
header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
}
js.signatures = append(js.signatures, jsSignature{
Header: header,
Signature: joseBase64UrlEncode(sigBytes),
Protected: protected,
})
return nil
}
// Verify verifies all the signatures and returns the list of
// public keys used to sign. Any x509 chains are not checked.
func (js *JSONSignature) Verify() ([]PublicKey, error) {
keys := make([]PublicKey, len(js.signatures))
for i, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
} else if signature.Header.JWK != nil {
publicKey = signature.Header.JWK
} else {
return nil, errors.New("missing public key")
}
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
keys[i] = publicKey
}
return keys, nil
}
// VerifyChains verifies all the signatures and the chains associated
// with each signature and returns the list of verified chains.
// Signatures without an x509 chain are not checked.
func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
chains := make([][]*x509.Certificate, 0, len(js.signatures))
for _, signature := range js.signatures {
signBytes, err := js.signBytes(signature.Protected)
if err != nil {
return nil, err
}
var publicKey PublicKey
if len(signature.Header.Chain) > 0 {
certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
publicKey, err = FromCryptoPublicKey(cert.PublicKey)
if err != nil {
return nil, err
}
intermediates := x509.NewCertPool()
if len(signature.Header.Chain) > 1 {
intermediateChain := signature.Header.Chain[1:]
for i := range intermediateChain {
certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
if err != nil {
return nil, err
}
intermediate, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
intermediates.AddCert(intermediate)
}
}
verifyOptions := x509.VerifyOptions{
Intermediates: intermediates,
Roots: ca,
}
verifiedChains, err := cert.Verify(verifyOptions)
if err != nil {
return nil, err
}
chains = append(chains, verifiedChains...)
sigBytes, err := joseBase64UrlDecode(signature.Signature)
if err != nil {
return nil, err
}
err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
if err != nil {
return nil, err
}
}
}
return chains, nil
}
// JWS returns JSON serialized JWS according to
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
func (js *JSONSignature) JWS() ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("missing signature")
}
sort.Sort(jsSignaturesSorted(js.signatures))
jsonMap := map[string]interface{}{
"payload": js.payload,
"signatures": js.signatures,
}
return json.MarshalIndent(jsonMap, "", " ")
}
func notSpace(r rune) bool {
return !unicode.IsSpace(r)
}
func detectJSONIndent(jsonContent []byte) (indent string) {
if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
if quoteIndex > 0 {
indent = string(jsonContent[2 : quoteIndex+1])
}
}
return
}
type jsParsedHeader struct {
JWK json.RawMessage `json:"jwk"`
Algorithm string `json:"alg"`
Chain []string `json:"x5c"`
}
type jsParsedSignature struct {
Header jsParsedHeader `json:"header"`
Signature string `json:"signature"`
Protected string `json:"protected"`
}
// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
func ParseJWS(content []byte) (*JSONSignature, error) {
type jsParsed struct {
Payload string `json:"payload"`
Signatures []jsParsedSignature `json:"signatures"`
}
parsed := &jsParsed{}
err := json.Unmarshal(content, parsed)
if err != nil {
return nil, err
}
if len(parsed.Signatures) == 0 {
return nil, errors.New("missing signatures")
}
payload, err := joseBase64UrlDecode(parsed.Payload)
if err != nil {
return nil, err
}
js, err := NewJSONSignature(payload)
if err != nil {
return nil, err
}
js.signatures = make([]jsSignature, len(parsed.Signatures))
for i, signature := range parsed.Signatures {
header := jsHeader{
Algorithm: signature.Header.Algorithm,
}
if signature.Header.Chain != nil {
header.Chain = signature.Header.Chain
}
if signature.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
if err != nil {
return nil, err
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signature.Signature,
Protected: signature.Protected,
}
}
return js, nil
}
// NewJSONSignature returns a new unsigned JWS from a json byte array.
// JSONSignature will need to be signed before serializing or storing.
// Optionally, one or more signatures can be provided as byte buffers,
// containing serialized JWS signatures, to assemble a fully signed JWS
// package. It is the caller's responsibility to ensure uniqueness of the
// provided signatures.
func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
var dataMap map[string]interface{}
err := json.Unmarshal(content, &dataMap)
if err != nil {
return nil, err
}
js := newJSONSignature()
js.indent = detectJSONIndent(content)
js.payload = joseBase64UrlEncode(content)
// Find trailing } and whitespace, put in protected header
closeIndex := bytes.LastIndexFunc(content, notSpace)
if content[closeIndex] != '}' {
return nil, ErrInvalidJSONContent
}
lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
if content[lastRuneIndex] == ',' {
return nil, ErrInvalidJSONContent
}
js.formatLength = lastRuneIndex + 1
js.formatTail = content[js.formatLength:]
if len(signatures) > 0 {
for _, signature := range signatures {
var parsedJSig jsParsedSignature
if err := json.Unmarshal(signature, &parsedJSig); err != nil {
return nil, err
}
// TODO(stevvooe): A lot of the code below is repeated in
// ParseJWS. It will require more refactoring to fix that.
jsig := jsSignature{
Header: jsHeader{
Algorithm: parsedJSig.Header.Algorithm,
},
Signature: parsedJSig.Signature,
Protected: parsedJSig.Protected,
}
if parsedJSig.Header.Chain != nil {
jsig.Header.Chain = parsedJSig.Header.Chain
}
if parsedJSig.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
if err != nil {
return nil, err
}
jsig.Header.JWK = publicKey
}
js.signatures = append(js.signatures, jsig)
}
}
return js, nil
}
// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
// struct. JWS will need to be signed before serializing or storing.
func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
switch content.(type) {
case map[string]interface{}:
case struct{}:
default:
return nil, errors.New("invalid data type")
}
js := newJSONSignature()
js.indent = " "
payload, err := json.MarshalIndent(content, "", js.indent)
if err != nil {
return nil, err
}
js.payload = joseBase64UrlEncode(payload)
// Remove '\n}' from formatted section, put in protected header
js.formatLength = len(payload) - 2
js.formatTail = payload[js.formatLength:]
return js, nil
}
func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
value, ok := m[key]
if !ok {
return 0, false
}
switch v := value.(type) {
case int:
return v, true
case float64:
return int(v), true
default:
return 0, false
}
}
func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
value, ok := m[key]
if !ok {
return "", false
}
v, ok = value.(string)
return
}
// ParsePrettySignature parses a formatted signature into a
// JSON signature. If the signatures are missing the format information
// an error is returned. The formatted signature must have been created
// by PrettySignature.
func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
var contentMap map[string]json.RawMessage
err := json.Unmarshal(content, &contentMap)
if err != nil {
return nil, fmt.Errorf("error unmarshalling content: %s", err)
}
sigMessage, ok := contentMap[signatureKey]
if !ok {
return nil, ErrMissingSignatureKey
}
var signatureBlocks []jsParsedSignature
err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
if err != nil {
return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
}
js := newJSONSignature()
js.signatures = make([]jsSignature, len(signatureBlocks))
for i, signatureBlock := range signatureBlocks {
protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
if err != nil {
return nil, fmt.Errorf("base64 decode error: %s", err)
}
var protectedHeader map[string]interface{}
err = json.Unmarshal(protectedBytes, &protectedHeader)
if err != nil {
return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
}
formatLength, ok := readIntFromMap("formatLength", protectedHeader)
if !ok {
return nil, errors.New("missing formatted length")
}
encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
if !ok {
return nil, errors.New("missing formatted tail")
}
formatTail, err := joseBase64UrlDecode(encodedTail)
if err != nil {
return nil, fmt.Errorf("base64 decode error on tail: %s", err)
}
if js.formatLength == 0 {
js.formatLength = formatLength
} else if js.formatLength != formatLength {
return nil, errors.New("conflicting format length")
}
if len(js.formatTail) == 0 {
js.formatTail = formatTail
} else if bytes.Compare(js.formatTail, formatTail) != 0 {
return nil, errors.New("conflicting format tail")
}
header := jsHeader{
Algorithm: signatureBlock.Header.Algorithm,
Chain: signatureBlock.Header.Chain,
}
if signatureBlock.Header.JWK != nil {
publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
if err != nil {
return nil, fmt.Errorf("error unmarshalling public key: %s", err)
}
header.JWK = publicKey
}
js.signatures[i] = jsSignature{
Header: header,
Signature: signatureBlock.Signature,
Protected: signatureBlock.Protected,
}
}
if js.formatLength > len(content) {
return nil, errors.New("invalid format length")
}
formatted := make([]byte, js.formatLength+len(js.formatTail))
copy(formatted, content[:js.formatLength])
copy(formatted[js.formatLength:], js.formatTail)
js.indent = detectJSONIndent(formatted)
js.payload = joseBase64UrlEncode(formatted)
return js, nil
}
// PrettySignature formats a json signature into an easy to read
// single json serialized object.
func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
if len(js.signatures) == 0 {
return nil, errors.New("no signatures")
}
payload, err := joseBase64UrlDecode(js.payload)
if err != nil {
return nil, err
}
payload = payload[:js.formatLength]
sort.Sort(jsSignaturesSorted(js.signatures))
var marshalled []byte
var marshallErr error
if js.indent != "" {
marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
} else {
marshalled, marshallErr = json.Marshal(js.signatures)
}
if marshallErr != nil {
return nil, marshallErr
}
buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
buf.Write(payload)
buf.WriteByte(',')
if js.indent != "" {
buf.WriteByte('\n')
buf.WriteString(js.indent)
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\": ")
buf.Write(marshalled)
buf.WriteByte('\n')
} else {
buf.WriteByte('"')
buf.WriteString(signatureKey)
buf.WriteString("\":")
buf.Write(marshalled)
}
buf.WriteByte('}')
return buf.Bytes(), nil
}
// Signatures provides the signatures on this JWS as opaque blobs, sorted by
// keyID. These blobs can be stored and reassembled with payloads. Internally,
// they are simply marshaled json web signatures but implementations should
// not rely on this.
func (js *JSONSignature) Signatures() ([][]byte, error) {
sort.Sort(jsSignaturesSorted(js.signatures))
var sb [][]byte
for _, jsig := range js.signatures {
p, err := json.Marshal(jsig)
if err != nil {
return nil, err
}
sb = append(sb, p)
}
return sb, nil
}
// Merge combines the signatures from one or more other signatures into the
// method receiver. If the payloads differ for any argument, an error will be
// returned and the receiver will not be modified.
func (js *JSONSignature) Merge(others ...*JSONSignature) error {
merged := js.signatures
for _, other := range others {
if js.payload != other.payload {
return fmt.Errorf("payloads differ from merge target")
}
merged = append(merged, other.signatures...)
}
js.signatures = merged
return nil
}
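
// Illustrative sketch (not part of the vendored file): signing a small JSON
// document, serializing it as a JWS and verifying the parsed result with the
// API defined above.
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	content := []byte(`{
   "name": "example",
   "tag": "latest"
}`)
	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		panic(err)
	}
	if err := js.Sign(key); err != nil {
		panic(err)
	}
	jws, err := js.JWS()
	if err != nil {
		panic(err)
	}
	parsed, err := libtrust.ParseJWS(jws)
	if err != nil {
		panic(err)
	}
	keys, err := parsed.Verify()
	if err != nil {
		panic(err)
	}
	fmt.Println("verified by", keys[0].KeyID())
}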

@ -0,0 +1,253 @@
package libtrust
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
)
// PublicKey is a generic interface for a Public Key.
type PublicKey interface {
// KeyType returns the key type for this key. For elliptic curve keys,
// this value should be "EC". For RSA keys, this value should be "RSA".
KeyType() string
// KeyID returns a distinct identifier which is unique to this Public Key.
// The format generated by this library is a base32 encoding of a 240 bit
// hash of the public key data divided into 12 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
KeyID() string
// Verify verifies the signature of the data in the io.Reader using this
// Public Key. The alg parameter should identify the digital signature
// algorithm which was used to produce the signature and should be
// supported by this public key. Returns a nil error if the signature
// is valid.
Verify(data io.Reader, alg string, signature []byte) error
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
CryptoPublicKey() crypto.PublicKey
// These public keys can be serialized to the standard JSON encoding for
// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
// Algorithms.
MarshalJSON() ([]byte, error)
// These keys can also be serialized to the standard PEM encoding.
PEMBlock() (*pem.Block, error)
// The string representation of a key is its key type and ID.
String() string
AddExtendedField(string, interface{})
GetExtendedField(string) interface{}
}
// PrivateKey is a generic interface for a Private Key.
type PrivateKey interface {
// A PrivateKey contains all fields and methods of a PublicKey of the
// same type. The MarshalJSON method also outputs the private key as a
// JSON Web Key, and the PEMBlock method outputs the private key as a
// PEM block.
PublicKey
// PublicKey returns the PublicKey associated with this PrivateKey.
PublicKey() PublicKey
// Sign signs the data read from the io.Reader using a signature algorithm
// supported by the private key. If the specified hashing algorithm is
// supported by this key, that hash function is used to generate the
// signature, otherwise the default hashing algorithm for this key is
// used. Returns the signature and identifier of the algorithm used.
Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The
// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
CryptoPrivateKey() crypto.PrivateKey
}
// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
switch cryptoPublicKey := cryptoPublicKey.(type) {
case *ecdsa.PublicKey:
return fromECPublicKey(cryptoPublicKey)
case *rsa.PublicKey:
return fromRSAPublicKey(cryptoPublicKey), nil
default:
return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
}
}
// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
switch cryptoPrivateKey := cryptoPrivateKey.(type) {
case *ecdsa.PrivateKey:
return fromECPrivateKey(cryptoPrivateKey)
case *rsa.PrivateKey:
return fromRSAPrivateKey(cryptoPrivateKey), nil
default:
return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
}
}
// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
// PublicKey or an error if there is a problem with the encoding.
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
return pubKeyFromPEMBlock(pemBlock)
}
// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
// PEM blocks appended one after the other and returns a slice of PublicKey
// objects that it finds.
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
pubKeys := []PublicKey{}
for {
var pemBlock *pem.Block
pemBlock, data = pem.Decode(data)
if pemBlock == nil {
break
} else if pemBlock.Type != "PUBLIC KEY" {
return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
}
pubKey, err := pubKeyFromPEMBlock(pemBlock)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
// PrivateKey or an error if there is a problem with the encoding.
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
pemBlock, _ := pem.Decode(data)
if pemBlock == nil {
return nil, errors.New("unable to find PEM encoded data")
}
var key PrivateKey
switch {
case pemBlock.Type == "RSA PRIVATE KEY":
rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
}
key = fromRSAPrivateKey(rsaPrivateKey)
case pemBlock.Type == "EC PRIVATE KEY":
ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
}
key, err = fromECPrivateKey(ecPrivateKey)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
}
addPEMHeadersToKey(pemBlock, key.PublicKey())
return key, nil
}
// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
// Public Key to be used with libtrust.
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Public Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Public Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC public key.
return ecPublicKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA public key.
return rsaPublicKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Public Key type not supported: %q\n", kty,
)
}
}
// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
// and returns a slice of Public Key objects.
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
rawKeys, err := loadJSONKeySetRaw(data)
if err != nil {
return nil, err
}
pubKeys := make([]PublicKey, 0, len(rawKeys))
for _, rawKey := range rawKeys {
pubKey, err := UnmarshalPublicKeyJWK(rawKey)
if err != nil {
return nil, err
}
pubKeys = append(pubKeys, pubKey)
}
return pubKeys, nil
}
// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
// Private Key to be used with libtrust.
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
jwk := make(map[string]interface{})
err := json.Unmarshal(data, &jwk)
if err != nil {
return nil, fmt.Errorf(
"decoding JWK Private Key JSON data: %s\n", err,
)
}
// Get the Key Type value.
kty, err := stringFromMap(jwk, "kty")
if err != nil {
return nil, fmt.Errorf("JWK Private Key type: %s", err)
}
switch {
case kty == "EC":
// Call out to unmarshal EC private key.
return ecPrivateKeyFromMap(jwk)
case kty == "RSA":
// Call out to unmarshal RSA private key.
return rsaPrivateKeyFromMap(jwk)
default:
return nil, fmt.Errorf(
"JWK Private Key type not supported: %q\n", kty,
)
}
}
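
// Illustrative sketch (not part of the vendored file): round-tripping a public
// key through the JWK JSON encoding handled by UnmarshalPublicKeyJWK above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	jwkJSON, err := json.Marshal(key.PublicKey())
	if err != nil {
		panic(err)
	}
	decoded, err := libtrust.UnmarshalPublicKeyJWK(jwkJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.KeyID() == key.PublicKey().KeyID()) // true: same key ID after the round trip
}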

@ -0,0 +1,255 @@
package libtrust
import (
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
)
var (
// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)
func readKeyFileBytes(filename string) ([]byte, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
if os.IsNotExist(err) {
err = ErrKeyFileDoesNotExist
} else {
err = fmt.Errorf("unable to read key file %s: %s", filename, err)
}
return nil, err
}
return data, nil
}
/*
Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/
// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PrivateKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPrivateKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
}
} else {
key, err = UnmarshalPrivateKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
}
}
return key, nil
}
// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadPublicKeyFile(filename string) (PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil {
return nil, err
}
var key PublicKey
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
key, err = UnmarshalPublicKeyJWK(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
}
} else {
key, err = UnmarshalPublicKeyPEM(contents)
if err != nil {
return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
}
}
return key, nil
}
// SaveKey saves the given key to a file using the provided filename.
// This process will overwrite any existing file at the provided location.
func SaveKey(filename string, key PrivateKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode private key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
if err != nil {
return fmt.Errorf("unable to write private key file %s: %s", filename, err)
}
return nil
}
// SavePublicKey saves the given public key to the file.
func SavePublicKey(filename string, key PublicKey) error {
var encodedKey []byte
var err error
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
// Encode in JSON Web Key format.
encodedKey, err = json.MarshalIndent(key, "", " ")
if err != nil {
return fmt.Errorf("unable to encode public key JWK: %s", err)
}
} else {
// Encode in PEM format.
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encode public key PEM: %s", err)
}
encodedKey = pem.EncodeToMemory(pemBlock)
}
err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write public key file %s: %s", filename, err)
}
return nil
}
// Public Key Set files
type jwkSet struct {
Keys []json.RawMessage `json:"keys"`
}
// LoadKeySetFile loads a key set
func LoadKeySetFile(filename string) ([]PublicKey, error) {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return loadJSONKeySetFile(filename)
}
// Must be a PEM format file
return loadPEMKeySetFile(filename)
}
func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
if len(data) == 0 {
// This is okay, just return an empty slice.
return []json.RawMessage{}, nil
}
keySet := jwkSet{}
err := json.Unmarshal(data, &keySet)
if err != nil {
return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
}
return keySet.Keys, nil
}
func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyJWKSet(contents)
}
func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
data, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return nil, err
}
return UnmarshalPublicKeyPEMBundle(data)
}
// AddKeySetFile adds a key to a key set
func AddKeySetFile(filename string, key PublicKey) error {
if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
return addKeySetJSONFile(filename, key)
}
// Must be a PEM format file
return addKeySetPEMFile(filename, key)
}
func addKeySetJSONFile(filename string, key PublicKey) error {
encodedKey, err := json.Marshal(key)
if err != nil {
return fmt.Errorf("unable to encode trusted client key: %s", err)
}
contents, err := readKeyFileBytes(filename)
if err != nil && err != ErrKeyFileDoesNotExist {
return err
}
rawEntries, err := loadJSONKeySetRaw(contents)
if err != nil {
return err
}
rawEntries = append(rawEntries, json.RawMessage(encodedKey))
entriesWrapper := jwkSet{Keys: rawEntries}
encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
if err != nil {
return fmt.Errorf("unable to encode trusted client keys: %s", err)
}
err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
}
return nil
}
func addKeySetPEMFile(filename string, key PublicKey) error {
// Encode to PEM, open file for appending, write PEM.
file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
if err != nil {
return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
}
defer file.Close()
pemBlock, err := key.PEMBlock()
if err != nil {
return fmt.Errorf("unable to encoded trusted key: %s", err)
}
_, err = file.Write(pem.EncodeToMemory(pemBlock))
if err != nil {
return fmt.Errorf("unable to write trusted keys file: %s", err)
}
return nil
}
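
// Illustrative sketch (not part of the vendored file): persisting a private key
// and maintaining a JWK-set file of trusted public keys with the helpers above.
// The file names are made up for the example and are relative to the working
// directory; a ".json"/".jwk" suffix selects JWK encoding, anything else PEM.
package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}
	if err := libtrust.SaveKey("trust-key.json", key); err != nil {
		panic(err)
	}
	reloaded, err := libtrust.LoadKeyFile("trust-key.json")
	if err != nil {
		panic(err)
	}
	if err := libtrust.AddKeySetFile("trusted-keys.json", reloaded.PublicKey()); err != nil {
		panic(err)
	}
	trusted, err := libtrust.LoadKeySetFile("trusted-keys.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(trusted)) // 1
}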

@ -0,0 +1,175 @@
package libtrust
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"sync"
)
// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
key PrivateKey
clientFile string
clientDir string
clientLock sync.RWMutex
clients []PublicKey
configLock sync.Mutex
configs []*tls.Config
}
// NewClientKeyManager loads a new manager from a set of key files
// managed by the given private key.
func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
m := &ClientKeyManager{
key: trustKey,
clientFile: clientFile,
clientDir: clientDir,
}
if err := m.loadKeys(); err != nil {
return nil, err
}
// TODO Start watching file and directory
return m, nil
}
func (c *ClientKeyManager) loadKeys() (err error) {
// Load authorized keys file
var clients []PublicKey
if c.clientFile != "" {
clients, err = LoadKeySetFile(c.clientFile)
if err != nil {
return fmt.Errorf("unable to load authorized keys: %s", err)
}
}
// Add clients from authorized keys directory
files, err := ioutil.ReadDir(c.clientDir)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("unable to open authorized keys directory: %s", err)
}
for _, f := range files {
if !f.IsDir() {
publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
if err != nil {
return fmt.Errorf("unable to load authorized key file: %s", err)
}
clients = append(clients, publicKey)
}
}
c.clientLock.Lock()
c.clients = clients
c.clientLock.Unlock()
return nil
}
// RegisterTLSConfig registers a tls configuration with the manager
// such that any changes to the keys may be reflected in
// the tls client CA pool
func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
c.clientLock.RLock()
certPool, err := GenerateCACertPool(c.key, c.clients)
if err != nil {
return fmt.Errorf("CA pool generation error: %s", err)
}
c.clientLock.RUnlock()
tlsConfig.ClientCAs = certPool
c.configLock.Lock()
c.configs = append(c.configs, tlsConfig)
c.configLock.Unlock()
return nil
}
// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
// libtrust identity authentication for the domain specified
func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
return nil, err
}
// Generate cert
ips, domains, err := parseAddr(addr)
if err != nil {
return nil, err
}
// add domain that it expects clients to use
domains = append(domains, domain)
x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
return tlsConfig, nil
}
// NewCertAuthTLSConfig creates a tls.Config for the server to use for
// certificate authentication
func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
}
tlsConfig.Certificates = []tls.Certificate{cert}
// Verify client certificates against a CA?
if caPath != "" {
certPool := x509.NewCertPool()
file, err := ioutil.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
}
certPool.AppendCertsFromPEM(file)
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
tlsConfig.ClientCAs = certPool
}
return tlsConfig, nil
}
func newTLSConfig() *tls.Config {
return &tls.Config{
NextProtos: []string{"http/1.1"},
// Avoid fallback on insecure SSL protocols
MinVersion: tls.VersionTLS10,
}
}
// parseAddr parses an address into an array of IPs and domains
func parseAddr(addr string) ([]net.IP, []string, error) {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, nil, err
}
var domains []string
var ips []net.IP
ip := net.ParseIP(host)
if ip != nil {
ips = []net.IP{ip}
} else {
domains = []string{host}
}
return ips, domains, nil
}
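
// Illustrative sketch (not part of the vendored file): building a server-side
// tls.Config that only accepts clients whose keys are listed in an authorized
// keys file or directory. The paths, address and domain are made up for the
// example; missing key files are treated as an empty set by the loaders above.
package main

import (
	"github.com/docker/libtrust"
)

func main() {
	trustKey, err := libtrust.LoadOrCreateTrustKey("/var/lib/example/key.json")
	if err != nil {
		panic(err)
	}
	clients, err := libtrust.NewClientKeyManager(trustKey, "/etc/example/authorized-keys.json", "/etc/example/authorized-keys.d")
	if err != nil {
		panic(err)
	}
	tlsConfig, err := libtrust.NewIdentityAuthTLSConfig(trustKey, clients, "0.0.0.0:4243", "example.com")
	if err != nil {
		panic(err)
	}
	_ = tlsConfig // plug into an http.Server{TLSConfig: tlsConfig} or tls.Listen
}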

@ -0,0 +1,427 @@
package libtrust
import (
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"math/big"
)
/*
* RSA DSA PUBLIC KEY
*/
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
*rsa.PublicKey
extended map[string]interface{}
}
func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
}
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
func (k *rsaPublicKey) KeyType() string {
return "RSA"
}
// KeyID returns a distinct identifier which is unique to this Public Key.
func (k *rsaPublicKey) KeyID() string {
return keyIDFromCryptoKey(k)
}
func (k *rsaPublicKey) String() string {
return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
}
// Verify verifies the signature of the data in the io.Reader using this Public Key.
// The alg parameter should be the name of the JWA digital signature algorithm
// which was used to produce the signature and should be supported by this
// public key. Returns a nil error if the signature is valid.
func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
// Verify the signature of the given data, return a non-nil error if it is not valid.
sigAlg, err := rsaSignatureAlgorithmByName(alg)
if err != nil {
return fmt.Errorf("unable to verify Signature: %s", err)
}
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
if err != nil {
return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
}
return nil
}
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
return k.PublicKey
}
func (k *rsaPublicKey) toMap() map[string]interface{} {
jwk := make(map[string]interface{})
for k, v := range k.extended {
jwk[k] = v
}
jwk["kty"] = k.KeyType()
jwk["kid"] = k.KeyID()
jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
return jwk
}
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
if err != nil {
return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
}
k.extended["kid"] = k.KeyID() // For display purposes.
return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
k.extended[field] = value
}
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
v, ok := k.extended[field]
if !ok {
return nil
}
return v
}
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract 'n', 'e', and 'kid' and check for
// consistency.
// Get the modulus parameter N.
nB64Url, err := stringFromMap(jwk, "n")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
n, err := parseRSAModulusParam(nB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
}
// Get the public exponent E.
eB64Url, err := stringFromMap(jwk, "e")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
e, err := parseRSAPublicExponentParam(eB64Url)
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
}
key := &rsaPublicKey{
PublicKey: &rsa.PublicKey{N: n, E: e},
}
// Key ID is optional, but if it exists, it should match the key.
_, ok := jwk["kid"]
if ok {
kid, err := stringFromMap(jwk, "kid")
if err != nil {
return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
}
if kid != key.KeyID() {
return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
}
}
if _, ok := jwk["d"]; ok {
return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
}
key.extended = jwk
return key, nil
}
/*
* RSA DSA PRIVATE KEY
*/
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
rsaPublicKey
*rsa.PrivateKey
}
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
return &rsaPrivateKey{
*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
cryptoPrivateKey,
}
}
// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
return &k.rsaPublicKey
}
func (k *rsaPrivateKey) String() string {
return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature, otherwise
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
// Generate a signature of the data using the internal alg.
sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
hasher := sigAlg.HashID().New()
_, err = io.Copy(hasher, data)
if err != nil {
return nil, "", fmt.Errorf("error reading data to sign: %s", err)
}
hash := hasher.Sum(nil)
signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
if err != nil {
return nil, "", fmt.Errorf("error producing signature: %s", err)
}
alg = sigAlg.HeaderParam()
return
}
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The type
// is either *rsa.PrivateKey or *ecdsa.PrivateKey
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
return k.PrivateKey
}
func (k *rsaPrivateKey) toMap() map[string]interface{} {
k.Precompute() // Make sure the precomputed values are stored.
jwk := k.rsaPublicKey.toMap()
jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
otherPrimes := k.Primes[2:]
if len(otherPrimes) > 0 {
otherPrimesInfo := make([]interface{}, len(otherPrimes))
for i, r := range otherPrimes {
otherPrimeInfo := make(map[string]string, 3)
otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
crtVal := k.Precomputed.CRTValues[i]
otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
otherPrimesInfo[i] = otherPrimeInfo
}
jwk["oth"] = otherPrimesInfo
}
return jwk
}
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
return json.Marshal(k.toMap())
}
// PEMBlock serializes this Private Key to DER-encoded PKCS #1 format.
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
k.extended["keyID"] = k.KeyID() // For display purposes.
return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
// only the private key exponent 'd' is REQUIRED, the others are just for
// signature/decryption optimizations and SHOULD be included when the JWK
// is produced. We MAY accept a JWK which only includes 'd', but this
// implementation chooses not to accept it without the extra fields.
// Only the 'oth' field will be optional (for multi-prime keys).
privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
}
firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
var oth interface{}
if _, ok := jwk["oth"]; ok {
oth = jwk["oth"]
delete(jwk, "oth")
}
// JWK key type (kty) has already been determined to be "RSA".
// Need to extract the public key information, then extract the private
// key values.
publicKey, err := rsaPublicKeyFromMap(jwk)
if err != nil {
return nil, err
}
privateKey := &rsa.PrivateKey{
PublicKey: *publicKey.PublicKey,
D: privateExponent,
Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
Precomputed: rsa.PrecomputedValues{
Dp: firstFactorCRT,
Dq: secondFactorCRT,
Qinv: crtCoeff,
},
}
if oth != nil {
// Should be an array of more JSON objects.
otherPrimesInfo, ok := oth.([]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
}
numOtherPrimeFactors := len(otherPrimesInfo)
if numOtherPrimeFactors == 0 {
return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
}
otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
for i, val := range otherPrimesInfo {
otherPrimeinfo, ok := val.(map[string]interface{})
if !ok {
return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
}
otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
}
otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
}
otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
if err != nil {
return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
}
crtValue := crtValues[i]
crtValue.Exp = otherFactorCRT
crtValue.Coeff = otherCrtCoeff
crtValue.R = productOfPrimes
otherPrimeFactors[i] = otherPrimeFactor
productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
}
privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
privateKey.Precomputed.CRTValues = crtValues
}
key := &rsaPrivateKey{
rsaPublicKey: *publicKey,
PrivateKey: privateKey,
}
return key, nil
}
/*
* Key Generation Functions.
*/
func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
k = new(rsaPrivateKey)
k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
k.extended = make(map[string]interface{})
return
}
// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(2048)
if err != nil {
return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
}
return k, nil
}
// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(3072)
if err != nil {
return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
}
return k, nil
}
// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
k, err := generateRSAPrivateKey(4096)
if err != nil {
return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
}
return k, nil
}
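// Hedged usage sketch (illustrative, not part of the vendored file): generating
// a signing key and printing its libtrust fingerprint. KeyID() is assumed to be
// available on the returned PrivateKey, as it is on this package's PublicKey type.
//
//	key, err := GenerateRSA2048PrivateKey()
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(key.KeyID())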

@ -0,0 +1,363 @@
package libtrust
import (
"bytes"
"crypto"
"crypto/elliptic"
"crypto/tls"
"crypto/x509"
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
// LoadOrCreateTrustKey will load a PrivateKey from the specified path, generating
// and saving a new key (plus a public-key file next to it) if none exists yet.
func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
return nil, err
}
trustKey, err := LoadKeyFile(trustKeyPath)
if err == ErrKeyFileDoesNotExist {
trustKey, err = GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("error generating key: %s", err)
}
if err := SaveKey(trustKeyPath, trustKey); err != nil {
return nil, fmt.Errorf("error saving key file: %s", err)
}
dir, file := filepath.Split(trustKeyPath)
if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
return nil, fmt.Errorf("error saving public key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("error loading key file: %s", err)
}
return trustKey, nil
}
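// Hedged usage sketch (illustrative only; the path below is an assumption, any
// writable location works):
//
//	trustKey, err := LoadOrCreateTrustKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"))
//	if err != nil {
//		log.Fatalf("could not load or create trust key: %v", err)
//	}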
// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use
// identity-based authentication from the specified dockerUrl, the rootConfigPath and
// the server name to which it is connecting.
// If trustUnknownHosts is true it will automatically add the host to the
// known-hosts.json in rootConfigPath.
func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
tlsConfig := newTLSConfig()
trustKeyPath := filepath.Join(rootConfigPath, "key.json")
knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
u, err := url.Parse(dockerUrl)
if err != nil {
return nil, fmt.Errorf("unable to parse machine url")
}
if u.Scheme == "unix" {
return nil, nil
}
addr := u.Host
proto := "tcp"
trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
if err != nil {
return nil, fmt.Errorf("unable to load trust key: %s", err)
}
knownHosts, err := LoadKeySetFile(knownHostsPath)
if err != nil {
return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
}
allowedHosts, err := FilterByHosts(knownHosts, addr, false)
if err != nil {
return nil, fmt.Errorf("error filtering hosts: %s", err)
}
certPool, err := GenerateCACertPool(trustKey, allowedHosts)
if err != nil {
return nil, fmt.Errorf("Could not create CA pool: %s", err)
}
tlsConfig.ServerName = serverName
tlsConfig.RootCAs = certPool
x509Cert, err := GenerateSelfSignedClientCert(trustKey)
if err != nil {
return nil, fmt.Errorf("certificate generation error: %s", err)
}
tlsConfig.Certificates = []tls.Certificate{{
Certificate: [][]byte{x509Cert.Raw},
PrivateKey: trustKey.CryptoPrivateKey(),
Leaf: x509Cert,
}}
tlsConfig.InsecureSkipVerify = true
testConn, err := tls.Dial(proto, addr, tlsConfig)
if err != nil {
return nil, fmt.Errorf("tls Handshake error: %s", err)
}
opts := x509.VerifyOptions{
Roots: tlsConfig.RootCAs,
CurrentTime: time.Now(),
DNSName: tlsConfig.ServerName,
Intermediates: x509.NewCertPool(),
}
certs := testConn.ConnectionState().PeerCertificates
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
if _, err := certs[0].Verify(opts); err != nil {
if _, ok := err.(x509.UnknownAuthorityError); ok {
if trustUnknownHosts {
pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
if err != nil {
return nil, fmt.Errorf("error extracting public key from cert: %s", err)
}
pubKey.AddExtendedField("hosts", []string{addr})
if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
}
} else {
return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
}
}
}
testConn.Close()
tlsConfig.InsecureSkipVerify = false
return tlsConfig, nil
}
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
s = strings.Replace(s, "\n", "", -1)
s = strings.Replace(s, " ", "", -1)
switch len(s) % 4 {
case 0:
case 2:
s += "=="
case 3:
s += "="
default:
return nil, errors.New("illegal base64url string")
}
return base64.URLEncoding.DecodeString(s)
}
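// Worked example of the padding rules above (a sketch, illustrative only): the
// two bytes {0xfa, 0xce} encode to "-s4" once the single trailing '=' is
// trimmed; joseBase64UrlDecode("-s4") re-adds the '=' (len%4 == 3) and
// restores the original two bytes.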
func keyIDEncode(b []byte) string {
s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
var buf bytes.Buffer
var i int
for i = 0; i < len(s)/4-1; i++ {
start := i * 4
end := start + 4
buf.WriteString(s[start:end] + ":")
}
buf.WriteString(s[i*4:])
return buf.String()
}
func keyIDFromCryptoKey(pubKey PublicKey) string {
// Generate and return a 'libtrust' fingerprint of the public key.
// For an RSA key this should be:
// SHA256(DER encoded ASN1)
// Then truncated to 240 bits and encoded into 12 base32 groups like so:
// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
if err != nil {
return ""
}
hasher := crypto.SHA256.New()
hasher.Write(derBytes)
return keyIDEncode(hasher.Sum(nil)[:30])
}
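// Worked arithmetic for the comment above (illustrative): 30 bytes of digest
// are 240 bits; base32 emits one character per 5 bits, so 48 characters with
// no '=' padding, which keyIDEncode splits into 12 colon-separated groups of 4.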
func stringFromMap(m map[string]interface{}, key string) (string, error) {
val, ok := m[key]
if !ok {
return "", fmt.Errorf("%q value not specified", key)
}
str, ok := val.(string)
if !ok {
return "", fmt.Errorf("%q value must be a string", key)
}
delete(m, key)
return str, nil
}
func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
curveByteLen := (curve.Params().BitSize + 7) >> 3
cBytes, err := joseBase64UrlDecode(cB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
cByteLength := len(cBytes)
if cByteLength != curveByteLen {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
}
return new(big.Int).SetBytes(cBytes), nil
}
func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
dBytes, err := joseBase64UrlDecode(dB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve). This is because the private
// key d must be in the interval [1, n-1] so the bitlength of d should be
// no larger than the bitlength of n-1. The easiest way to find the octet
// length is to take bitlength(n-1), add 7 to force a carry, and shift this
// bit sequence right by 3, which is essentially dividing by 8 and adding
// 1 if there is any remainder. Thus, the private key value d should be
// output to (bitlength(n-1)+7)>>3 octets.
n := curve.Params().N
octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
dByteLength := len(dBytes)
if dByteLength != octetLength {
return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
}
return new(big.Int).SetBytes(dBytes), nil
}
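// Worked example for the octet-length rule above (illustrative): for P-256 the
// order n is a 256-bit value, so bitlength(n-1) is 256 and (256+7)>>3 = 32,
// i.e. the 'd' parameter must decode to exactly 32 octets.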
func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
nBytes, err := joseBase64UrlDecode(nB64Url)
if err != nil {
return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(nBytes), nil
}
func serializeRSAPublicExponentParam(e int) []byte {
// We MUST use the minimum number of octets to represent E.
// E is supposed to be 65537 for performance and security reasons
// and is what golang's rsa package generates, but it might be
// different if imported from some other generator.
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(e))
var i int
for i = 0; i < len(buf); i++ {
if buf[i] != 0 {
break
}
}
return buf[i:]
}
func parseRSAPublicExponentParam(eB64Url string) (int, error) {
eBytes, err := joseBase64UrlDecode(eB64Url)
if err != nil {
return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
}
// Only the minimum number of bytes were used to represent E, but
// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
// to add zero padding if necessary.
byteLen := len(eBytes)
buf := make([]byte, 4-byteLen, 4)
eBytes = append(buf, eBytes...)
return int(binary.BigEndian.Uint32(eBytes)), nil
}
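// Worked example for the two functions above (illustrative): e = 65537 is
// 0x00010001, so serializeRSAPublicExponentParam returns the minimal three
// bytes {0x01, 0x00, 0x01}, which corresponds to the familiar JWK value
// "e": "AQAB"; parseRSAPublicExponentParam left-pads those three bytes back
// to four before reading them as a big-endian uint32.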
func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
b64Url, err := stringFromMap(m, key)
if err != nil {
return nil, err
}
paramBytes, err := joseBase64UrlDecode(b64Url)
if err != nil {
return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
}
return new(big.Int).SetBytes(paramBytes), nil
}
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
for k, v := range headers {
switch val := v.(type) {
case string:
pemBlock.Headers[k] = val
case []string:
if k == "hosts" {
pemBlock.Headers[k] = strings.Join(val, ",")
} else {
return nil, fmt.Errorf("unable to encode PEM header %q: unsupported []string value", k)
}
default:
return nil, fmt.Errorf("unable to encode PEM header %q: non-encodable type %T", k, val)
}
}
return pemBlock, nil
}
func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
}
pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
if err != nil {
return nil, err
}
addPEMHeadersToKey(pemBlock, pubKey)
return pubKey, nil
}
func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
for key, value := range pemBlock.Headers {
var safeVal interface{}
if key == "hosts" {
safeVal = strings.Split(value, ",")
} else {
safeVal = value
}
pubKey.AddExtendedField(key, safeVal)
}
}

@ -0,0 +1,26 @@
Copyright (c) 2015, Salesforce.com, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,151 @@
package registry
import (
"net/http"
"strings"
)
// Octet types from RFC 2616.
type octetType byte
// AuthorizationChallenge carries information
// from a WWW-Authenticate response header.
type AuthorizationChallenge struct {
Scheme string
Parameters map[string]string
}
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
//nolint:gochecknoinits
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
func parseAuthHeader(header http.Header) []*AuthorizationChallenge {
var challenges []*AuthorizationChallenge
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p})
}
}
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
s = "," + skipSpace(s)
for strings.HasPrefix(s, ",") {
var pkey string
pkey, s = expectToken(skipSpace(s[1:]))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
}
return
}
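// Hedged example of the parsing above (the header value is illustrative):
//
//	WWW-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io"
//
// yields value "bearer" and params {"realm": "https://auth.docker.io/token",
// "service": "registry.docker.io"}, which TokenTransport later uses to build
// its token request.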
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}

@ -0,0 +1,23 @@
package registry
import (
"net/http"
"strings"
)
type BasicTransport struct {
Transport http.RoundTripper
URL string
Username string
Password string
}
func (t *BasicTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if strings.HasPrefix(req.URL.String(), t.URL) {
if t.Username != "" || t.Password != "" {
req.SetBasicAuth(t.Username, t.Password)
}
}
resp, err := t.Transport.RoundTrip(req)
return resp, err
}

@ -0,0 +1,108 @@
package registry
import (
"io"
"net/http"
"net/url"
"github.com/docker/distribution"
digest "github.com/opencontainers/go-digest"
)
func (registry *Registry) DownloadBlob(repository string, digest digest.Digest) (io.ReadCloser, error) {
url := registry.url("/v2/%s/blobs/%s", repository, digest)
registry.Logf("registry.blob.download url=%s repository=%s digest=%s", url, repository, digest)
resp, err := registry.Client.Get(url)
if err != nil {
return nil, err
}
return resp.Body, nil
}
func (registry *Registry) UploadBlob(repository string, digest digest.Digest, content io.Reader) error {
uploadURL, err := registry.initiateUpload(repository)
if err != nil {
return err
}
q := uploadURL.Query()
q.Set("digest", digest.String())
uploadURL.RawQuery = q.Encode()
registry.Logf("registry.blob.upload url=%s repository=%s digest=%s", uploadURL, repository, digest)
upload, err := http.NewRequest("PUT", uploadURL.String(), content)
if err != nil {
return err
}
upload.Header.Set("Content-Type", "application/octet-stream")
_, err = registry.Client.Do(upload)
return err
}
func (registry *Registry) HasBlob(repository string, digest digest.Digest) (bool, error) {
checkURL := registry.url("/v2/%s/blobs/%s", repository, digest)
registry.Logf("registry.blob.check url=%s repository=%s digest=%s", checkURL, repository, digest)
resp, err := registry.Client.Head(checkURL)
if resp != nil {
defer resp.Body.Close()
}
if err == nil {
return resp.StatusCode == http.StatusOK, nil
}
urlErr, ok := err.(*url.Error)
if !ok {
return false, err
}
httpErr, ok := urlErr.Err.(*HTTPStatusError)
if !ok {
return false, err
}
if httpErr.Response.StatusCode == http.StatusNotFound {
return false, nil
}
return false, err
}
func (registry *Registry) BlobMetadata(repository string, digest digest.Digest) (distribution.Descriptor, error) {
checkURL := registry.url("/v2/%s/blobs/%s", repository, digest)
registry.Logf("registry.blob.check url=%s repository=%s digest=%s", checkURL, repository, digest)
resp, err := registry.Client.Head(checkURL)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return distribution.Descriptor{}, err
}
return distribution.Descriptor{
Digest: digest,
Size: resp.ContentLength,
}, nil
}
func (registry *Registry) initiateUpload(repository string) (*url.URL, error) {
initiateURL := registry.url("/v2/%s/blobs/uploads/", repository)
registry.Logf("registry.blob.initiate-upload url=%s repository=%s", initiateURL, repository)
resp, err := registry.Client.Post(initiateURL, "application/octet-stream", nil)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, err
}
location := resp.Header.Get("Location")
locationURL, err := url.Parse(location)
if err != nil {
return nil, err
}
return locationURL, nil
}

@ -0,0 +1,46 @@
package registry
import (
"fmt"
"io/ioutil"
"net/http"
)
type HTTPStatusError struct {
Response *http.Response
// Copied from `Response.Body` to avoid problems with unclosed bodies later.
// Nobody calls `err.Response.Body.Close()`, ever.
Body []byte
}
func (err *HTTPStatusError) Error() string {
return fmt.Sprintf("http: non-successful response (status=%v body=%q)", err.Response.StatusCode, err.Body)
}
var _ error = &HTTPStatusError{}
type ErrorTransport struct {
Transport http.RoundTripper
}
func (t *ErrorTransport) RoundTrip(request *http.Request) (*http.Response, error) {
resp, err := t.Transport.RoundTrip(request)
if err != nil {
return resp, err
}
if resp.StatusCode >= 400 {
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("http: failed to read response body (status=%v, err=%q)", resp.StatusCode, err)
}
return nil, &HTTPStatusError{
Response: resp,
Body: body,
}
}
return resp, err
}

@ -0,0 +1,50 @@
package registry
import (
"encoding/json"
"errors"
"net/http"
"regexp"
)
var (
ErrNoMorePages = errors.New("no more pages")
)
// getPaginatedJSON accepts a string and a pointer, and returns the
// next page URL while updating the pointed-to variable with a parsed JSON
// value. When there are no more pages it returns `ErrNoMorePages`.
func (registry *Registry) getPaginatedJSON(url string, response interface{}) (string, error) {
resp, err := registry.Client.Get(url)
if err != nil {
return "", err
}
defer resp.Body.Close()
decoder := json.NewDecoder(resp.Body)
err = decoder.Decode(response)
if err != nil {
return "", err
}
return getNextLink(resp)
}
// Matches an RFC 5988 (https://tools.ietf.org/html/rfc5988#section-5)
// Link header. For example,
//
// <http://registry.example.com/v2/_catalog?n=5&last=tag5>; type="application/json"; rel="next"
//
// The URL is _supposed_ to be wrapped by angle brackets `< ... >`,
// but e.g., quay.io does not include them. Similarly, params like
// `rel="next"` may not have quoted values in the wild.
var nextLinkRE = regexp.MustCompile(`^ *<?([^;>]+)>? *(?:;[^;]*)*; *rel="?next"?(?:;.*)?`)
func getNextLink(resp *http.Response) (string, error) {
for _, link := range resp.Header[http.CanonicalHeaderKey("Link")] {
parts := nextLinkRE.FindStringSubmatch(link)
if parts != nil {
return parts[1], nil
}
}
return "", ErrNoMorePages
}
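// Hedged example (the header value is illustrative): a response carrying
//
//	Link: </v2/_catalog?n=100&last=rancher/k3s>; rel="next"
//
// makes getNextLink return "/v2/_catalog?n=100&last=rancher/k3s", which the
// pagination loops built on getPaginatedJSON fetch next; without a matching
// Link header it returns ErrNoMorePages.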

@ -0,0 +1,126 @@
package registry
import (
"bytes"
"io/ioutil"
"net/http"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
digest "github.com/opencontainers/go-digest"
)
func (registry *Registry) Manifest(repository, reference string) (*schema1.SignedManifest, error) {
url := registry.url("/v2/%s/manifests/%s", repository, reference)
registry.Logf("registry.manifest.get url=%s repository=%s reference=%s", url, repository, reference)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", schema1.MediaTypeManifest)
resp, err := registry.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
signedManifest := &schema1.SignedManifest{}
err = signedManifest.UnmarshalJSON(body)
if err != nil {
return nil, err
}
return signedManifest, nil
}
func (registry *Registry) ManifestV2(repository, reference string) (*schema2.DeserializedManifest, error) {
url := registry.url("/v2/%s/manifests/%s", repository, reference)
registry.Logf("registry.manifest.get url=%s repository=%s reference=%s", url, repository, reference)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", schema2.MediaTypeManifest)
resp, err := registry.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
deserialized := &schema2.DeserializedManifest{}
err = deserialized.UnmarshalJSON(body)
if err != nil {
return nil, err
}
return deserialized, nil
}
func (registry *Registry) ManifestDigest(repository, reference string) (digest.Digest, error) {
url := registry.url("/v2/%s/manifests/%s", repository, reference)
registry.Logf("registry.manifest.head url=%s repository=%s reference=%s", url, repository, reference)
resp, err := registry.Client.Head(url)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return "", err
}
return digest.Parse(resp.Header.Get("Docker-Content-Digest"))
}
func (registry *Registry) DeleteManifest(repository string, digest digest.Digest) error {
url := registry.url("/v2/%s/manifests/%s", repository, digest)
registry.Logf("registry.manifest.delete url=%s repository=%s reference=%s", url, repository, digest)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
resp, err := registry.Client.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return err
}
return nil
}
func (registry *Registry) PutManifest(repository, reference string, manifest distribution.Manifest) error {
url := registry.url("/v2/%s/manifests/%s", repository, reference)
registry.Logf("registry.manifest.put url=%s repository=%s reference=%s", url, repository, reference)
mediaType, payload, err := manifest.Payload()
if err != nil {
return err
}
buffer := bytes.NewBuffer(payload)
req, err := http.NewRequest("PUT", url, buffer)
if err != nil {
return err
}
req.Header.Set("Content-Type", mediaType)
resp, err := registry.Client.Do(req)
if resp != nil {
defer resp.Body.Close()
}
return err
}

@ -0,0 +1,118 @@
package registry
import (
"crypto/tls"
"fmt"
"log"
"net/http"
"strings"
)
type LogfCallback func(format string, args ...interface{})
/*
* Discard log messages silently.
*/
func Quiet(format string, args ...interface{}) {
/* discard logs */
}
/*
* Pass log messages along to Go's "log" module.
*/
func Log(format string, args ...interface{}) {
log.Printf(format, args...)
}
type Registry struct {
URL string
Client *http.Client
Logf LogfCallback
}
/*
* Creates a new Registry with the given URL and credentials, then Pings it
* before returning to verify that the registry is available.
*
* You can, alternatively, construct a Registry manually by populating the fields.
* This passes http.DefaultTransport to WrapTransport when creating the
* http.Client.
*/
func New(registryURL, username, password string) (*Registry, error) {
transport := http.DefaultTransport
return newFromTransport(registryURL, username, password, transport, Log)
}
/*
* Creates a new Registry, as with New, using an http.Transport that disables
* SSL certificate verification.
*/
func NewInsecure(registryURL, username, password string) (*Registry, error) {
transport := &http.Transport{
TLSClientConfig: &tls.Config{
// TODO: Why?
InsecureSkipVerify: true, //nolint:gosec
},
}
return newFromTransport(registryURL, username, password, transport, Log)
}
/*
* Given an existing http.RoundTripper such as http.DefaultTransport, build the
* transport stack necessary to authenticate to the Docker registry API. This
* adds in support for OAuth bearer tokens and HTTP Basic auth, and sets up
* error handling this library relies on.
*/
func WrapTransport(transport http.RoundTripper, url, username, password string) http.RoundTripper {
tokenTransport := &TokenTransport{
Transport: transport,
Username: username,
Password: password,
}
basicAuthTransport := &BasicTransport{
Transport: tokenTransport,
URL: url,
Username: username,
Password: password,
}
errorTransport := &ErrorTransport{
Transport: basicAuthTransport,
}
return errorTransport
}
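// Hedged usage sketch: callers that need a custom base transport (for example
// one honouring proxy settings) can wrap it themselves and populate Registry
// directly instead of calling New; the registry URL below is only an example.
//
//	base := &http.Transport{Proxy: http.ProxyFromEnvironment}
//	wrapped := WrapTransport(base, "https://registry-1.docker.io", "", "")
//	hub := &Registry{
//		URL:    "https://registry-1.docker.io",
//		Client: &http.Client{Transport: wrapped},
//		Logf:   Quiet,
//	}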
func newFromTransport(registryURL, username, password string, transport http.RoundTripper, logf LogfCallback) (*Registry, error) {
url := strings.TrimSuffix(registryURL, "/")
transport = WrapTransport(transport, url, username, password)
registry := &Registry{
URL: url,
Client: &http.Client{
Transport: transport,
},
Logf: logf,
}
if err := registry.Ping(); err != nil {
return nil, err
}
return registry, nil
}
func (r *Registry) url(pathTemplate string, args ...interface{}) string {
pathSuffix := fmt.Sprintf(pathTemplate, args...)
url := fmt.Sprintf("%s%s", r.URL, pathSuffix)
return url
}
func (r *Registry) Ping() error {
url := r.url("/v2/")
r.Logf("registry.ping url=%s", url)
resp, err := r.Client.Get(url)
if resp != nil {
defer resp.Body.Close()
}
return err
}

@ -0,0 +1,26 @@
package registry
type repositoriesResponse struct {
Repositories []string `json:"repositories"`
}
func (registry *Registry) Repositories() ([]string, error) {
url := registry.url("/v2/_catalog")
repos := make([]string, 0, 10)
var err error // declared here so that url is not re-scoped by := inside the loop
var response repositoriesResponse
for {
registry.Logf("registry.repositories url=%s", url)
url, err = registry.getPaginatedJSON(url, &response)
switch err {
case ErrNoMorePages:
repos = append(repos, response.Repositories...)
return repos, nil
case nil:
repos = append(repos, response.Repositories...)
continue
default:
return nil, err
}
}
}

@ -0,0 +1,25 @@
package registry
type tagsResponse struct {
Tags []string `json:"tags"`
}
func (registry *Registry) Tags(repository string) (tags []string, err error) {
url := registry.url("/v2/%s/tags/list", repository)
var response tagsResponse
for {
registry.Logf("registry.tags url=%s repository=%s", url, repository)
url, err = registry.getPaginatedJSON(url, &response)
switch err {
case ErrNoMorePages:
tags = append(tags, response.Tags...)
return tags, nil
case nil:
tags = append(tags, response.Tags...)
continue
default:
return nil, err
}
}
}

@ -0,0 +1,128 @@
package registry
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
)
type TokenTransport struct {
Transport http.RoundTripper
Username string
Password string
}
func (t *TokenTransport) RoundTrip(req *http.Request) (*http.Response, error) {
resp, err := t.Transport.RoundTrip(req)
if err != nil {
return resp, err
}
if authService := isTokenDemand(resp); authService != nil {
resp.Body.Close()
resp, err = t.authAndRetry(authService, req)
}
return resp, err
}
type authToken struct {
Token string `json:"token"`
}
func (t *TokenTransport) authAndRetry(authService *authService, req *http.Request) (*http.Response, error) {
token, authResp, err := t.auth(authService)
if err != nil {
return authResp, err
}
retryResp, err := t.retry(req, token)
return retryResp, err
}
func (t *TokenTransport) auth(authService *authService) (string, *http.Response, error) {
authReq, err := authService.Request(t.Username, t.Password)
if err != nil {
return "", nil, err
}
client := http.Client{
Transport: t.Transport,
}
response, err := client.Do(authReq)
if err != nil {
return "", nil, err
}
if response.StatusCode != http.StatusOK {
return "", response, err
}
defer response.Body.Close()
var authToken authToken
decoder := json.NewDecoder(response.Body)
err = decoder.Decode(&authToken)
if err != nil {
return "", nil, err
}
return authToken.Token, nil, nil
}
func (t *TokenTransport) retry(req *http.Request, token string) (*http.Response, error) {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
resp, err := t.Transport.RoundTrip(req)
return resp, err
}
type authService struct {
Realm string
Service string
Scope string
}
func (authService *authService) Request(username, password string) (*http.Request, error) {
url, err := url.Parse(authService.Realm)
if err != nil {
return nil, err
}
q := url.Query()
q.Set("service", authService.Service)
if authService.Scope != "" {
q.Set("scope", authService.Scope)
}
url.RawQuery = q.Encode()
request, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
if username != "" || password != "" {
request.SetBasicAuth(username, password)
}
return request, nil
}
func isTokenDemand(resp *http.Response) *authService {
if resp == nil {
return nil
}
if resp.StatusCode != http.StatusUnauthorized {
return nil
}
return parseOauthHeader(resp)
}
func parseOauthHeader(resp *http.Response) *authService {
challenges := parseAuthHeader(resp.Header)
for _, challenge := range challenges {
if challenge.Scheme == "bearer" {
return &authService{
Realm: challenge.Parameters["realm"],
Service: challenge.Parameters["service"],
Scope: challenge.Parameters["scope"],
}
}
}
return nil
}

@ -79,7 +79,11 @@ github.com/containerd/ttrpc
# github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd
github.com/containerd/typeurl
# github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c
github.com/docker/distribution
github.com/docker/distribution/digestset
github.com/docker/distribution/manifest
github.com/docker/distribution/manifest/schema1
github.com/docker/distribution/manifest/schema2
github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v1.4.2-0.20190905191220-3b23f9033967
@ -109,6 +113,8 @@ github.com/docker/go-connections/tlsconfig
github.com/docker/go-events
# github.com/docker/go-units v0.4.0
github.com/docker/go-units
# github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
github.com/docker/libtrust
# github.com/go-test/deep v1.0.4
github.com/go-test/deep
# github.com/gogo/googleapis v1.3.0
@ -123,6 +129,8 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
# github.com/heroku/docker-registry-client v0.0.0-20190909225348-afc9e1acc3d5
github.com/heroku/docker-registry-client/registry
# github.com/inconshreveable/mousetrap v1.0.0
github.com/inconshreveable/mousetrap
# github.com/konsorten/go-windows-terminal-sequences v1.0.1

@ -21,6 +21,11 @@ THE SOFTWARE.
*/
package version
import (
"github.com/heroku/docker-registry-client/registry"
log "github.com/sirupsen/logrus"
)
// Version is the string that contains the version
var Version string
@ -37,6 +42,39 @@ func GetVersion() string {
}
// GetK3sVersion returns the version string for K3s
func GetK3sVersion() string {
func GetK3sVersion(latest bool) string {
if latest {
version, err := fetchLatestK3sVersion()
if err != nil || version == "" {
log.Warnln("Failed to fetch latest K3s version from DockerHub, falling back to hardcoded version.")
return K3sVersion
}
return version
}
return K3sVersion
}
// fetchLatestK3sVersion tries to fetch the latest version of k3s from DockerHub
func fetchLatestK3sVersion() (string, error) {
url := "https://registry-1.docker.io/"
username := "" // anonymous
password := "" // anonymous
repository := "rancher/k3s"
hub, err := registry.New(url, username, password)
if err != nil {
return "", err
}
tags, err := hub.Tags(repository)
if err != nil || len(tags) == 0 {
return "", err
}
log.Debugln("Fetched the following tags for rancher/k3s from DockerHub:")
log.Debugln(tags)
return "sampleTag", nil
}
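// A hedged sketch of how the fetched tag list could eventually be reduced to a
// single version instead of the placeholder return above; the filter and the
// naive comparison are illustrative assumptions, not the implemented behaviour.
//
//	latest := K3sVersion
//	for _, tag := range tags {
//		if tag == "latest" || strings.Contains(tag, "-") {
//			continue // skip the moving "latest" tag and suffixed (pre-release/arch) tags
//		}
//		if tag > latest { // naive lexical comparison; a semver-aware sort would be safer
//			latest = tag
//		}
//	}
//	return latest, nil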
